text (string, lengths 78–104k) | score (float64, 0–0.18) |
---|---|
def _candidates_from_grid(self, n=1000):
"""Get unused candidates from the grid or parameters."""
used_vectors = set(tuple(v) for v in self.X)
# if every point has been used before, gridding is done.
grid_size = self.grid_width ** len(self.tunables)
if len(used_vectors) == grid_size:
return None
all_vectors = set(itertools.product(*self._grid_axes))
remaining_vectors = all_vectors - used_vectors
candidates = np.array(list(map(np.array, remaining_vectors)))
np.random.shuffle(candidates)
return candidates[0:n] | 0.003289 |
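A standalone sketch of the same candidate-selection idea, with a made-up two-tunable grid (names and values are hypothetical): enumerate the grid with itertools.product, drop the vectors already tried, and shuffle the remainder.

import itertools
import numpy as np

# Hypothetical grid: two tunables, three values each.
grid_axes = [(0.0, 0.5, 1.0), (1, 2, 3)]
used_vectors = {(0.0, 1), (1.0, 3)}  # points already evaluated
remaining = set(itertools.product(*grid_axes)) - used_vectors
candidates = np.array(list(map(np.array, remaining)))
np.random.shuffle(candidates)
print(candidates[:5])  # up to five unused grid points, in random order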
def slug(request, url):
"""Look up a page by url (which is a tree of slugs)"""
page = None
if url:
for slug in url.split('/'):
if not slug:
continue
try:
page = Page.objects.get(slug=slug, parent=page)
except Page.DoesNotExist:
raise Http404
else:
try:
page = Page.objects.get(slug='index', parent=None)
except Page.DoesNotExist:
return TemplateView.as_view(
template_name='wafer/index.html')(request)
if 'edit' in request.GET:
if not request.user.has_perm('pages.change_page'):
raise PermissionDenied
return EditPage.as_view()(request, pk=page.id)
if 'compare' in request.GET:
if not request.user.has_perm('pages.change_page'):
raise PermissionDenied
return ComparePage.as_view()(request, pk=page.id)
return ShowPage.as_view()(request, pk=page.id) | 0.001016 |
def _get_future_devices(self, context):
"""Return a generator yielding new devices."""
monitor = Monitor.from_netlink(context)
monitor.filter_by("hidraw")
monitor.start()
self._scanning_log_message()
for device in iter(monitor.poll, None):
if device.action == "add":
# Sometimes udev rules have not been applied at this point,
# causing a permission-denied error if we are running in user
# mode. This sleep hopefully avoids that.
sleep(1)
yield device
self._scanning_log_message() | 0.003077 |
def run_tag_from_session_and_metric(session_name, metric_name):
"""Returns a (run,tag) tuple storing the evaluations of the specified metric.
Args:
session_name: str.
metric_name: MetricName protobuf message.
Returns: (run, tag) tuple.
"""
assert isinstance(session_name, six.string_types)
assert isinstance(metric_name, api_pb2.MetricName)
# os.path.join() will append a final slash if the group is empty; it seems
# like multiplexer.Tensors won't recognize paths that end with a '/' so
# we normalize the result of os.path.join() to remove the final '/' in that
# case.
run = os.path.normpath(os.path.join(session_name, metric_name.group))
tag = metric_name.tag
return run, tag | 0.015515 |
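The normpath trick above, shown in isolation (session and group names are made up): joining with an empty metric group leaves a trailing '/', which os.path.normpath strips (POSIX paths shown).

import os

print(os.path.join("session_1", ""))                         # 'session_1/'
print(os.path.normpath(os.path.join("session_1", "")))       # 'session_1'
print(os.path.normpath(os.path.join("session_1", "grp")))    # 'session_1/grp'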
def stdlib_list(version=None):
"""
Given a ``version``, return a ``list`` of names of the Python Standard
Libraries for that version. These names are obtained from the Sphinx inventory
file (used in :py:mod:`sphinx.ext.intersphinx`).
:param str|None version: The version (as a string) whose list of libraries you want
(one of ``"2.6"``, ``"2.7"``, ``"3.2"``, ``"3.3"``, ``"3.4"``, or ``"3.5"``).
If not specified, the current version of Python will be used.
:return: A list of standard libraries from the specified version of Python
:rtype: list
"""
version = get_canonical_version(version) if version is not None else '.'.join(
str(x) for x in sys.version_info[:2])
module_list_file = os.path.join(list_dir, "{}.txt".format(version))
with open(module_list_file) as f:
result = [y for y in [x.strip() for x in f.readlines()] if y]
return result | 0.005423 |
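A hypothetical usage sketch, assuming the stdlib_list package shown above is installed and exposes this function at the top level:

from stdlib_list import stdlib_list

libs = stdlib_list("2.7")
print("json" in libs)   # True: json ships with Python 2.7's standard library
print(len(libs))        # number of stdlib module names for that version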
def _get_stack_info_for_trace(
self,
frames,
library_frame_context_lines=None,
in_app_frame_context_lines=None,
with_locals=True,
locals_processor_func=None,
):
"""Overrideable in derived clients to add frames/info, e.g. templates"""
return stacks.get_stack_info(
frames,
library_frame_context_lines=library_frame_context_lines,
in_app_frame_context_lines=in_app_frame_context_lines,
with_locals=with_locals,
include_paths_re=self.include_paths_re,
exclude_paths_re=self.exclude_paths_re,
locals_processor_func=locals_processor_func,
) | 0.005755 |
def add_pyobj(self, py_obj, **kwargs):
"""Adds a picklable Python object as a file to IPFS.
.. deprecated:: 0.4.2
The ``*_pyobj`` APIs allow for arbitrary code execution if abused.
Either switch to :meth:`~ipfsapi.Client.add_json` or use
``client.add_bytes(pickle.dumps(py_obj))`` instead.
Please see :meth:`~ipfsapi.Client.get_pyobj` for the
**security risks** of using these methods!
.. code-block:: python
>>> c.add_pyobj([0, 1.0, 2j, '3', 4e5])
'QmWgXZSUTNNDD8LdkdJ8UXSn55KfFnNvTP1r7SyaQd74Ji'
Parameters
----------
py_obj : object
A picklable Python object
Returns
-------
str : Hash of the added IPFS object
"""
warnings.warn("Using `*_pyobj` on untrusted data is a security risk",
DeprecationWarning)
return self.add_bytes(encoding.Pickle().encode(py_obj), **kwargs) | 0.002028 |
def set_error_pages(self, codes_map=None, common_prefix=None):
"""Add an error pages for managed 403, 404, 500 responses.
Shortcut for ``.set_error_page()``.
:param dict codes_map: Status code mapped into an html filepath or
just a filename if common_prefix is used.
If not set, a filename containing the status code is presumed: 403.html, 404.html, 500.html.
:param str|unicode common_prefix: Common path (prefix) for all files.
"""
statuses = [403, 404, 500]
if not codes_map:
codes_map = {code: '%s.html' % code for code in statuses}
if common_prefix:
for code, filename in codes_map.items():
codes_map[code] = os.path.join(common_prefix, filename)
for code, filepath in codes_map.items():
self.set_error_page(code, filepath)
return self._section | 0.003293 |
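A standalone illustration of the default mapping the method builds when only common_prefix is supplied (the prefix below is made up):

import os

statuses = [403, 404, 500]
common_prefix = '/srv/static/errors'
codes_map = {code: '%s.html' % code for code in statuses}
codes_map = {code: os.path.join(common_prefix, fn) for code, fn in codes_map.items()}
print(codes_map)
# {403: '/srv/static/errors/403.html', 404: '/srv/static/errors/404.html', 500: '/srv/static/errors/500.html'}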
def objectprep(self):
"""
If the script is being run as part of a pipeline, create and populate the objects for the current analysis
"""
for sample in self.metadata:
setattr(sample, self.analysistype, GenObject())
# Set the destination folder
sample[self.analysistype].outputdir = os.path.join(self.path, self.analysistype)
# Make the destination folder
make_path(sample[self.analysistype].outputdir)
sample[self.analysistype].baitedfastq = os.path.join(
sample[self.analysistype].outputdir,
'{at}_targetMatches.fastq.gz'.format(at=self.analysistype))
# Set the file type for the downstream analysis
sample[self.analysistype].filetype = self.filetype
if self.filetype == 'fasta':
sample[self.analysistype].assemblyfile = sample.general.bestassemblyfile | 0.005319 |
def load_stream(self, key, binary=False):
"""
Return a managed file-like object from which the calling code can read
previously-serialized data.
:param key:
:return: A managed stream-like object
"""
value = self.load_value(key, binary=binary)
yield io.BytesIO(value) if binary else io.StringIO(value) | 0.005479 |
def full(self, asvector=False):
"""Returns full array (uncompressed).
.. warning::
TT compression makes it possible to keep in memory tensors much larger than a PC can handle in
raw format. Therefore this function is quite unsafe; use it at your own risk.
:returns: numpy.ndarray -- full tensor.
"""
# Generate correct size vector
sz = self.n.copy()
if self.r[0] > 1:
sz = _np.concatenate(([self.r[0]], sz))
if self.r[self.d] > 1:
sz = _np.concatenate(([self.r[self.d]], sz))
if (_np.iscomplex(self.core).any()):
a = _tt_f90.tt_f90.ztt_to_full(
self.n, self.r, self.ps, self.core, _np.prod(sz))
else:
a = _tt_f90.tt_f90.dtt_to_full(
self.n, self.r, self.ps, _np.real(
self.core), _np.prod(sz))
a = a.reshape(sz, order='F')
if asvector:
a = a.flatten(order='F')
return a | 0.00501 |
def _hashes_match(self, a, b):
"""Constant time comparison of bytes for py3, strings for py2"""
if len(a) != len(b):
return False
diff = 0
if six.PY2:
a = bytearray(a)
b = bytearray(b)
for x, y in zip(a, b):
diff |= x ^ y
return not diff | 0.006006 |
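For reference, the standard library provides the same constant-time guarantee as the XOR-accumulate loop above; on Python 3 (and 2.7.7+) hmac.compare_digest is usually preferable to hand-rolling it:

import hmac

print(hmac.compare_digest(b"abcd", b"abcd"))  # True
print(hmac.compare_digest(b"abcd", b"abce"))  # False, with no early exit on the first differing byte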
def delete_app_id(self, app_id, mount_point='app-id'):
"""DELETE /auth/<mount_point>/map/app-id/<app_id>
:param app_id:
:type app_id:
:param mount_point:
:type mount_point:
:return:
:rtype:
"""
return self._adapter.delete('/v1/auth/{0}/map/app-id/{1}'.format(mount_point, app_id)) | 0.008499 |
def parse_fields(text, name_prefix=None, version=None, encoding_chars=None, validation_level=None,
references=None, force_varies=False):
"""
Parse the given ER7-encoded fields and return a list of :class:`hl7apy.core.Field`.
:type text: ``str``
:param text: the ER7-encoded string containing the fields to be parsed
:type name_prefix: ``str``
:param name_prefix: the field prefix (e.g. MSH)
:type version: ``str``
:param version: the HL7 version (e.g. "2.5"), or ``None`` to use the default
(see :func:`set_default_version <hl7apy.set_default_version>`)
:type encoding_chars: ``dict``
:param encoding_chars: a dictionary containing the encoding chars or None to use the default
(see :func:`set_default_encoding_chars <hl7apy.set_default_encoding_chars>`)
:type validation_level: ``int``
:param validation_level: the validation level. Possible values are those defined in
:class:`VALIDATION_LEVEL <hl7apy.consts.VALIDATION_LEVEL>` class or ``None`` to use the default
validation level (see :func:`set_default_validation_level <hl7apy.set_default_validation_level>`)
:type references: ``list``
:param references: A list of the references of the :class:`Field <hl7apy.core.Field>`'s children
:type force_varies: ``bool``
:param force_varies: flag that force the fields to use a varies structure when no reference is found.
It is used when a segment ends with a field of type varies that thus support infinite children
:return: a list of :class:`Field <hl7apy.core.Field>` instances
>>> fields = "1|NUCLEAR^NELDA^W|SPO|2222 HOME STREET^^ANN ARBOR^MI^^USA"
>>> nk1_fields = parse_fields(fields, name_prefix="NK1")
>>> print(nk1_fields)
[<Field NK1_1 (SET_ID_NK1) of type SI>, <Field NK1_2 (NAME) of type XPN>, <Field NK1_3 (RELATIONSHIP) of type CE>, \
<Field NK1_4 (ADDRESS) of type XAD>]
>>> s = Segment("NK1")
>>> s.children = nk1_fields
>>> print(s.to_er7())
NK1|1|NUCLEAR^NELDA^W|SPO|2222 HOME STREET^^ANN ARBOR^MI^^USA
>>> unknown_fields = parse_fields(fields)
>>> s.children = unknown_fields
>>> print(s.to_er7())
NK1||||||||||||||||||||||||||||||||||||||||1|NUCLEAR^NELDA^W|SPO|2222 HOME STREET^^ANN ARBOR^MI^^USA
"""
version = _get_version(version)
encoding_chars = _get_encoding_chars(encoding_chars, version)
validation_level = _get_validation_level(validation_level)
text = text.strip("\r")
field_sep = encoding_chars['FIELD']
repetition_sep = encoding_chars['REPETITION']
splitted_fields = text.split(field_sep)
fields = []
for index, field in enumerate(splitted_fields):
name = "{0}_{1}".format(name_prefix, index+1) if name_prefix is not None else None
try:
reference = references[name]['ref'] if references is not None else None
except KeyError:
reference = None
if field.strip() or name is None:
if name == 'MSH_2':
fields.append(parse_field(field, name, version, encoding_chars, validation_level,
reference))
else:
for rep in field.split(repetition_sep):
fields.append(parse_field(rep, name, version, encoding_chars, validation_level,
reference, force_varies))
elif name == "MSH_1":
fields.append(parse_field(field_sep, name, version, encoding_chars, validation_level,
reference))
return fields | 0.005259 |
def step(self, action):
"""
Apply sequence of actions to sequence of environments
actions -> (observations, rewards, news)
where 'news' is a boolean vector indicating whether each element is new.
"""
obs, rews, news, infos = self.env.step(action)
self.ret = self.ret * self.gamma + rews
obs = self._filter_observation(obs)
if self.ret_rms:
self.ret_rms.update(np.array([self.ret]))
rews = np.clip(rews / np.sqrt(self.ret_rms.var + self.epsilon), -self.cliprew, self.cliprew)
return obs, rews, news, infos | 0.006525 |
def index(self, i):
"""xrangeobject.index(value, [start, [stop]]) -> integer --
return index of value.
Raise ValueError if the value is not present.
"""
if self.count(i) == 0:
raise ValueError("{} is not in range".format(i))
return (i - self._start) // self._step | 0.006192 |
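A worked example of the same index arithmetic using Python 3's built-in range (numbers are arbitrary):

r = range(2, 20, 3)               # 2, 5, 8, 11, 14, 17
print(r.index(11))                # 3
print((11 - r.start) // r.step)   # 3 -- the same (i - start) // step computation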
def _to_java(self):
"""
Transfer this instance to a Java CrossValidator. Used for ML persistence.
:return: Java object equivalent to this instance.
"""
estimator, epms, evaluator = super(CrossValidator, self)._to_java_impl()
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.tuning.CrossValidator", self.uid)
_java_obj.setEstimatorParamMaps(epms)
_java_obj.setEvaluator(evaluator)
_java_obj.setEstimator(estimator)
_java_obj.setSeed(self.getSeed())
_java_obj.setNumFolds(self.getNumFolds())
_java_obj.setParallelism(self.getParallelism())
_java_obj.setCollectSubModels(self.getCollectSubModels())
return _java_obj | 0.006784 |
def send(self, sender: PytgbotApiBot):
"""
Send the message via pytgbot.
:param sender: The bot instance to send with.
:type sender: pytgbot.bot.Bot
:rtype: PytgbotApiMessage
"""
return sender.send_video_note(
# receiver, self.media, disable_notification=self.disable_notification, reply_to_message_id=reply_id
video_note=self.video_note, chat_id=self.receiver, reply_to_message_id=self.reply_id, duration=self.duration, length=self.length, thumb=self.thumb, disable_notification=self.disable_notification, reply_markup=self.reply_markup
) | 0.006349 |
def pack_into_dict(fmt, names, buf, offset, data, **kwargs):
"""Same as :func:`~bitstruct.pack_into()`, but data is read from a
dictionary.
See :func:`~bitstruct.pack_dict()` for details on `names`.
"""
return CompiledFormatDict(fmt, names).pack_into(buf,
offset,
data,
**kwargs) | 0.002183 |
def strip_fields(self):
"""Clear any fields listed in field_list."""
for tag in self.record.keys():
if tag in self.fields_list:
record_delete_fields(self.record, tag) | 0.009524 |
def initial_annotate_kwargs():
"""Return default parameters passed to Axes.annotate to create labels."""
return dict(
xycoords="data", textcoords="data",
rotation=90, horizontalalignment="center", verticalalignment="center",
arrowprops=dict(arrowstyle="-", relpos=(0.5, 0.0))
) | 0.003195 |
def setGamepadFocusOverlay(self, ulNewFocusOverlay):
"""Sets the current Gamepad focus overlay"""
fn = self.function_table.setGamepadFocusOverlay
result = fn(ulNewFocusOverlay)
return result | 0.008969 |
def get_violation_if_found(self, node, lint_context):
""" Returns a violation if the node is invalid. """
if self.is_valid(node, lint_context):
return None
return self.create_violation_report(node, lint_context) | 0.008065 |
def payload_set(self, value):
"""
Set the message payload (and update header)
:param value: New payload value
:type value: unicode
:rtype: None
"""
self._payload = value
self._header.payload_length = len(self._payload) | 0.007067 |
def current_user(self):
"""
.. versionadded:: 0.6.0
Requires SMC version >= 6.4
Return the currently logged on API Client user element.
:raises UnsupportedEntryPoint: Current user is only supported with SMC
version >= 6.4
:rtype: Element
"""
if self.session:
try:
response = self.session.get(self.entry_points.get('current_user'))
if response.status_code in (200, 201):
admin_href = response.json().get('value')
request = SMCRequest(href=admin_href)
smcresult = send_request(self, 'get', request)
return ElementFactory(admin_href, smcresult)
except UnsupportedEntryPoint:
pass | 0.007282 |
def _handle_tag_definebutton2(self):
"""Handle the DefineButton2 tag."""
obj = _make_object("DefineButton2")
obj.ButtonId = unpack_ui16(self._src)
bc = BitConsumer(self._src)
bc.ReservedFlags = bc.u_get(7)
bc.TrackAsMenu = bc.u_get(1)
obj.ActionOffset = unpack_ui16(self._src)
# characters
obj.Characters = characters = []
while True:
end_flag = unpack_ui8(self._src)
if end_flag == 0:
# all done
obj.CharacterEndFlag = 0
break
# we have a BUTTONRECORD, let's go back the 8 bits and set the obj
self._src.seek(-1, io.SEEK_CUR)
character = _make_object("ButtonRecord")
characters.append(character)
bc = BitConsumer(self._src)
character.ButtonReserved = bc.u_get(2)
character.ButtonHasBlendMode = bc.u_get(1)
character.ButtonHasFilterList = bc.u_get(1)
character.ButtonStateHitTest = bc.u_get(1)
character.ButtonStateDown = bc.u_get(1)
character.ButtonStateOver = bc.u_get(1)
character.ButtonStateUp = bc.u_get(1)
character.CharacterId = unpack_ui16(self._src)
character.PlaceDepth = unpack_ui16(self._src)
character.PlaceMatrix = self._get_struct_matrix()
character.ColorTransform = self._get_struct_cxformwithalpha()
if character.ButtonHasFilterList:
character.FilterList = self._get_struct_filterlist()
if character.ButtonHasBlendMode:
character.BlendMode = unpack_ui8(self._src)
obj.Actions = actions = []
still_have_actions = True
while still_have_actions:
end_flag = unpack_ui16(self._src)
if end_flag == 0:
# this is the last action, parse it and then exit
still_have_actions = False
bca = _make_object("ButtonCondAction")
actions.append(bca)
bca.CondActionSize = end_flag
bc = BitConsumer(self._src)
bca.CondIdleToOverDown = bc.u_get(1)
bca.CondOutDownToIdle = bc.u_get(1)
bca.CondOutDownToOverDown = bc.u_get(1)
bca.CondOverDownToOutDown = bc.u_get(1)
bca.CondOverDownToOverUp = bc.u_get(1)
bca.CondOverUpToOverDown = bc.u_get(1)
bca.CondOverUpToIdle = bc.u_get(1)
bca.CondIdleToOverUp = bc.u_get(1)
bca.CondKeyPress = bc.u_get(7)
bca.CondOverDownToIdle = bc.u_get(1)
bca.Actions = self._generic_action_parser()
return obj | 0.000739 |
def option(self, url, headers=None, kwargs=None):
"""Make a OPTION request.
To make a OPTION request pass, ``url``
:param url: ``str``
:param headers: ``dict``
:param kwargs: ``dict``
"""
return self._request(
method='option',
url=url,
headers=headers,
kwargs=kwargs
) | 0.005236 |
def _patch_vm_uuid(self):
"""
Fix the VM uuid in the case of linked clone
"""
if os.path.exists(self._linked_vbox_file()):
try:
tree = ET.parse(self._linked_vbox_file())
except ET.ParseError:
raise VirtualBoxError("Cannot modify VirtualBox linked nodes file. "
"File {} is corrupted.".format(self._linked_vbox_file()))
machine = tree.getroot().find("{http://www.virtualbox.org/}Machine")
if machine is not None and machine.get("uuid") != "{" + self.id + "}":
for image in tree.getroot().findall("{http://www.virtualbox.org/}Image"):
currentSnapshot = machine.get("currentSnapshot")
if currentSnapshot:
newSnapshot = re.sub(r"\{.*\}", "{" + str(uuid.uuid4()) + "}", currentSnapshot)
shutil.move(os.path.join(self.working_dir, self._vmname, "Snapshots", currentSnapshot) + ".vdi",
os.path.join(self.working_dir, self._vmname, "Snapshots", newSnapshot) + ".vdi")
image.set("uuid", newSnapshot)
machine.set("uuid", "{" + self.id + "}")
tree.write(self._linked_vbox_file()) | 0.009202 |
def drain(self, ignore_daemonsets=False, delete_local_storage=False, force=False):
"""
Removes all K8sPods from this K8sNode,
and prevents additional K8sPods from being scheduled.
:param ignore_daemonsets: a boolean.
If false, will fail if a K8sDaemonSet-managed K8sPod is present.
If true, will continue even if a K8sDaemonSet-managed K8sPod is present.
:param delete_local_storage: a boolean.
If false, will fail if a K8sVolume of type 'emptyDir' is found.
If true, will continue even if an 'emptyDir' K8sVolume is found.
:param force: a boolean.
If false, will fail if any K8sPods unmanaged by a parent object are found.
If true, will continue and any unmanaged K8sPods are lost.
:return: self.
"""
# inventory of K8sPods found on this node.
daemonset_pods = []
pods = self._pod_inventory()
# cordon the node.
self.unschedulable = True
self.update()
# loop through all pods and delete them.
for pod in pods:
if self._is_daemonset(pod):
if not ignore_daemonsets:
raise DrainNodeException("K8sNode: pod: [ {} ] is managed by a DaemonSet.".format(pod.name))
else:
daemonset_pods.append(pod)
continue
if self._has_local_storage(pod) and not delete_local_storage:
raise DrainNodeException("K8sNode: pod: [ {} ] has local storage that will be lost.".format(pod.name))
if self._is_orphan(pod) and not force:
raise DrainNodeException("K8sNode: pod: [ {} ] is unmanaged and will be lost.".format(pod.name))
pod.delete()
self._wait_for_pod_deletion(daemonset_pods)
return self | 0.008754 |
def __reorganize(self):
"""
Reorganize the keys into their proper section order for the NOAA output file
DO NOT parse data tables (paleoData or chronData). We will do those separately.
:param str key:
:param any value:
:return none:
"""
logger_lpd_noaa.info("enter reorganize")
# NOAA files are organized in sections differently than LiPD. Try to translate these sections.
for key, value in self.lipd_data.items():
# if this key has a noaa match, it'll be returned. otherwise, empty string for no match
noaa_key = self.__get_noaa_key(key)
# check if this lipd key is in the NOAA_KEYS conversion dictionary.
# if it's not, then stash it in our ignore list.
if key not in LIPD_NOAA_MAP_FLAT:
self.noaa_data_sorted["Ignore"][noaa_key] = value
# studyName is placed two times in file. Line #1, and under the 'title' section
elif noaa_key == "Study_Name":
# study name gets put in two locations
self.noaa_data_sorted["Top"][noaa_key] = value
self.noaa_data_sorted["Title"][noaa_key] = value
# put archiveType in self, because we'll reuse it later for the 9-part-variables as well
elif noaa_key == "Archive":
self.lsts_tmp["archive"].append(value)
# Dataset_DOI is a repeatable element. the key could be a single DOI, or a list of DOIs.
elif noaa_key == "Dataset_DOI":
self.__parse_dois(value)
# all other keys. determine which noaa section they belong in.
else:
# noaa keys are sorted by section.
for header, content in NOAA_KEYS_BY_SECTION.items():
try:
# if our key is a noaa header key, then that means it's the ONLY key in the section.
# set value directly
if noaa_key == header:
self.noaa_data_sorted[header] = value
# all other cases, the key is part of the section
elif noaa_key in content:
self.noaa_data_sorted[header][noaa_key] = value
except KeyError:
# this shouldn't ever really happen, but just in case
logger_lpd_noaa.warn("lpd_noaa: reorganize: KeyError: {}".format(noaa_key))
return | 0.004346 |
def apply_line_types(network):
"""Calculate line electrical parameters x, r, b, g from standard
types.
"""
lines_with_types_b = network.lines.type != ""
if lines_with_types_b.zsum() == 0:
return
missing_types = (pd.Index(network.lines.loc[lines_with_types_b, 'type'].unique())
.difference(network.line_types.index))
assert missing_types.empty, ("The type(s) {} do(es) not exist in network.line_types"
.format(", ".join(missing_types)))
# Get a copy of the lines data
l = (network.lines.loc[lines_with_types_b, ["type", "length", "num_parallel"]]
.join(network.line_types, on='type'))
for attr in ["r","x"]:
l[attr] = l[attr + "_per_length"] * l["length"] / l["num_parallel"]
l["b"] = 2*np.pi*1e-9*l["f_nom"] * l["c_per_length"] * l["length"] * l["num_parallel"]
# now set calculated values on live lines
for attr in ["r", "x", "b"]:
network.lines.loc[lines_with_types_b, attr] = l[attr] | 0.00679 |
def next(self):
"""
Returns the next row from the Instances object.
:return: the next Instance object
:rtype: Instance
"""
if self.row < self.data.num_instances:
index = self.row
self.row += 1
return self.data.get_instance(index)
else:
raise StopIteration() | 0.005525 |
def fcoe_fcoe_fabric_map_fcoe_fcf_map_fcf_map_fcf_rbid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fcoe = ET.SubElement(config, "fcoe", xmlns="urn:brocade.com:mgmt:brocade-fcoe")
fcoe_fabric_map = ET.SubElement(fcoe, "fcoe-fabric-map")
fcoe_fabric_map_name_key = ET.SubElement(fcoe_fabric_map, "fcoe-fabric-map-name")
fcoe_fabric_map_name_key.text = kwargs.pop('fcoe_fabric_map_name')
fcoe_fcf_map = ET.SubElement(fcoe_fabric_map, "fcoe-fcf-map")
fcf_map_name_key = ET.SubElement(fcoe_fcf_map, "fcf-map-name")
fcf_map_name_key.text = kwargs.pop('fcf_map_name')
fcf_map_fcf_rbid = ET.SubElement(fcoe_fcf_map, "fcf-map-fcf-rbid")
fcf_map_fcf_rbid.text = kwargs.pop('fcf_map_fcf_rbid')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.004449 |
def _str_extract_frame(arr, pat, flags=0):
"""
For each subject string in the Series, extract groups from the
first match of regular expression pat. This function is called from
str_extract(expand=True), and always returns a DataFrame.
"""
from pandas import DataFrame
regex = re.compile(pat, flags=flags)
groups_or_na = _groups_or_na_fun(regex)
names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
columns = [names.get(1 + i, i) for i in range(regex.groups)]
if len(arr) == 0:
return DataFrame(columns=columns, dtype=object)
try:
result_index = arr.index
except AttributeError:
result_index = None
return DataFrame(
[groups_or_na(val) for val in arr],
columns=columns,
index=result_index,
dtype=object) | 0.001198 |
def write_single_register(self, reg_addr, reg_value):
"""Modbus function WRITE_SINGLE_REGISTER (0x06)
:param reg_addr: register address (0 to 65535)
:type reg_addr: int
:param reg_value: register value to write
:type reg_value: int
:returns: True if write ok or None if fail
:rtype: bool or None
"""
# check params
if not (0 <= int(reg_addr) <= 65535):
self.__debug_msg('write_single_register(): reg_addr out of range')
return None
if not (0 <= int(reg_value) <= 65535):
self.__debug_msg('write_single_register(): reg_value out of range')
return None
# build frame
tx_buffer = self._mbus_frame(const.WRITE_SINGLE_REGISTER,
struct.pack('>HH', reg_addr, reg_value))
# send request
s_send = self._send_mbus(tx_buffer)
# check error
if not s_send:
return None
# receive
f_body = self._recv_mbus()
# check error
if not f_body:
return None
# check fix frame size
if len(f_body) != 4:
self.__last_error = const.MB_RECV_ERR
self.__debug_msg('write_single_register(): rx frame size error')
self.close()
return None
# register extract
rx_reg_addr, rx_reg_value = struct.unpack('>HH', f_body)
# check register write
is_ok = (rx_reg_addr == reg_addr) and (rx_reg_value == reg_value)
return True if is_ok else None | 0.001263 |
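For illustration only, a minimal sketch of how the request body above is packed and how the 4-byte echo is checked; the function code byte is shown explicitly here, whereas the client above prepends it inside _mbus_frame(), and the address/value are arbitrary.

import struct

WRITE_SINGLE_REGISTER = 0x06
reg_addr, reg_value = 10, 0x1234
pdu = struct.pack('>BHH', WRITE_SINGLE_REGISTER, reg_addr, reg_value)
print(pdu.hex())  # '06000a1234'

# The response echoes the address and value, unpacked the same way:
rx_reg_addr, rx_reg_value = struct.unpack('>HH', pdu[1:])
print(rx_reg_addr == reg_addr and rx_reg_value == reg_value)  # True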
def add_author(self, author, file_as=None, role=None, uid='creator'):
"Add author for this document"
self.add_metadata('DC', 'creator', author, {'id': uid})
if file_as:
self.add_metadata(None, 'meta', file_as, {'refines': '#' + uid,
'property': 'file-as',
'scheme': 'marc:relators'})
if role:
self.add_metadata(None, 'meta', role, {'refines': '#' + uid,
'property': 'role',
'scheme': 'marc:relators'}) | 0.004484 |
def local_accuracy(X, y, model_generator, method_name):
""" Local Accuracy
transform = "identity"
sort_order = 2
"""
def score_map(true, pred):
""" Converts local accuracy from % of standard deviation to numerical scores for coloring.
"""
v = min(1.0, np.std(pred - true) / (np.std(true) + 1e-8))
if v < 1e-6:
return 1.0
elif v < 0.01:
return 0.9
elif v < 0.05:
return 0.75
elif v < 0.1:
return 0.6
elif v < 0.2:
return 0.4
elif v < 0.3:
return 0.3
elif v < 0.5:
return 0.2
elif v < 0.7:
return 0.1
else:
return 0.0
def score_function(X_train, X_test, y_train, y_test, attr_function, trained_model, random_state):
return measures.local_accuracy(
X_train, y_train, X_test, y_test, attr_function(X_test),
model_generator, score_map, trained_model
)
return None, __score_method(X, y, None, model_generator, score_function, method_name) | 0.004521 |
async def validate(state, holdout_glob):
"""Validate the trained model against holdout games.
Args:
state: the RL loop State instance.
holdout_glob: a glob that matches holdout games.
"""
if not glob.glob(holdout_glob):
print('Glob "{}" didn\'t match any files, skipping validation'.format(
holdout_glob))
else:
await run(
'python3', 'validate.py', holdout_glob,
'--flagfile={}'.format(os.path.join(FLAGS.flags_dir, 'validate.flags')),
'--work_dir={}'.format(fsdb.working_dir())) | 0.009242 |
def clean(ctx):
"""Clean previously built package artifacts.
"""
ctx.run(f"python setup.py clean")
dist = ROOT.joinpath("dist")
print(f"[clean] Removing {dist}")
if dist.exists():
shutil.rmtree(str(dist)) | 0.004237 |
def _copy_settings_file(source, destination, name):
"""
Copy a file from the repo to the user's home directory.
"""
if os.path.exists(destination):
try:
ch = six.moves.input(
'File %s already exists, overwrite? y/[n]):' % destination)
if ch not in ('Y', 'y'):
return
except KeyboardInterrupt:
return
filepath = os.path.dirname(destination)
if not os.path.exists(filepath):
os.makedirs(filepath)
print('Copying default %s to %s' % (name, destination))
shutil.copy(source, destination)
os.chmod(destination, 0o664) | 0.001555 |
def parse_dict(value):
"""Parse a dict value from the tox config.
.. code-block: ini
[travis]
python =
2.7: py27, docs
3.5: py{35,36}
With this config, the value of ``python`` would be parsed
by this function, and would return::
{
'2.7': 'py27, docs',
'3.5': 'py{35,36}',
}
"""
lines = [line.strip() for line in value.strip().splitlines()]
pairs = [line.split(':', 1) for line in lines if line]
return dict((k.strip(), v.strip()) for k, v in pairs) | 0.00177 |
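A hypothetical usage sketch, assuming parse_dict from the snippet above is importable; the value mirrors the tox.ini example in its docstring.

value = """
2.7: py27, docs
3.5: py{35,36}
"""
print(parse_dict(value))
# {'2.7': 'py27, docs', '3.5': 'py{35,36}'}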
def char_offsets_to_xpaths(html, char_offsets):
'''Converts HTML and a sequence of char offsets to xpath offsets.
Returns a generator of :class:`streamcorpus.XpathRange` objects
in correspondences with the sequence of ``char_offsets`` given.
Namely, each ``XpathRange`` should address precisely the same text
as that ``char_offsets`` (sans the HTML).
Depending on how ``char_offsets`` was tokenized, it's possible that
some tokens cannot have their xpaths generated reliably. In this
case, a ``None`` value is yielded instead of a ``XpathRange``.
``char_offsets`` must be a sorted and non-overlapping sequence of
character ranges. They do not have to be contiguous.
'''
html = uni(html)
parser = XpathTextCollector()
prev_end = 0
prev_progress = True
for start, end in char_offsets:
if start == end:
# Zero length tokens shall have no quarter!
# Note that this is a special case. If we let zero-length tokens
# be handled normally, then it will be recorded as if the parser
# did not make any progress. But of course, there is no progress
# to be had!
yield None
continue
# If we didn't make any progress on the previous token, then we'll
# need to try and make progress before we can start tracking offsets
# again. Otherwise the parser will report incorrect offset info.
#
# (The parser can fail to make progress when tokens are split at
# weird boundaries, e.g., `&` followed by `;`. The parser won't
# make progress after `&` but will once `;` is given.)
#
# Here, we feed the parser one character at a time between where the
# last token ended and where the next token will start. In most cases,
# this will be enough to nudge the parser along. Once done, we can pick
# up where we left off and start handing out offsets again.
#
# If this still doesn't let us make progress, then we'll have to skip
# this token too.
if not prev_progress:
for i in xrange(prev_end, start):
parser.feed(html[i])
prev_end += 1
if parser.made_progress:
break
if not parser.made_progress:
yield None
continue
# Hand the parser everything from the end of the last token to the
# start of this one. Then ask for the Xpath, which should be at the
# start of `char_offsets`.
if prev_end < start:
parser.feed(html[prev_end:start])
if not parser.made_progress:
parser.feed(html[start:end])
prev_progress = parser.made_progress
prev_end = end
yield None
continue
xstart = parser.xpath_offset()
# print('START', xstart)
# Hand it the actual token and ask for the ending offset.
parser.feed(html[start:end])
xend = parser.xpath_offset()
# print('END', xend)
prev_end = end
# If we couldn't make progress then the xpaths generated are probably
# incorrect. (If the parser doesn't make progress, then we can't rely
# on the callbacks to have been called, which means we may not have
# captured all state correctly.)
#
# Therefore, we simply give up and claim this token is not addressable.
if not parser.made_progress:
prev_progress = False
yield None
else:
prev_progress = True
yield XpathRange(xstart[0], xstart[1], xend[0], xend[1])
parser.feed(html[prev_end:])
parser.close() | 0.000264 |
def post(self, request, *args, **kwargs):
""" Handles POST requests. """
return self.disapprove(request, *args, **kwargs) | 0.014599 |
def listen(self, addr=None):
"""Wait for a connection/reconnection from a DCC peer.
Returns the DCCConnection object.
The local IP address and port are available as
self.localaddress and self.localport. After connection from a
peer, the peer address and port are available as
self.peeraddress and self.peerport.
"""
self.buffer = buffer.LineBuffer()
self.handlers = {}
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.passive = True
default_addr = socket.gethostbyname(socket.gethostname()), 0
try:
self.socket.bind(addr or default_addr)
self.localaddress, self.localport = self.socket.getsockname()
self.socket.listen(10)
except socket.error as x:
raise DCCConnectionError("Couldn't bind socket: %s" % x)
return self | 0.002203 |
def listen(identifier):
"""
Launch a listener and return the compactor context.
"""
context = Context()
process = WebProcess(identifier)
context.spawn(process)
log.info("Launching PID %s", process.pid)
return process, context | 0.028571 |
def turn_off(self, channel, callback=None):
"""
Turn off switch.
:return: None
"""
if callback is None:
def callb():
"""No-op"""
pass
callback = callb
message = velbus.SwitchRelayOffMessage(self._address)
message.relay_channels = [channel]
self._controller.send(message, callback) | 0.004988 |
def _access_user_info(self):
"""
Vimeo requires the user ID to access the user info endpoint, so we need
to make two requests: one to get user ID and second to get user info.
"""
response = super(Vimeo, self)._access_user_info()
uid = response.data.get('oauth', {}).get('user', {}).get('id')
if uid:
return self.access('http://vimeo.com/api/v2/{0}/info.json'
.format(uid))
return response | 0.00404 |
def _string_to_int(s, state, region, base, signed, read_length=None):
"""
Reads values from s and generates the symbolic number that it would equal.
The first char is either a number in the given base, or the result is 0.
`expression` indicates whether or not it was successful.
"""
# if length wasn't provided, read the maximum bytes
length = state.libc.max_strtol_len if read_length is None else read_length
# expression whether or not it was valid at all
expression, _ = strtol._char_to_val(region.load(s, 1), base)
cases = []
# to detect overflows we keep it in a larger bv and extract it at the end
num_bits = min(state.arch.bits*2, 128)
current_val = state.solver.BVV(0, num_bits)
num_bytes = state.solver.BVS("num_bytes", state.arch.bits)
constraints_num_bytes = []
conditions = []
cutoff = False
# we need all the conditions to hold except the last one to have found a value
for i in range(length):
char = region.load(s + i, 1)
condition, value = strtol._char_to_val(char, base)
# if it was the end we'll get the current val
cases.append((num_bytes == i, current_val))
# identify the constraints necessary to set num_bytes to the current value
# the current char (i.e. the terminator if this is satisfied) should not be a char,
# so `condition` should be false, plus all the previous conditions should be satisfied
case_constraints = conditions + [state.solver.Not(condition)] + [num_bytes == i]
constraints_num_bytes.append(state.solver.And(*case_constraints))
# break the loop early if no value past this is viable
if condition.is_false():
cutoff = True # ???
break
# add the value and the condition
current_val = current_val*base + value.zero_extend(num_bits-8)
conditions.append(condition)
# the last one is unterminated, let's ignore it
if not cutoff:
cases.append((num_bytes == length, current_val))
case_constraints = conditions + [num_bytes == length]
constraints_num_bytes.append(state.solver.And(*case_constraints))
# only one of the constraints need to hold
# since the constraints look like (num_bytes == 2 and the first 2 chars are valid, and the 3rd isn't)
final_constraint = state.solver.Or(*constraints_num_bytes)
if final_constraint.op == '__eq__' and final_constraint.args[0] is num_bytes and not final_constraint.args[1].symbolic:
# CONCRETE CASE
result = cases[state.solver.eval(final_constraint.args[1])][1]
num_bytes = final_constraint.args[1]
else:
# symbolic case
state.add_constraints(final_constraint)
result = state.solver.ite_cases(cases, 0)
# overflow check
max_bits = state.arch.bits-1 if signed else state.arch.bits
max_val = 2**max_bits - 1
result = state.solver.If(result < max_val, state.solver.Extract(state.arch.bits-1, 0, result),
state.solver.BVV(max_val, state.arch.bits))
return expression, result, num_bytes | 0.004178 |
def _to_key_val_pairs(defs):
""" Helper to split strings, lists and dicts into (current, value) tuples for accumulation """
if isinstance(defs, STRING_TYPES):
# Convert 'a' to [('a', None)], or 'a.b.c' to [('a', 'b.c')]
return [defs.split('.', 1) if '.' in defs else (defs, None)]
else:
pairs = []
# Convert collections of strings or lists as above; break dicts into component items
pairs.extend(p for s in defs if isinstance(s, STRING_TYPES) for p in _to_key_val_pairs(s))
pairs.extend(p for l in defs if isinstance(l, list) for p in _to_key_val_pairs(l))
pairs.extend(p for d in defs if isinstance(d, dict) for p in iteritems(d))
return pairs | 0.009682 |
def getServiceJobsToStart(self, maxWait):
"""
:param float maxWait: Time in seconds to wait to get a job before returning.
:return: a tuple of (serviceJobStoreID, memory, cores, disk, ..) representing
a service job to start.
:rtype: toil.job.ServiceJobNode
"""
try:
serviceJob = self._serviceJobGraphsToStart.get(timeout=maxWait)
assert self.jobsIssuedToServiceManager >= 0
self.jobsIssuedToServiceManager -= 1
return serviceJob
except Empty:
return None | 0.00692 |
def create_image(self, instance_id, image_name, tag_list=None):
'''
method for imaging an instance on AWS EC2
:param instance_id: string with AWS id of running instance
:param image_name: string with name to give new image
:param tag_list: [optional] list of resources tags to add to image
:return: string with AWS id of image
'''
title = '%s.create_image' % self.__class__.__name__
# validate inputs
input_fields = {
'instance_id': instance_id,
'image_name': image_name,
'tag_list': tag_list
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# check instance state
self.iam.printer('Initiating image of instance %s.' % instance_id)
instance_state = self.check_instance_state(instance_id)
# stop instance
if instance_state == 'running':
self.iam.printer('Instance %s is %s.\nStopping instance %s to image it.' % (instance_id, instance_state, instance_id))
try:
response = self.connection.stop_instances(
InstanceIds=[ instance_id ]
)
instance_state = response['StoppingInstances'][0]['CurrentState']['Name']
except:
raise AWSConnectionError(title)
if instance_state == 'stopping':
from time import sleep
from timeit import default_timer as timer
self.iam.printer('Instance %s is %s' % (instance_id, instance_state), flush=True)
delay = 3
while instance_state == 'stopping':
self.iam.printer('.', flush=True)
sleep(delay)
t3 = timer()
try:
response = self.connection.describe_instances(
InstanceIds=[ instance_id ]
)
except:
raise AWSConnectionError(title)
t4 = timer()
response_time = t4 - t3
if 3 - response_time > 0:
delay = 3 - response_time
else:
delay = 0
instance_state = response['Reservations'][0]['Instances'][0]['State']['Name']
self.iam.printer(' done.')
if instance_state != 'stopped':
raise Exception('Instance %s is currently in a state that cannot be imaged.' % instance_id)
# discover tags associated with instance
old_tags = []
try:
response = self.connection.describe_instances(
InstanceIds=[ instance_id ]
)
instance_tags = response['Reservations'][0]['Instances'][0]['Tags']
import re
aws_tag_pattern = re.compile('aws:')
for i in range(0, len(instance_tags)):
if not aws_tag_pattern.findall(instance_tags[i]['Key']):
tag = {}
tag['Key'] = instance_tags[i]['Key']
tag['Value'] = instance_tags[i]['Value']
old_tags.append(tag)
except:
raise AWSConnectionError(title)
# replace tag list if new tag input
new_tags = True
if not tag_list:
tag_list = self.iam.ingest(old_tags)
new_tags = False
# create image of the instance
try:
response = self.connection.create_image(
InstanceId=instance_id,
Name=image_name
)
image_id = response['ImageId']
self.iam.printer('Image %s is being created.' % image_name)
except:
raise AWSConnectionError(title)
# add tags to image
self.tag_image(image_id, tag_list)
if new_tags:
self.iam.printer('Tags from input have been added to image %s.' % image_id)
else:
self.iam.printer('Instance %s tags have been added to image %s.' % (instance_id, image_id))
# restart instance
try:
self.connection.start_instances(
InstanceIds=[ instance_id ]
)
self.iam.printer('Restarting instance %s now.' % instance_id)
except:
raise AWSConnectionError
return image_id | 0.004931 |
def __getDBNameForVersion(cls, dbVersion):
""" Generates the ClientJobs database name for the given version of the
database
Parameters:
----------------------------------------------------------------
dbVersion: ClientJobs database version number
retval: the ClientJobs database name for the given DB version
"""
# DB Name prefix for the given version
prefix = cls.__getDBNamePrefixForVersion(dbVersion)
# DB Name suffix
suffix = Configuration.get('nupic.cluster.database.nameSuffix')
# Replace dash and dot with underscore (e.g. 'ec2-user' or ec2.user will break SQL)
suffix = suffix.replace("-", "_")
suffix = suffix.replace(".", "_")
# Create the name of the database for the given DB version
dbName = '%s_%s' % (prefix, suffix)
return dbName | 0.002401 |
def _copy(self, axis=True, attr=True, data=False):
"""Create a new instance of Data, but does not copy the data
necessarily.
Parameters
----------
axis : bool, optional
deep copy the axes (default: True)
attr : bool, optional
deep copy the attributes (default: True)
data : bool, optional
deep copy the data (default: False)
Returns
-------
instance of Data (or ChanTime, ChanFreq, ChanTimeFreq)
copy of the data, but without the actual data
Notes
-----
It's important that we copy all the relevant information here. If you
add new attributes, you should add them here.
Remember that it deep-copies all the information, so if you copy data
the size might become really large.
"""
cdata = type(self)() # create instance of the same class
cdata.s_freq = self.s_freq
cdata.start_time = self.start_time
if axis:
cdata.axis = deepcopy(self.axis)
else:
cdata_axis = OrderedDict()
for axis_name in self.axis:
cdata_axis[axis_name] = array([], dtype='O')
cdata.axis = cdata_axis
if attr:
cdata.attr = deepcopy(self.attr)
if data:
cdata.data = deepcopy(self.data)
else:
# empty data with the correct number of trials
cdata.data = empty(self.number_of('trial'), dtype='O')
return cdata | 0.001287 |
def update_keyboard_mapping(conn, e):
"""
Whenever the keyboard mapping is changed, this function needs to be called
to update xpybutil's internal representation of the current keysym table.
Indeed, xpybutil will do this for you automatically.
Moreover, if something is changed that affects the current keygrabs,
xpybutil will initiate a regrab with the changed keycode.
:param e: The MappingNotify event.
:type e: xcb.xproto.MappingNotifyEvent
:rtype: void
"""
global __kbmap, __keysmods
newmap = get_keyboard_mapping(conn).reply()
if e is None:
__kbmap = newmap
__keysmods = get_keys_to_mods(conn)
return
if e.request == xproto.Mapping.Keyboard:
changes = {}
for kc in range(*get_min_max_keycode(conn)):
knew = get_keysym(kc, kbmap=newmap)
oldkc = get_keycode(conn, knew)
if oldkc != kc:
changes[oldkc] = kc
__kbmap = newmap
__regrab(changes)
elif e.request == xproto.Mapping.Modifier:
__keysmods = get_keys_to_mods() | 0.00091 |
def get_fact_by_id(self, fact_id):
""" Obtains fact data by it's id.
As the fact is unique, it returns a tuple like:
(activity_id, start_time, end_time, description).
If there is no fact with id == fact_id, a NoHamsterData
exception will be raised
"""
columns = 'activity_id, start_time, end_time, description'
query = "SELECT %s FROM facts WHERE id = %s"
result = self._query(query % (columns, fact_id))
if result:
return result[0]  # there is only one fact with the id
else:
raise NoHamsterData('facts', fact_id) | 0.003195 |
def newBuild(self, requests):
"""Create a new Build instance.
@param requests: a list of buildrequest dictionaries describing what is
to be built
"""
b = self.buildClass(requests)
b.useProgress = self.useProgress
b.workdir = self.workdir
b.setStepFactories(self.steps)
return b | 0.005714 |
def login(self):
"""
Logs into Reddit in order to display a personalised front page.
"""
data = {'user': self.options['username'], 'passwd':
self.options['password'], 'api_type': 'json'}
response = self.client.post('http://www.reddit.com/api/login', data=data)
self.client.modhash = response.json()['json']['data']['modhash'] | 0.007712 |
def generate_snapshot(self, filename='snapshot.zip'):
"""
Generate and retrieve a policy snapshot from the engine
This is blocking as file is downloaded
:param str filename: name of file to save file to, including directory
path
:raises EngineCommandFailed: snapshot failed, possibly invalid filename
specified
:return: None
"""
try:
self.make_request(
EngineCommandFailed,
resource='generate_snapshot',
filename=filename)
except IOError as e:
raise EngineCommandFailed(
'Generate snapshot failed: {}'.format(e)) | 0.002865 |
def load_friends(self):
"""Fetches the MAL user friends page and sets the current user's friends attributes.
:rtype: :class:`.User`
:return: Current user object.
"""
user_friends = self.session.session.get(u'http://myanimelist.net/profile/' + utilities.urlencode(self.username) + u'/friends').text
self.set(self.parse_friends(utilities.get_clean_dom(user_friends)))
return self | 0.007371 |
def get_exptime(self, img):
"""Obtain EXPTIME"""
header = self.get_header(img)
if 'EXPTIME' in header.keys():
etime = header['EXPTIME']
elif 'EXPOSED' in header.keys():
etime = header['EXPOSED']
else:
etime = 1.0
return etime | 0.006452 |
def RegisterTextKey(cls, key, atomid):
"""Register a text key.
If the key you need to register is a simple one-to-one mapping
of MP4 atom name to EasyMP4Tags key, then you can use this
function::
EasyMP4Tags.RegisterTextKey("artist", "\xa9ART")
"""
def getter(tags, key):
return tags[atomid]
def setter(tags, key, value):
tags[atomid] = value
def deleter(tags, key):
del(tags[atomid])
cls.RegisterKey(key, getter, setter, deleter) | 0.003604 |
def get_url_authcode_flow_user(client_id, redirect_uri, display="page", scope=None, state=None):
"""Authorization Code Flow for User Access Token
Use Authorization Code Flow to run VK API methods from the server side of an application.
Access token received this way is not bound to an ip address but set of permissions that can be granted is limited for security reasons.
Args:
client_id (int): Application id.
redirect_uri (str): Address to redirect user after authorization.
display (str): Sets authorization page appearance.
Sets: {`page`, `popup`, `mobile`}
Defaults to `page`
scope (:obj:`str`, optional): Permissions bit mask, to check on authorization and request if necessary.
More scope: https://vk.com/dev/permissions
state (:obj:`str`, optional): An arbitrary string that will be returned together with authorization result.
Returns:
str: Url
Examples:
>>> vk.get_url_authcode_flow_user(1, 'http://example.com/', scope="wall,email")
'https://oauth.vk.com/authorize?client_id=1&display=page&redirect_uri=http://example.com/&scope=wall,email&response_type=code'
.. _Docs:
https://vk.com/dev/authcode_flow_user
"""
url = "https://oauth.vk.com/authorize"
params = {
"client_id": client_id,
"redirect_uri": redirect_uri,
"display": display,
"response_type": "code"
}
if scope:
params['scope'] = scope
if state:
params['state'] = state
return u"{url}?{params}".format(url=url, params=urlencode(params)) | 0.0043 |
def create(self, argv):
"""Create a search job."""
opts = cmdline(argv, FLAGS_CREATE)
if len(opts.args) != 1:
error("Command requires a search expression", 2)
query = opts.args[0]
job = self.service.jobs.create(query, **opts.kwargs)
print(job.sid) | 0.006369 |
def find_undeclared_variables(ast):
"""Returns a set of all variables in the AST that will be looked up from
the context at runtime. Because at compile time it's not known which
variables will be used depending on the path the execution takes at
runtime, all variables are returned.
>>> from jinja2 import Environment, meta
>>> env = Environment()
>>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
>>> meta.find_undeclared_variables(ast) == set(['bar'])
True
.. admonition:: Implementation
Internally the code generator is used for finding undeclared variables.
This is good to know because the code generator might raise a
:exc:`TemplateAssertionError` during compilation and as a matter of
fact this function can currently raise that exception as well.
"""
codegen = TrackingCodeGenerator(ast.environment)
codegen.visit(ast)
return codegen.undeclared_identifiers | 0.001043 |
def get_token(self):
"""Performs Neurio API token authentication using provided key and secret.
Note:
This method is generally not called by hand; rather it is usually
called as-needed by a Neurio Client object.
Returns:
string: the access token
"""
if self.__token is not None:
return self.__token
url = "https://api.neur.io/v1/oauth2/token"
creds = b64encode(":".join([self.__key,self.__secret]).encode()).decode()
headers = {
"Authorization": " ".join(["Basic", creds]),
}
payload = {
"grant_type": "client_credentials"
}
r = requests.post(url, data=payload, headers=headers)
self.__token = r.json()["access_token"]
return self.__token | 0.004065 |
def send_explode(self):
"""
In earlier versions of the game, sending this caused your cells
to split into lots of small cells and die.
"""
self.send_struct('<B', 20)
self.player.own_ids.clear()
self.player.cells_changed()
self.ingame = False
self.subscriber.on_death() | 0.005882 |
def storeServiceSpecialCase(st, pups):
"""
Adapt a store to L{IServiceCollection}.
@param st: The L{Store} to adapt.
@param pups: A list of L{IServiceCollection} powerups on C{st}.
@return: An L{IServiceCollection} which has all of C{pups} as children.
"""
if st.parent is not None:
# If for some bizarre reason we're starting a substore's service, let's
# just assume that its parent is running its upgraders, rather than
# risk starting the upgrader run twice. (XXX: it *IS* possible to
# figure out whether we need to or not, I just doubt this will ever
# even happen in practice -- fix here if it does)
return serviceSpecialCase(st, pups)
if st._axiom_service is not None:
# not new, don't add twice.
return st._axiom_service
collection = serviceSpecialCase(st, pups)
st._upgradeService.setServiceParent(collection)
if st.dbdir is not None:
from axiom import batch
batcher = batch.BatchProcessingControllerService(st)
batcher.setServiceParent(collection)
scheduler = iaxiom.IScheduler(st)
# If it's an old database, we might get a SubScheduler instance. It has no
# setServiceParent method.
setServiceParent = getattr(scheduler, 'setServiceParent', None)
if setServiceParent is not None:
setServiceParent(collection)
return collection | 0.000709 |
def get_resource_cache(resourceid):
"""
Get a cached dictionary related to an individual resourceid.
:param resourceid: String resource id.
:return: dict
"""
if not resourceid:
raise ResourceInitError("Resource id missing")
if not DutInformationList._cache.get(resourceid):
DutInformationList._cache[resourceid] = dict()
return DutInformationList._cache[resourceid] | 0.004405 |
def _generateAlias(self):
"""Return an unused auth level alias"""
for i in range(1000):
alias = 'cust%d' % (i, )
if alias not in self.auth_level_aliases:
return alias
raise RuntimeError('Could not find an unused alias (tried 1000!)') | 0.006711 |
def to_fastq_str(self):
"""
:return: string representation of this NGS read in FastQ format
"""
return "@" + self.name + "\n" + self.sequenceData +\
"\n" + "+" + self.name + "\n" + self.seq_qual | 0.004525 |
def answerReceived(self, value, originalValue,
originalSender, originalTarget):
"""
An answer was received. Dispatch to the appropriate answer responder,
i.e. a method on this object exposed with L{answerMethod.expose}.
@see IDeliveryConsequence.answerReceived
"""
if value.type != AMP_ANSWER_TYPE:
raise UnknownMessageType()
commandName = self._boxFromData(originalValue.data)[COMMAND]
rawArgs = self._boxFromData(value.data)
placeholder = _ProtocolPlaceholder(originalSender, originalTarget)
if ERROR in rawArgs:
thunk = errorMethod.errbackForName(self, commandName, rawArgs[ERROR])
thunk(Failure(thunk.exception()))
else:
thunk = answerMethod.responderForName(self, commandName)
arguments = thunk.command.parseResponse(rawArgs, placeholder)
thunk(**arguments) | 0.004224 |
def load_history(self):
"""Load upgrade history from database table.
If upgrade table does not exists, the history is assumed to be empty.
"""
if not self.history:
query = Upgrade.query.order_by(desc(Upgrade.applied))
for u in query.all():
self.history[u.upgrade] = u.applied
self.ordered_history.append(u.upgrade) | 0.00495 |
def run_bottleneck_on_image(sess, image_data, image_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor):
"""Runs inference on an image to extract the 'bottleneck' summary layer.
Args:
sess: Current active TensorFlow Session.
image_data: String of raw JPEG data.
image_data_tensor: Input data layer in the graph.
decoded_image_tensor: Output of initial image resizing and preprocessing.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: Layer before the final softmax.
Returns:
Numpy array of bottleneck values.
"""
# First decode the JPEG image, resize it, and rescale the pixel values.
resized_input_values = sess.run(decoded_image_tensor,
{image_data_tensor: image_data})
# Then run it through the recognition network.
bottleneck_values = sess.run(bottleneck_tensor,
{resized_input_tensor: resized_input_values})
bottleneck_values = np.squeeze(bottleneck_values)
return bottleneck_values | 0.007168 |
def cylrec(r, lon, z):
"""
Convert from cylindrical to rectangular coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/cylrec_c.html
:param r: Distance of a point from z axis.
:type r: float
:param lon: Angle (radians) of a point from xZ plane.
:type lon: float
:param z: Height of a point above xY plane.
:type z: float
:return: Rectangular coordinates of the point.
:rtype: 3-Element Array of floats
"""
r = ctypes.c_double(r)
lon = ctypes.c_double(lon)
z = ctypes.c_double(z)
rectan = stypes.emptyDoubleVector(3)
libspice.cylrec_c(r, lon, z, rectan)
return stypes.cVectorToPython(rectan) | 0.001468 |
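For reference, the underlying conversion is x = r*cos(lon), y = r*sin(lon), z = z; a quick NumPy check with arbitrary values (not using SPICE):

import numpy as np

r, lon, z = 2.0, np.pi / 4, 1.0
rectan = np.array([r * np.cos(lon), r * np.sin(lon), z])
print(rectan)  # [1.41421356 1.41421356 1.        ]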
def getsig_by_symbol_name(self, name: str) -> Signature:
""" Retrieve the unique Signature of a symbol.
Fail if the Signature is not unique
"""
subscope = self.get_by_symbol_name(name)
if len(subscope) != 1:
raise KeyError("%s have multiple candidates in scope" % name)
v = list(subscope.values())
return v[0] | 0.005305 |
def cleanUpPparse(outputpath, rawfilename, mgf=False):
"""Delete temporary files generated by pparse, including the filetypes
".csv", ".ms1", ".ms2", ".xtract", the files "pParsePlusLog.txt" and
"pParse.para" and optionally also the ".mgf" file generated by pParse.
.. warning:
When the parameter "mgf" is set to "True" all files ending with ".mgf"
and containing the specified "filename" are deleted. This could
potentially also affect MGF files not generated by pParse.
:param outputpath: path to the output directory of pParse
:param rawfilename: filename of the thermo ".raw" file
:param mgf: bool, if True the ".mgf" file generated by pParse is also
removed
"""
extensions = ['csv', 'ms1', 'ms2', 'xtract']
filename, fileext = os.path.splitext(os.path.basename(rawfilename))
additionalFiles = [aux.joinpath(outputpath, 'pParsePlusLog.txt'),
aux.joinpath(outputpath, filename+'.pparse.para'),
]
for ext in extensions:
filepath = aux.joinpath(outputpath, '.'.join([filename, ext]))
if os.path.isfile(filepath):
print('Removing file: ', filepath)
os.remove(filepath)
for filepath in additionalFiles:
if os.path.isfile(filepath):
print('Removing file: ', filepath)
os.remove(filepath)
if mgf:
for _filename in os.listdir(outputpath):
_basename, _fileext = os.path.splitext(_filename)
if _fileext.lower() != '.mgf':
continue
if _basename.find(filename) != -1 and _basename != filename:
filepath = aux.joinpath(outputpath, _filename)
print('Removing file: ', filepath)
os.remove(filepath) | 0.000554 |
def _TTA(learn:Learner, beta:float=0.4, scale:float=1.35, ds_type:DatasetType=DatasetType.Valid, with_loss:bool=False) -> Tensors:
"Applies TTA to predict on `ds_type` dataset."
preds,y = learn.get_preds(ds_type)
all_preds = list(learn.tta_only(scale=scale, ds_type=ds_type))
avg_preds = torch.stack(all_preds).mean(0)
if beta is None: return preds,avg_preds,y
else:
final_preds = preds*beta + avg_preds*(1-beta)
if with_loss:
with NoneReduceOnCPU(learn.loss_func) as lf: loss = lf(final_preds, y)
return final_preds, y, loss
return final_preds, y | 0.036392 |
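The blending step above in isolation: plain predictions are weighted by beta and the TTA-averaged predictions by (1 - beta). Numbers below are arbitrary.

import torch

beta = 0.4
preds = torch.tensor([0.2, 0.8])       # plain predictions
avg_preds = torch.tensor([0.6, 0.4])   # TTA-averaged predictions
final_preds = preds * beta + avg_preds * (1 - beta)
print(final_preds)  # tensor([0.4400, 0.5600])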
def populate_resource_columns(item_dict):
"""Operates on item_dict
Promotes the resource_name and resource_type fields to the
top-level of the serialization so they can be printed as columns.
    Also copies the name field to type, which is a default column."""
item_dict['type'] = item_dict['name']
if len(item_dict['summary_fields']) == 0:
# Singleton roles omit these fields
item_dict['resource_name'] = None
item_dict['resource_type'] = None
else:
sf = item_dict['summary_fields']
        # Explanation of fallback state:
# The situation where resource_name or resource_type is not present
# should not be seen for singleton roles, and where it is seen,
# there may be a problem with data quality on the server
item_dict['resource_name'] = sf.get('resource_name', '[unknown]')
item_dict['resource_type'] = sf.get('resource_type', '[unknown]') | 0.00197 |
def getMetrics(self):
"""
Gets the current metric values
    :returns: (dict) where each key is the metric name and each value is
              that metric's scalar value. Same as the output of
:meth:`~nupic.frameworks.opf.prediction_metrics_manager.MetricsManager.update`
"""
result = {}
for metricObj, label in zip(self.__metrics, self.__metricLabels):
value = metricObj.getMetric()
result[label] = value['value']
return result | 0.010504 |
def course_discovery(request):
"""
Search for courses
Args:
request (required) - django request object
Returns:
http json response with the following fields
"took" - how many seconds the operation took
"total" - how many results were found
"max_score" - maximum score from these resutls
"results" - json array of result documents
or
"error" - displayable information about an error that occured on the server
POST Params:
"search_string" (optional) - text with which to search for courses
"page_size" (optional)- how many results to return per page (defaults to 20, with maximum cutoff at 100)
"page_index" (optional) - for which page (zero-indexed) to include results (defaults to 0)
"""
results = {
"error": _("Nothing to search")
}
status_code = 500
search_term = request.POST.get("search_string", None)
try:
size, from_, page = _process_pagination_values(request)
field_dictionary = _process_field_values(request)
# Analytics - log search request
track.emit(
'edx.course_discovery.search.initiated',
{
"search_term": search_term,
"page_size": size,
"page_number": page,
}
)
results = course_discovery_search(
search_term=search_term,
size=size,
from_=from_,
field_dictionary=field_dictionary,
)
# Analytics - log search results before sending to browser
track.emit(
'edx.course_discovery.search.results_displayed',
{
"search_term": search_term,
"page_size": size,
"page_number": page,
"results_count": results["total"],
}
)
status_code = 200
except ValueError as invalid_err:
results = {
"error": six.text_type(invalid_err)
}
log.debug(six.text_type(invalid_err))
except QueryParseError:
results = {
"error": _('Your query seems malformed. Check for unmatched quotes.')
}
# Allow for broad exceptions here - this is an entry point from external reference
except Exception as err: # pylint: disable=broad-except
results = {
"error": _('An error occurred when searching for "{search_string}"').format(search_string=search_term)
}
log.exception(
'Search view exception when searching for %s for user %s: %r',
search_term,
request.user.id,
err
)
return JsonResponse(results, status=status_code) | 0.002527 |
def _get_thumbnail_filename(filename, append_text="-thumbnail"):
"""
Returns a thumbnail version of the file name.
"""
name, ext = os.path.splitext(filename)
return ''.join([name, append_text, ext]) | 0.004587 |
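A quick usage illustration, based on how os.path.splitext separates the extension:
# _get_thumbnail_filename('photos/cat.jpg')                -> 'photos/cat-thumbnail.jpg'
# _get_thumbnail_filename('cat.png', append_text='-small') -> 'cat-small.png'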
def add(self, name, assumer, valuation, template_types, template_dests, template_start_standards, template_start_fees, template_add_standards, template_add_fees, session):
        '''taobao.delivery.template.add Add a delivery fee template
        Creates a new delivery (shipping fee) template.'''
request = TOPRequest('taobao.delivery.template.add')
request['name'] = name
request['assumer'] = assumer
request['valuation'] = valuation
request['template_types'] = template_types
request['template_dests'] = template_dests
request['template_start_standards'] = template_start_standards
request['template_start_fees'] = template_start_fees
request['template_add_standards'] = template_add_standards
request['template_add_fees'] = template_add_fees
self.create(self.execute(request, session)['delivery_template'])
return self | 0.004619 |
def show_network(self):
"""!
@brief Shows connections in the network. It supports only 2-d and 3-d representation.
"""
if ( (self._ccore_network_pointer is not None) and (self._osc_conn is None) ):
self._osc_conn = sync_connectivity_matrix(self._ccore_network_pointer);
dimension = len(self._osc_loc[0]);
if ( (dimension != 3) and (dimension != 2) ):
            raise NameError('A network located in a dimension other than 2-d or 3-d cannot be represented');
from matplotlib.font_manager import FontProperties;
from matplotlib import rcParams;
rcParams['font.sans-serif'] = ['Arial'];
rcParams['font.size'] = 12;
fig = plt.figure();
axes = None;
if (dimension == 2):
axes = fig.add_subplot(111);
elif (dimension == 3):
axes = fig.gca(projection='3d');
surface_font = FontProperties();
surface_font.set_name('Arial');
surface_font.set_size('12');
for i in range(0, self._num_osc, 1):
if (dimension == 2):
axes.plot(self._osc_loc[i][0], self._osc_loc[i][1], 'bo');
if (self._conn_represent == conn_represent.MATRIX):
for j in range(i, self._num_osc, 1): # draw connection between two points only one time
if (self.has_connection(i, j) == True):
axes.plot([self._osc_loc[i][0], self._osc_loc[j][0]], [self._osc_loc[i][1], self._osc_loc[j][1]], 'b-', linewidth = 0.5);
else:
for j in self.get_neighbors(i):
if ( (self.has_connection(i, j) == True) and (i > j) ): # draw connection between two points only one time
axes.plot([self._osc_loc[i][0], self._osc_loc[j][0]], [self._osc_loc[i][1], self._osc_loc[j][1]], 'b-', linewidth = 0.5);
elif (dimension == 3):
axes.scatter(self._osc_loc[i][0], self._osc_loc[i][1], self._osc_loc[i][2], c = 'b', marker = 'o');
if (self._conn_represent == conn_represent.MATRIX):
for j in range(i, self._num_osc, 1): # draw connection between two points only one time
if (self.has_connection(i, j) == True):
axes.plot([self._osc_loc[i][0], self._osc_loc[j][0]], [self._osc_loc[i][1], self._osc_loc[j][1]], [self._osc_loc[i][2], self._osc_loc[j][2]], 'b-', linewidth = 0.5);
else:
for j in self.get_neighbors(i):
if ( (self.has_connection(i, j) == True) and (i > j) ): # draw connection between two points only one time
axes.plot([self._osc_loc[i][0], self._osc_loc[j][0]], [self._osc_loc[i][1], self._osc_loc[j][1]], [self._osc_loc[i][2], self._osc_loc[j][2]], 'b-', linewidth = 0.5);
plt.grid();
plt.show(); | 0.023125 |
def list_mapped_classes():
"""
    Returns all the rdfclasses that have an associated elasticsearch
    mapping
Args:
None
"""
cls_dict = {key: value
for key, value in MODULE.rdfclass.__dict__.items()
if not isinstance(value, RdfConfigManager)
and key not in ['properties']
and hasattr(value, 'es_defs')
and value.es_defs.get('kds_esIndex')}
new_dict = {}
    # Remove items that appear as a subclass of a main mapping class.
    # The intersection of the set of cls_dict values and a class's
    # individual hierarchy will be > 1 if the class is a subclass of another
    # class in the list.
potential_maps = set([cls_.__name__ for cls_ in cls_dict.values()])
for name, cls_ in cls_dict.items():
parents = set(cls_.hierarchy)
if len(parents.intersection(potential_maps)) <= 1:
new_dict[name] = cls_
return new_dict | 0.001828 |
def _prepare_atoms(topology, compute_cycles=False):
"""Compute cycles and add white-/blacklists to atoms."""
atom1 = next(topology.atoms())
has_whitelists = hasattr(atom1, 'whitelist')
has_cycles = hasattr(atom1, 'cycles')
compute_cycles = compute_cycles and not has_cycles
if compute_cycles or not has_whitelists:
for atom in topology.atoms():
if compute_cycles:
atom.cycles = set()
if not has_whitelists:
atom.whitelist = OrderedSet()
atom.blacklist = OrderedSet()
if compute_cycles:
bond_graph = nx.Graph()
bond_graph.add_nodes_from(topology.atoms())
bond_graph.add_edges_from(topology.bonds())
all_cycles = _find_chordless_cycles(bond_graph, max_cycle_size=8)
for atom, cycles in zip(bond_graph.nodes, all_cycles):
for cycle in cycles:
atom.cycles.add(tuple(cycle)) | 0.001056 |
def build_blast_cmd(self, fname, dbname):
"""Return BLASTN command"""
return self.funcs.blastn_func(fname, dbname, self.outdir, self.exes.blast_exe) | 0.018293 |
def _cnf(lexer, varname):
"""Return a DIMACS CNF."""
_expect_token(lexer, {KW_p})
_expect_token(lexer, {KW_cnf})
nvars = _expect_token(lexer, {IntegerToken}).value
nclauses = _expect_token(lexer, {IntegerToken}).value
return _cnf_formula(lexer, varname, nvars, nclauses) | 0.003401 |
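For context, the "p cnf" problem line consumed above is the standard DIMACS CNF header giving the variable and clause counts; a hand-written sample of the format (not produced by this parser) looks like this:
sample_cnf = """c example: 3 variables, 2 clauses
p cnf 3 2
1 -2 0
2 3 0
"""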
def populate_resource_list(self):
"""Populate the list resource list.
"""
minimum_needs = self.minimum_needs.get_full_needs()
for full_resource in minimum_needs["resources"]:
self.add_resource(full_resource)
self.provenance.setText(minimum_needs["provenance"]) | 0.00641 |
def makeThumbnail(cls, inputFile, person, format, smaller):
"""
Make a thumbnail of a mugshot image and store it on disk.
@param inputFile: The image to thumbnail.
@type inputFile: C{file}
@param person: The person this mugshot thumbnail is associated with.
@type person: L{Person}
@param format: The format of the data in C{inputFile}.
@type format: C{str} (e.g. I{jpeg})
        @param smaller: Thumbnails are available in two sizes. If C{smaller}
is C{True}, then the thumbnail will be in the smaller of the two
sizes.
@type smaller: C{bool}
@return: path to the thumbnail.
@rtype: L{twisted.python.filepath.FilePath}
"""
dirsegs = ['mugshots', str(person.storeID)]
if smaller:
dirsegs.insert(1, 'smaller')
size = cls.smallerSize
else:
size = cls.size
atomicOutputFile = person.store.newFile(*dirsegs)
makeThumbnail(inputFile, atomicOutputFile, size, format)
atomicOutputFile.close()
return atomicOutputFile.finalpath | 0.00177 |
def save_x509s(self, x509s):
"""Saves the x509 objects to the paths known by this bundle"""
for file_type in TLSFileType:
if file_type.value in x509s:
x509 = x509s[file_type.value]
if file_type is not TLSFileType.CA:
# persist this key or cert to disk
tlsfile = getattr(self, file_type.value)
if tlsfile:
tlsfile.save(x509) | 0.004274 |
def add_cylinder(self, name, position, sizes, mass, precision=[10, 10]):
""" Add Cylinder """
self._create_pure_shape(2, 239, sizes, mass, precision)
self.set_object_position("Cylinder", position)
self.change_object_name("Cylinder", name) | 0.007407 |
def setSignalHeaders(self, signalHeaders):
"""
        Sets the parameters for all signals
Parameters
----------
signalHeaders : array_like
containing dict with
'label' : str
channel label (string, <= 16 characters, must be unique)
'dimension' : str
physical dimension (e.g., mV) (string, <= 8 characters)
'sample_rate' : int
sample frequency in hertz
'physical_max' : float
maximum physical value
'physical_min' : float
minimum physical value
'digital_max' : int
maximum digital value (-2**15 <= x < 2**15)
'digital_min' : int
minimum digital value (-2**15 <= x < 2**15)
"""
for edfsignal in np.arange(self.n_channels):
self.channels[edfsignal] = signalHeaders[edfsignal]
self.update_header() | 0.003759 |
def parse_xml_node(self, node):
'''Parse an xml.dom Node object representing a configuration set into
this object.
'''
self.id = node.getAttributeNS(RTS_NS, 'id')
self._config_data = []
for d in node.getElementsByTagNameNS(RTS_NS, 'ConfigurationData'):
self._config_data.append(ConfigurationData().parse_xml_node(d))
return self | 0.005038 |
def window(self, window):
"""
This method does the following:
1. Switches to the given window (it can be located by window instance/lambda/string).
2. Executes the given block (within window located at previous step).
3. Switches back (this step will be invoked even if exception happens at second step).
Args:
window (Window | lambda): The desired :class:`Window`, or a lambda that will be run in
the context of each open window and returns ``True`` for the desired window.
"""
original = self.current_window
if window != original:
self.switch_to_window(window)
self._scopes.append(None)
try:
yield
finally:
self._scopes.pop()
if original != window:
self.switch_to_window(original) | 0.006873 |
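Because the method yields, it is intended to be used as a context manager (e.g. wrapped with contextlib.contextmanager); a hypothetical usage sketch, where the session and popup names are assumed rather than taken from the library:
# with session.window(popup):
#     ...  # interactions here run against the popup window
# # execution automatically switches back to the original window here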
def transform(source):
'''Used to convert the source code, making use of known transformers.
"transformers" are modules which must contain a function
transform_source(source)
    which returns a transformed source.
Some transformers (for example, those found in the standard library
module lib2to3) cannot cope with non-standard syntax; as a result, they
may fail during a first attempt. We keep track of all failing
    transformers and keep retrying them until either they all succeed
or a fixed set of them fails twice in a row.
'''
source = extract_transformers_from_source(source)
    # Some transformers fail when multiple non-Python constructs
# are present. So, we loop multiple times keeping track of
# which transformations have been unsuccessfully performed.
not_done = transformers
while True:
failed = {}
for name in not_done:
tr_module = import_transformer(name)
try:
source = tr_module.transform_source(source)
except Exception as e:
failed[name] = tr_module
# from traceback import print_exc
# print("Unexpected exception in transforms.transform",
# e.__class__.__name__)
# print_exc()
if not failed:
break
        # Insanity is doing the same thing over and over again and
# expecting different results ...
# If the exact same set of transformations are not performed
# twice in a row, there is no point in trying out a third time.
if failed == not_done:
print("Warning: the following transforms could not be done:")
for key in failed:
print(key)
break
not_done = failed # attempt another pass
return source | 0.000532 |
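A "transformer" in the sense described above is just a module exposing transform_source(source); the example below is a hypothetical, minimal one that rewrites a made-up "repeat N:" construct into a plain for loop:
# repeat_transformer.py -- hypothetical transformer module
import re

def transform_source(source):
    # Rewrite lines like "repeat 3:" into "for _ in range(3):".
    return re.sub(r"^(\s*)repeat +(\d+) *:", r"\1for _ in range(\2):", source, flags=re.M)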
def sign_attribute_query(self, statement, **kwargs):
"""Sign a SAML attribute query.
See sign_statement() for the kwargs.
:param statement: The statement to be signed
:return: The signed statement
"""
return self.sign_statement(
statement, class_name(samlp.AttributeQuery()), **kwargs) | 0.005698 |
def hashing_trick(X_in, hashing_method='md5', N=2, cols=None, make_copy=False):
"""A basic hashing implementation with configurable dimensionality/precision
Performs the hashing trick on a pandas dataframe, `X`, using the hashing method from hashlib
identified by `hashing_method`. The number of output dimensions (`N`), and columns to hash (`cols`) are
also configurable.
Parameters
----------
    X_in: pandas dataframe
        the input dataframe to encode
    hashing_method: string, optional
        name of the hashlib algorithm to use (default 'md5')
    N: int, optional
        number of output dimensions (default 2)
    cols: list, optional
        columns to hash; defaults to all columns of X_in
    make_copy: bool, optional
        if True, operate on a deep copy of X_in rather than mutating it
Returns
-------
out : dataframe
A hashing encoded dataframe.
References
----------
.. [1] Kilian Weinberger; Anirban Dasgupta; John Langford; Alex Smola; Josh Attenberg (2009). Feature Hashing
for Large Scale Multitask Learning. Proc. ICML.
"""
try:
if hashing_method not in hashlib.algorithms_available:
raise ValueError('Hashing Method: %s Not Available. Please use one from: [%s]' % (
hashing_method,
', '.join([str(x) for x in hashlib.algorithms_available])
))
except Exception as e:
try:
_ = hashlib.new(hashing_method)
except Exception as e:
            raise ValueError('Hashing Method: %s Not Found.' % hashing_method)
if make_copy:
X = X_in.copy(deep=True)
else:
X = X_in
if cols is None:
cols = X.columns.values
def hash_fn(x):
tmp = [0 for _ in range(N)]
for val in x.values:
if val is not None:
hasher = hashlib.new(hashing_method)
if sys.version_info[0] == 2:
hasher.update(str(val))
else:
hasher.update(bytes(str(val), 'utf-8'))
tmp[int(hasher.hexdigest(), 16) % N] += 1
return pd.Series(tmp, index=new_cols)
new_cols = ['col_%d' % d for d in range(N)]
X_cat = X.loc[:, cols]
X_num = X.loc[:, [x for x in X.columns.values if x not in cols]]
X_cat = X_cat.apply(hash_fn, axis=1)
X_cat.columns = new_cols
X = pd.concat([X_cat, X_num], axis=1)
return X | 0.002636 |
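A small usage sketch of the function above; the column names and data are made up:
import pandas as pd

df = pd.DataFrame({'color': ['red', 'blue', 'red'], 'size': [1, 2, 3]})
hashed = hashing_trick(df, hashing_method='md5', N=4, cols=['color'])
# 'color' is replaced by hashed count columns col_0..col_3, while the
# numeric 'size' column is carried through unchanged.
print(hashed.columns.tolist())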
def get_records(self, start_time, end_time, msgid=1,
number=10000):
"""
        Fetch customer service chat records.
        :param start_time: query start time, UNIX timestamp
        :param end_time: query end time, UNIX timestamp; a single query cannot span more than one day
        :param msgid: message id, ascending order starting from 1
        :param number: number of records to fetch per call, at most 10000
        :return: the JSON data returned by the API
"""
if isinstance(start_time, datetime.datetime):
start_time = time.mktime(start_time.timetuple())
if isinstance(end_time, datetime.datetime):
end_time = time.mktime(end_time.timetuple())
record_data = {
'starttime': int(start_time),
'endtime': int(end_time),
'msgid': msgid,
'number': number
}
res = self._post(
'https://api.weixin.qq.com/customservice/msgrecord/getmsglist',
data=record_data,
)
return res | 0.003378 |
def _populate(self, json):
"""
A helper method that, given a JSON object representing this object,
assigns values based on the properties dict and the attributes of
its Properties.
"""
if not json:
return
# hide the raw JSON away in case someone needs it
self._set('_raw_json', json)
for key in json:
if key in (k for k in type(self).properties.keys()
if not type(self).properties[k].identifier):
if type(self).properties[key].relationship \
and not json[key] is None:
if isinstance(json[key], list):
objs = []
for d in json[key]:
if not 'id' in d:
continue
new_class = type(self).properties[key].relationship
obj = new_class.make_instance(d['id'],
getattr(self,'_client'))
if obj:
obj._populate(d)
objs.append(obj)
self._set(key, objs)
else:
if isinstance(json[key], dict):
related_id = json[key]['id']
else:
related_id = json[key]
new_class = type(self).properties[key].relationship
obj = new_class.make_instance(related_id, getattr(self,'_client'))
if obj and isinstance(json[key], dict):
obj._populate(json[key])
self._set(key, obj)
elif type(self).properties[key].slug_relationship \
and not json[key] is None:
# create an object of the expected type with the given slug
self._set(key, type(self).properties[key].slug_relationship(self._client, json[key]))
elif type(json[key]) is dict:
self._set(key, MappedObject(**json[key]))
elif type(json[key]) is list:
# we're going to use MappedObject's behavior with lists to
# expand these, then grab the resulting value to set
mapping = MappedObject(_list=json[key])
self._set(key, mapping._list) # pylint: disable=no-member
elif type(self).properties[key].is_datetime:
try:
t = time.strptime(json[key], DATE_FORMAT)
self._set(key, datetime.fromtimestamp(time.mktime(t)))
except:
#TODO - handle this better (or log it?)
self._set(key, json[key])
else:
self._set(key, json[key])
self._set('_populated', True)
self._set('_last_updated', datetime.now()) | 0.004589 |
def is_tuple(node):
"""Does the node represent a tuple literal?"""
if isinstance(node, Node) and node.children == [LParen(), RParen()]:
return True
return (isinstance(node, Node)
and len(node.children) == 3
and isinstance(node.children[0], Leaf)
and isinstance(node.children[1], Node)
and isinstance(node.children[2], Leaf)
and node.children[0].value == u"("
and node.children[2].value == u")") | 0.002058 |