def get_review_requests(self):
"""
:calls: `GET /repos/:owner/:repo/pulls/:number/requested_reviewers <https://developer.github.com/v3/pulls/review_requests/>`_
:rtype: tuple of :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser` and of :class:`github.PaginatedList.PaginatedList` of :class:`github.Team.Team`
"""
return (
github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/requested_reviewers",
None,
list_item='users'
),
github.PaginatedList.PaginatedList(
github.Team.Team,
self._requester,
self.url + "/requested_reviewers",
None,
list_item='teams'
)
) | :calls: `GET /repos/:owner/:repo/pulls/:number/requested_reviewers <https://developer.github.com/v3/pulls/review_requests/>`_
:rtype: tuple of :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser` and of :class:`github.PaginatedList.PaginatedList` of :class:`github.Team.Team` |
def list_dfu_devices(*args, **kwargs):
"""Prints a lits of devices detected in DFU mode."""
devices = get_dfu_devices(*args, **kwargs)
if not devices:
print("No DFU capable devices found")
return
for device in devices:
print("Bus {} Device {:03d}: ID {:04x}:{:04x}"
.format(device.bus, device.address,
device.idVendor, device.idProduct))
layout = get_memory_layout(device)
print("Memory Layout")
for entry in layout:
print(" 0x{:x} {:2d} pages of {:3d}K bytes"
.format(entry['addr'], entry['num_pages'],
entry['page_size'] // 1024)) | Prints a list of devices detected in DFU mode. |
def delete_resource(resource_name, key, identifier_fields, profile='pagerduty', subdomain=None, api_key=None):
'''
delete any pagerduty resource
Helper method for absent()
example:
delete_resource("users", key, ["id","name","email"]) # delete by id or name or email
'''
resource = get_resource(resource_name, key, identifier_fields, profile, subdomain, api_key)
if resource:
if __opts__['test']:
return 'would delete'
# flush the resource_cache, because we're modifying a resource
del __context__['pagerduty_util.resource_cache'][resource_name]
resource_id = _get_resource_id(resource)
return _query(method='DELETE', action='{0}/{1}'.format(resource_name, resource_id), profile=profile, subdomain=subdomain, api_key=api_key)
else:
return True | delete any pagerduty resource
Helper method for absent()
example:
delete_resource("users", key, ["id","name","email"]) # delete by id or name or email |
def __make_points(self, measurement, additional_tags, ts, fields):
"""
Parameters
----------
measurement : string
measurement type (e.g. monitoring, overall_meta, net_codes, proto_codes, overall_quantiles)
additional_tags : dict
custom additional tags for these points
ts : integer
timestamp
fields : dict
influxdb columns
Returns
-------
dict
points for InfluxDB client
"""
tags = self.tags.copy()
tags.update(additional_tags)
return {
"measurement": measurement,
"tags": tags,
"time": int(ts),
"fields": fields,
} | Parameters
----------
measurement : string
measurement type (e.g. monitoring, overall_meta, net_codes, proto_codes, overall_quantiles)
additional_tags : dict
custom additional tags for these points
ts : integer
timestamp
fields : dict
influxdb columns
Returns
-------
dict
points for InfluxDB client |
def reset(self):
"""
Reset the game so the grid is zeros (or default items)
"""
self.grid = [[0 for dummy_l in range(self.grid_width)] for dummy_l in range(self.grid_height)] | Reset the game so the grid is zeros (or default items) |
def stabilize(self, test_func, error, timeoutSecs=10, retryDelaySecs=0.5):
'''Repeatedly test a function waiting for it to return True.
Arguments:
test_func -- A function that will be run repeatedly
error -- A function that will be run to produce an error message
it will be called with (node, timeTakenSecs, numberOfRetries)
OR
-- A string that will be interpolated with a dictionary of
{ 'timeTakenSecs', 'numberOfRetries' }
timeoutSecs -- How long in seconds to keep trying before declaring a failure
retryDelaySecs -- How long to wait between retry attempts
'''
start = time.time()
numberOfRetries = 0
while h2o_args.no_timeout or (time.time() - start < timeoutSecs):
if test_func(self, tries=numberOfRetries, timeoutSecs=timeoutSecs):
break
time.sleep(retryDelaySecs)
numberOfRetries += 1
# hey, check the sandbox if we've been waiting a long time...rather than wait for timeout
# to find the badness?. can check_sandbox_for_errors at any time
if ((numberOfRetries % 50) == 0):
check_sandbox_for_errors(python_test_name=h2o_args.python_test_name)
else:
timeTakenSecs = time.time() - start
if isinstance(error, type('')):
raise Exception('%s failed after %.2f seconds having retried %d times' % (
error, timeTakenSecs, numberOfRetries))
else:
msg = error(self, timeTakenSecs, numberOfRetries)
raise Exception(msg) | Repeatedly test a function waiting for it to return True.
Arguments:
test_func -- A function that will be run repeatedly
error -- A function that will be run to produce an error message
it will be called with (node, timeTakenSecs, numberOfRetries)
OR
-- A string that will be interpolated with a dictionary of
{ 'timeTakenSecs', 'numberOfRetries' }
timeoutSecs -- How long in seconds to keep trying before declaring a failure
retryDelaySecs -- How long to wait between retry attempts |
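A minimal usage sketch for stabilize(): the polling callable receives the node plus tries/timeoutSecs keywords, and the error argument may simply be a string. The node object and the get_cloud_status accessor below are hypothetical.
def cloud_is_healthy(node, tries=0, timeoutSecs=0):
    # hypothetical health check; stabilize() passes tries/timeoutSecs on every call
    return node.get_cloud_status() == 'healthy'

node.stabilize(cloud_is_healthy,
               error='cloud never became healthy',
               timeoutSecs=30, retryDelaySecs=1)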
def confirm_authorization_request(self):
"""When consumer confirm the authorization."""
server = self.server
scope = request.values.get('scope') or ''
scopes = scope.split()
credentials = dict(
client_id=request.values.get('client_id'),
redirect_uri=request.values.get('redirect_uri', None),
response_type=request.values.get('response_type', None),
state=request.values.get('state', None)
)
log.debug('Fetched credentials from request %r.', credentials)
redirect_uri = credentials.get('redirect_uri')
log.debug('Found redirect_uri %s.', redirect_uri)
uri, http_method, body, headers = extract_params()
try:
ret = server.create_authorization_response(
uri, http_method, body, headers, scopes, credentials)
log.debug('Authorization successful.')
return create_response(*ret)
except oauth2.FatalClientError as e:
log.debug('Fatal client error %r', e, exc_info=True)
return self._on_exception(e, e.in_uri(self.error_uri))
except oauth2.OAuth2Error as e:
log.debug('OAuth2Error: %r', e, exc_info=True)
# on auth error, we should preserve state if it's present according to RFC 6749
state = request.values.get('state')
if state and not e.state:
e.state = state # set e.state so e.in_uri() can add the state query parameter to redirect uri
return self._on_exception(e, e.in_uri(redirect_uri or self.error_uri))
except Exception as e:
log.exception(e)
return self._on_exception(e, add_params_to_uri(
self.error_uri, {'error': str(e)}
)) | When the consumer confirms the authorization. |
def serialize_operator_less_than(self, op):
"""
Serializer for :meth:`SpiffWorkflow.operators.LessThan`.
Example::
<less-than>
<value>text</value>
<value><attribute>foobar</attribute></value>
</less-than>
"""
elem = etree.Element('less-than')
return self.serialize_value_list(elem, op.args) | Serializer for :meth:`SpiffWorkflow.operators.LessThan`.
Example::
<less-than>
<value>text</value>
<value><attribute>foobar</attribute></value>
</less-than> |
def GetRawDevice(path):
"""Resolves the raw device that contains the path.
Args:
path: A path to examine.
Returns:
A pathspec to read the raw device as well as the modified path to read
within the raw device. This is usually the path without the mount point.
Raises:
IOError: if the path does not exist or some unexpected behaviour occurs.
"""
path = CanonicalPathToLocalPath(path)
# Try to expand the shortened paths
try:
path = win32file.GetLongPathName(path)
except pywintypes.error:
pass
try:
mount_point = win32file.GetVolumePathName(path)
except pywintypes.error as details:
logging.info("path not found. %s", details)
raise IOError("No mountpoint for path: %s" % path)
if not path.startswith(mount_point):
stripped_mp = mount_point.rstrip("\\")
if not path.startswith(stripped_mp):
raise IOError("path %s is not mounted under %s" % (path, mount_point))
corrected_path = LocalPathToCanonicalPath(path[len(mount_point):])
corrected_path = utils.NormalizePath(corrected_path)
volume = win32file.GetVolumeNameForVolumeMountPoint(mount_point).rstrip("\\")
volume = LocalPathToCanonicalPath(volume)
# The pathspec for the raw volume
result = rdf_paths.PathSpec(
path=volume,
pathtype=rdf_paths.PathSpec.PathType.OS,
mount_point=mount_point.rstrip("\\"))
return result, corrected_path | Resolves the raw device that contains the path.
Args:
path: A path to examine.
Returns:
A pathspec to read the raw device as well as the modified path to read
within the raw device. This is usually the path without the mount point.
Raises:
IOError: if the path does not exist or some unexpected behaviour occurs. |
def v1_label_negative_inference(request, response,
visid_to_dbid, dbid_to_visid,
label_store, cid):
'''Return inferred negative labels.
The route for this endpoint is:
``/dossier/v1/label/<cid>/negative-inference``.
Negative labels are inferred by first getting all other content ids
connected to ``cid`` through a negative label. For each directly
adjacent ``cid'``, the connected components of ``cid`` and
``cid'`` are traversed to find negative labels.
The data returned is a JSON list of labels. Each label is a
dictionary with the following keys: ``content_id1``,
``content_id2``, ``subtopic_id1``, ``subtopic_id2``,
``annotator_id``, ``epoch_ticks`` and ``value``.
'''
# No subtopics yet? :-(
lab_to_json = partial(label_to_json, dbid_to_visid)
labs = imap(lab_to_json,
label_store.negative_inference(visid_to_dbid(cid)))
return list(paginate(request, response, labs)) | Return inferred negative labels.
The route for this endpoint is:
``/dossier/v1/label/<cid>/negative-inference``.
Negative labels are inferred by first getting all other content ids
connected to ``cid`` through a negative label. For each directly
adjacent ``cid'``, the connected components of ``cid`` and
``cid'`` are traversed to find negative labels.
The data returned is a JSON list of labels. Each label is a
dictionary with the following keys: ``content_id1``,
``content_id2``, ``subtopic_id1``, ``subtopic_id2``,
``annotator_id``, ``epoch_ticks`` and ``value``. |
def get_or_default(func=None, default=None):
"""
Wrapper around Django's ORM `get` functionality.
Wrap anything that raises ObjectDoesNotExist exception
and provide the default value if necessary.
`default` by default is None. `default` can also be a callable;
if it is, it will be called when an ObjectDoesNotExist
exception is raised.
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except ObjectDoesNotExist:
if callable(default):
return default()
else:
return default
return wrapper
if func is None:
return decorator
else:
return decorator(func) | Wrapper around Django's ORM `get` functionality.
Wrap anything that raises ObjectDoesNotExist exception
and provide the default value if necessary.
`default` by default is None. `default` can also be a callable;
if it is, it will be called when an ObjectDoesNotExist
exception is raised. |
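A small usage sketch for the decorator above; Article is a hypothetical Django model, and passing a callable such as dict as the default means a fresh value is produced whenever the lookup misses.
@get_or_default(default=dict)
def fetch_article(pk):
    return Article.objects.get(pk=pk)  # `Article` is a hypothetical Django model

fetch_article(42)  # returns {} (the result of default()) instead of raising if the row is missing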
def _get_os(osvi = None):
"""
Determines the current operating system.
This function allows you to quickly tell apart major OS differences.
For more detailed information call L{GetVersionEx} instead.
@note:
Wine reports itself as Windows XP 32 bits
(even if the Linux host is 64 bits).
ReactOS may report itself as Windows 2000 or Windows XP,
depending on the version of ReactOS.
@type osvi: L{OSVERSIONINFOEXA}
@param osvi: Optional. The return value from L{GetVersionEx}.
@rtype: str
@return:
One of the following values:
- L{OS_UNKNOWN} (C{"Unknown"})
- L{OS_NT} (C{"Windows NT"})
- L{OS_W2K} (C{"Windows 2000"})
- L{OS_XP} (C{"Windows XP"})
- L{OS_XP_64} (C{"Windows XP (64 bits)"})
- L{OS_W2K3} (C{"Windows 2003"})
- L{OS_W2K3_64} (C{"Windows 2003 (64 bits)"})
- L{OS_W2K3R2} (C{"Windows 2003 R2"})
- L{OS_W2K3R2_64} (C{"Windows 2003 R2 (64 bits)"})
- L{OS_W2K8} (C{"Windows 2008"})
- L{OS_W2K8_64} (C{"Windows 2008 (64 bits)"})
- L{OS_W2K8R2} (C{"Windows 2008 R2"})
- L{OS_W2K8R2_64} (C{"Windows 2008 R2 (64 bits)"})
- L{OS_VISTA} (C{"Windows Vista"})
- L{OS_VISTA_64} (C{"Windows Vista (64 bits)"})
- L{OS_W7} (C{"Windows 7"})
- L{OS_W7_64} (C{"Windows 7 (64 bits)"})
"""
# rough port of http://msdn.microsoft.com/en-us/library/ms724429%28VS.85%29.aspx
if not osvi:
osvi = GetVersionEx()
if osvi.dwPlatformId == VER_PLATFORM_WIN32_NT and osvi.dwMajorVersion > 4:
if osvi.dwMajorVersion == 6:
if osvi.dwMinorVersion == 0:
if osvi.wProductType == VER_NT_WORKSTATION:
if bits == 64 or wow64:
return 'Windows Vista (64 bits)'
return 'Windows Vista'
else:
if bits == 64 or wow64:
return 'Windows 2008 (64 bits)'
return 'Windows 2008'
if osvi.dwMinorVersion == 1:
if osvi.wProductType == VER_NT_WORKSTATION:
if bits == 64 or wow64:
return 'Windows 7 (64 bits)'
return 'Windows 7'
else:
if bits == 64 or wow64:
return 'Windows 2008 R2 (64 bits)'
return 'Windows 2008 R2'
if osvi.dwMajorVersion == 5:
if osvi.dwMinorVersion == 2:
if GetSystemMetrics(SM_SERVERR2):
if bits == 64 or wow64:
return 'Windows 2003 R2 (64 bits)'
return 'Windows 2003 R2'
if osvi.wSuiteMask in (VER_SUITE_STORAGE_SERVER, VER_SUITE_WH_SERVER):
if bits == 64 or wow64:
return 'Windows 2003 (64 bits)'
return 'Windows 2003'
if osvi.wProductType == VER_NT_WORKSTATION and arch == ARCH_AMD64:
return 'Windows XP (64 bits)'
else:
if bits == 64 or wow64:
return 'Windows 2003 (64 bits)'
return 'Windows 2003'
if osvi.dwMinorVersion == 1:
return 'Windows XP'
if osvi.dwMinorVersion == 0:
return 'Windows 2000'
if osvi.dwMajorVersion == 4:
return 'Windows NT'
return 'Unknown' | Determines the current operating system.
This function allows you to quickly tell apart major OS differences.
For more detailed information call L{GetVersionEx} instead.
@note:
Wine reports itself as Windows XP 32 bits
(even if the Linux host is 64 bits).
ReactOS may report itself as Windows 2000 or Windows XP,
depending on the version of ReactOS.
@type osvi: L{OSVERSIONINFOEXA}
@param osvi: Optional. The return value from L{GetVersionEx}.
@rtype: str
@return:
One of the following values:
- L{OS_UNKNOWN} (C{"Unknown"})
- L{OS_NT} (C{"Windows NT"})
- L{OS_W2K} (C{"Windows 2000"})
- L{OS_XP} (C{"Windows XP"})
- L{OS_XP_64} (C{"Windows XP (64 bits)"})
- L{OS_W2K3} (C{"Windows 2003"})
- L{OS_W2K3_64} (C{"Windows 2003 (64 bits)"})
- L{OS_W2K3R2} (C{"Windows 2003 R2"})
- L{OS_W2K3R2_64} (C{"Windows 2003 R2 (64 bits)"})
- L{OS_W2K8} (C{"Windows 2008"})
- L{OS_W2K8_64} (C{"Windows 2008 (64 bits)"})
- L{OS_W2K8R2} (C{"Windows 2008 R2"})
- L{OS_W2K8R2_64} (C{"Windows 2008 R2 (64 bits)"})
- L{OS_VISTA} (C{"Windows Vista"})
- L{OS_VISTA_64} (C{"Windows Vista (64 bits)"})
- L{OS_W7} (C{"Windows 7"})
- L{OS_W7_64} (C{"Windows 7 (64 bits)"}) |
def _get_event_cls(view_obj, events_map):
""" Helper function to get event class.
:param view_obj: Instance of View that processes the request.
:param events_map: Map of events from which event class should be
picked.
:returns: Found event class.
"""
request = view_obj.request
view_method = getattr(view_obj, request.action)
event_action = (
getattr(view_method, '_event_action', None) or
request.action)
return events_map[event_action] | Helper function to get event class.
:param view_obj: Instance of View that processes the request.
:param events_map: Map of events from which event class should be
picked.
:returns: Found event class. |
def construct_message(self, email=None):
""" construct the email message """
# add subject, from and to
self.multipart['Subject'] = self.subject
self.multipart['From'] = self.config['EMAIL']
self.multipart['Date'] = formatdate(localtime=True)
if email is None and self.send_as_one:
self.multipart['To'] = ", ".join(self.addresses)
elif email is not None and self.send_as_one is False:
self.multipart['To'] = email
# add ccs
if self.ccs is not None and self.ccs:
self.multipart['Cc'] = ", ".join(self.ccs)
# add html and text body
html = MIMEText(self.html, 'html')
alt_text = MIMEText(self.text, 'plain')
self.multipart.attach(html)
self.multipart.attach(alt_text)
for file in self.files:
self.multipart.attach(file) | construct the email message |
def _is_paste(keys):
"""
Return `True` when we should consider this list of keys as a paste
event. Pasted text on windows will be turned into a
`Keys.BracketedPaste` event. (It's not 100% correct, but it is probably
the best possible way to detect pasting of text and handle that
correctly.)
"""
# Consider paste when it contains at least one newline and at least one
# other character.
text_count = 0
newline_count = 0
for k in keys:
if isinstance(k.key, six.text_type):
text_count += 1
if k.key == Keys.ControlJ:
newline_count += 1
return newline_count >= 1 and text_count > 1 | Return `True` when we should consider this list of keys as a paste
event. Pasted text on windows will be turned into a
`Keys.BracketedPaste` event. (It's not 100% correct, but it is probably
the best possible way to detect pasting of text and handle that
correctly.) |
def setVisible(self, state):
"""
Sets the visible state for this line edit.
:param state | <bool>
"""
super(XLineEdit, self).setVisible(state)
self.adjustStyleSheet()
self.adjustTextMargins() | Sets the visible state for this line edit.
:param state | <bool> |
def handle_failed_login(self, login_result):
"""If Two Factor Authentication (2FA/2SV) is enabled, the initial
login will fail with a predictable error. Catching this error allows us
to begin the authentication process.
Other types of errors can be treated in a similar way.
"""
error_code = login_result.get('error')
if '2fa-required' in error_code:
utils.error_message('Login Failed: 2FA or 2SV is active!')
self.trigger_two_step_login(login_result)
self.finish_two_step_login()
else:
utils.error_message_and_exit('\nLogin Failed', login_result) | If Two Factor Authentication (2FA/2SV) is enabled, the initial
login will fail with a predictable error. Catching this error allows us
to begin the authentication process.
Other types of errors can be treated in a similar way. |
def right_click(self, x, y, n=1, pre_dl=None, post_dl=None):
"""Right click at ``(x, y)`` on screen for ``n`` times.
at begin.
**中文文档**
在屏幕的 ``(x, y)`` 坐标处右键单击 ``n`` 次。
"""
self.delay(pre_dl)
self.m.click(x, y, 2, n)
self.delay(post_dl) | Right click at ``(x, y)`` on screen ``n`` times.
Waits ``pre_dl`` seconds before clicking and ``post_dl`` seconds after. |
def check_selection(self):
"""
Check if selected text is r/w,
otherwise remove read-only parts of selection
"""
if self.current_prompt_pos is None:
self.set_cursor_position('eof')
else:
self.truncate_selection(self.current_prompt_pos) | Check if selected text is r/w,
otherwise remove read-only parts of selection |
def get_composition_query_session_for_repository(self, repository_id, proxy):
"""Gets a composition query session for the given repository.
arg: repository_id (osid.id.Id): the Id of the repository
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.CompositionQuerySession) - a
CompositionQuerySession
raise: NotFound - repository_id not found
raise: NullArgument - repository_id is null
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_composition_query() or
supports_visible_federation() is false
compliance: optional - This method must be implemented if
supports_composition_query() and
supports_visible_federation() are true.
"""
if repository_id is None:
raise NullArgument()
if not self.supports_composition_query():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise # OperationFailed()
proxy = self._convert_proxy(proxy)
try:
session = sessions.CompositionQuerySession(repository_id, proxy, runtime=self._runtime)
except AttributeError:
raise # OperationFailed()
return session | Gets a composition query session for the given repository.
arg: repository_id (osid.id.Id): the Id of the repository
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.CompositionQuerySession) - a
CompositionQuerySession
raise: NotFound - repository_id not found
raise: NullArgument - repository_id is null
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_composition_query() or
supports_visible_federation() is false
compliance: optional - This method must be implemented if
supports_composition_query() and
supports_visible_federation() are true. |
def convert_bbox_to_albumentations(bbox, source_format, rows, cols, check_validity=False):
"""Convert a bounding box from a format specified in `source_format` to the format used by albumentations:
normalized coordinates of bottom-left and top-right corners of the bounding box in a form of
`[x_min, y_min, x_max, y_max]` e.g. `[0.15, 0.27, 0.67, 0.5]`.
Args:
bbox (list): bounding box
source_format (str): format of the bounding box. Should be 'coco' or 'pascal_voc'.
check_validity (bool): check if all boxes are valid boxes
rows (int): image height
cols (int): image width
Note:
The `coco` format of a bounding box looks like `[x_min, y_min, width, height]`, e.g. [97, 12, 150, 200].
The `pascal_voc` format of a bounding box looks like `[x_min, y_min, x_max, y_max]`, e.g. [97, 12, 247, 212].
Raises:
ValueError: if `source_format` is not equal to `coco` or `pascal_voc`.
"""
if source_format not in {'coco', 'pascal_voc'}:
raise ValueError(
"Unknown source_format {}. Supported formats are: 'coco' and 'pascal_voc'".format(source_format)
)
if source_format == 'coco':
x_min, y_min, width, height = bbox[:4]
x_max = x_min + width
y_max = y_min + height
else:
x_min, y_min, x_max, y_max = bbox[:4]
bbox = [x_min, y_min, x_max, y_max] + list(bbox[4:])
bbox = normalize_bbox(bbox, rows, cols)
if check_validity:
check_bbox(bbox)
return bbox | Convert a bounding box from a format specified in `source_format` to the format used by albumentations:
normalized coordinates of bottom-left and top-right corners of the bounding box in a form of
`[x_min, y_min, x_max, y_max]` e.g. `[0.15, 0.27, 0.67, 0.5]`.
Args:
bbox (list): bounding box
source_format (str): format of the bounding box. Should be 'coco' or 'pascal_voc'.
check_validity (bool): check if all boxes are valid boxes
rows (int): image height
cols (int): image width
Note:
The `coco` format of a bounding box looks like `[x_min, y_min, width, height]`, e.g. [97, 12, 150, 200].
The `pascal_voc` format of a bounding box looks like `[x_min, y_min, x_max, y_max]`, e.g. [97, 12, 247, 212].
Raises:
ValueError: if `source_format` is not equal to `coco` or `pascal_voc`. |
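A worked example of the conversion steps above for a single 'coco' box, assuming normalize_bbox divides x coordinates by cols and y coordinates by rows:
x_min, y_min, width, height = 97, 12, 150, 200      # 'coco' box from the docstring
x_max, y_max = x_min + width, y_min + height        # pascal_voc corners: 247, 212
rows, cols = 300, 400                                # image height and width
normalized = [x_min / cols, y_min / rows, x_max / cols, y_max / rows]
print(normalized)                                    # [0.2425, 0.04, 0.6175, 0.7066...]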
def unregister(self, token_to_remove):
"""
Unregister a filter function.
:param token_to_remove: The token as returned by :meth:`register`.
Unregister a function from the filter chain using the token returned by
:meth:`register`.
"""
for i, (_, token, _) in enumerate(self._filter_order):
if token == token_to_remove:
break
else:
raise ValueError("unregistered token: {!r}".format(
token_to_remove))
del self._filter_order[i] | Unregister a filter function.
:param token_to_remove: The token as returned by :meth:`register`.
Unregister a function from the filter chain using the token returned by
:meth:`register`. |
def fromexportreg(cls, bundle, export_reg):
# type: (Bundle, ExportRegistration) -> RemoteServiceAdminEvent
"""
Creates a RemoteServiceAdminEvent object from an ExportRegistration
"""
exc = export_reg.get_exception()
if exc:
return RemoteServiceAdminEvent(
RemoteServiceAdminEvent.EXPORT_ERROR,
bundle,
export_reg.get_export_container_id(),
export_reg.get_remoteservice_id(),
None,
None,
exc,
export_reg.get_description(),
)
return RemoteServiceAdminEvent(
RemoteServiceAdminEvent.EXPORT_REGISTRATION,
bundle,
export_reg.get_export_container_id(),
export_reg.get_remoteservice_id(),
None,
export_reg.get_export_reference(),
None,
export_reg.get_description(),
) | Creates a RemoteServiceAdminEvent object from an ExportRegistration |
def delete_intent(self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Deletes the specified intent.
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.IntentsClient()
>>>
>>> name = client.intent_path('[PROJECT]', '[INTENT]')
>>>
>>> client.delete_intent(name)
Args:
name (str): Required. The name of the intent to delete.
Format: ``projects/<Project ID>/agent/intents/<Intent ID>``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'delete_intent' not in self._inner_api_calls:
self._inner_api_calls[
'delete_intent'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_intent,
default_retry=self._method_configs['DeleteIntent'].retry,
default_timeout=self._method_configs['DeleteIntent']
.timeout,
client_info=self._client_info,
)
request = intent_pb2.DeleteIntentRequest(name=name, )
self._inner_api_calls['delete_intent'](
request, retry=retry, timeout=timeout, metadata=metadata) | Deletes the specified intent.
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.IntentsClient()
>>>
>>> name = client.intent_path('[PROJECT]', '[INTENT]')
>>>
>>> client.delete_intent(name)
Args:
name (str): Required. The name of the intent to delete.
Format: ``projects/<Project ID>/agent/intents/<Intent ID>``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. |
def save_state(self, fname):
"""
Save the state of this node (the alpha/ksize/id/immediate neighbors)
to a cache file with the given fname.
"""
log.info("Saving state to %s", fname)
data = {
'ksize': self.ksize,
'alpha': self.alpha,
'id': self.node.id,
'neighbors': self.bootstrappable_neighbors()
}
if not data['neighbors']:
log.warning("No known neighbors, so not writing to cache.")
return
with open(fname, 'wb') as file:
pickle.dump(data, file) | Save the state of this node (the alpha/ksize/id/immediate neighbors)
to a cache file with the given fname. |
def RgbToWebSafe(r, g, b, alt=False):
'''Convert the color from RGB to 'web safe' RGB
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
:alt:
If True, use the alternative color instead of the nearest one.
Can be used for dithering.
Returns:
The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
>>> '(%g, %g, %g)' % Color.RgbToWebSafe(1, 0.55, 0.0)
'(1, 0.6, 0)'
'''
webSafeComponent = Color._WebSafeComponent
return tuple((webSafeComponent(v, alt) for v in (r, g, b))) | Convert the color from RGB to 'web safe' RGB
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
:alt:
If True, use the alternative color instead of the nearest one.
Can be used for dithering.
Returns:
The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
>>> '(%g, %g, %g)' % Color.RgbToWebSafe(1, 0.55, 0.0)
'(1, 0.6, 0)' |
def apply_widget_css_class(self, field_name):
"""
Applies CSS classes to widgets if available.
The method uses the `get_widget_css_class` method to determine if the widget
CSS class should be changed. If a CSS class is returned, it is appended to
the current value of the class property of the widget instance.
:param field_name: A field name of the form.
"""
field = self.fields[field_name]
class_name = self.get_widget_css_class(field_name, field)
if class_name:
field.widget.attrs['class'] = join_css_class(
field.widget.attrs.get('class', None), class_name) | Applies CSS classes to widgets if available.
The method uses the `get_widget_css_class` method to determine if the widget
CSS class should be changed. If a CSS class is returned, it is appended to
the current value of the class property of the widget instance.
:param field_name: A field name of the form. |
def labels(self, include_missing=False, include_transforms_for_dims=False):
"""Gets labels for each cube's dimension.
Args
include_missing (bool): Include labels for missing values
Returns
labels (list of lists): Labels for each dimension
"""
return [
dim.labels(include_missing, include_transforms_for_dims)
for dim in self.dimensions
] | Gets labels for each cube's dimension.
Args
include_missing (bool): Include labels for missing values
Returns
labels (list of lists): Labels for each dimension |
def analyze_all(self, analysis_directory = None):
'''This function runs the analysis and creates the plots and summary file.'''
for analysis_set in self.analysis_sets:
self.analyze(analysis_set, analysis_directory = analysis_directory) | This function runs the analysis and creates the plots and summary file. |
def get_rate_limits(self):
"""
Returns a list of dicts with the current rate limit information for domain
and status requests.
"""
resp, body = self.method_get("/limits")
rate_limits = body.get("limits", {}).get("rate")
ret = []
for rate_limit in rate_limits:
limits = rate_limit["limit"]
uri_limits = {"uri": rate_limit["uri"],
"limits": limits}
ret.append(uri_limits)
return ret | Returns a list of dicts with the current rate limit information for domain
and status requests. |
def validate(self, skip_utf8_validation=False):
"""
validate the ABNF frame.
skip_utf8_validation: skip utf8 validation.
"""
if self.rsv1 or self.rsv2 or self.rsv3:
raise WebSocketProtocolException("rsv is not implemented, yet")
if self.opcode not in ABNF.OPCODES:
raise WebSocketProtocolException("Invalid opcode %r", self.opcode)
if self.opcode == ABNF.OPCODE_PING and not self.fin:
raise WebSocketProtocolException("Invalid ping frame.")
if self.opcode == ABNF.OPCODE_CLOSE:
l = len(self.data)
if not l:
return
if l == 1 or l >= 126:
raise WebSocketProtocolException("Invalid close frame.")
if l > 2 and not skip_utf8_validation and not validate_utf8(self.data[2:]):
raise WebSocketProtocolException("Invalid close frame.")
code = 256 * \
six.byte2int(self.data[0:1]) + six.byte2int(self.data[1:2])
if not self._is_valid_close_status(code):
raise WebSocketProtocolException("Invalid close opcode.") | validate the ABNF frame.
skip_utf8_validation: skip utf8 validation. |
def normalized_energy_at_conditions(self, pH, V):
"""
Energy at an electrochemical condition, compatible with
numpy arrays for pH/V input
Args:
pH (float): pH at condition
V (float): applied potential at condition
Returns:
energy normalized by number of non-O/H atoms at condition
"""
return self.energy_at_conditions(pH, V) * self.normalization_factor | Energy at an electrochemical condition, compatible with
numpy arrays for pH/V input
Args:
pH (float): pH at condition
V (float): applied potential at condition
Returns:
energy normalized by number of non-O/H atoms at condition |
def create_default_units_and_dimensions():
"""
Adds the units and the dimensions by reading a JSON file. It adds only dimensions and units that are not already in the db.
New dimensions and units can be added to the DB simply by modifying the JSON file.
"""
default_units_file_location = os.path.realpath(\
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'../',
'static',
'default_units_and_dimensions.json'))
d=None
with open(default_units_file_location) as json_data:
d = json.load(json_data)
json_data.close()
for json_dimension in d["dimension"]:
new_dimension = None
dimension_name = get_utf8_encoded_string(json_dimension["name"])
db_dimensions_by_name = db.DBSession.query(Dimension).filter(Dimension.name==dimension_name).all()
if len(db_dimensions_by_name) == 0:
# Adding the dimension
log.debug("Adding Dimension `{}`".format(dimension_name))
new_dimension = Dimension()
if "id" in json_dimension:
# If ID is specified
new_dimension.id = json_dimension["id"]
new_dimension.name = dimension_name
db.DBSession.add(new_dimension)
db.DBSession.flush()
# Get the dimension by name
new_dimension = get_dimension_from_db_by_name(dimension_name)
for json_unit in json_dimension["unit"]:
db_units_by_name = db.DBSession.query(Unit).filter(Unit.abbreviation==get_utf8_encoded_string(json_unit['abbr'])).all()
if len(db_units_by_name) == 0:
# Adding the unit
log.debug("Adding Unit %s in %s",json_unit['abbr'], json_dimension["name"])
new_unit = Unit()
if "id" in json_unit:
new_unit.id = json_unit["id"]
new_unit.dimension_id = new_dimension.id
new_unit.name = get_utf8_encoded_string(json_unit['name'])
new_unit.abbreviation = get_utf8_encoded_string(json_unit['abbr'])
new_unit.lf = get_utf8_encoded_string(json_unit['lf'])
new_unit.cf = get_utf8_encoded_string(json_unit['cf'])
if "description" in json_unit:
# If Description is specified
new_unit.description = get_utf8_encoded_string(json_unit["description"])
# Save on DB
db.DBSession.add(new_unit)
db.DBSession.flush()
else:
#log.critical("UNIT {}.{} EXISTANT".format(dimension_name,json_unit['abbr']))
pass
try:
# Needed for test. on HWI it fails so we need to catch the exception and pass by
db.DBSession.commit()
except Exception as e:
# Needed for HWI
pass
return | Adds the units and the dimensions by reading a JSON file. It adds only dimensions and units that are not already in the db.
New dimensions and units can be added to the DB simply by modifying the JSON file. |
def callCount(cls, spy, number): #pylint: disable=invalid-name
"""
Checks that the inspected spy was called exactly ``number`` times.
Args: SinonSpy, number
"""
cls.__is_spy(spy)
if not (spy.callCount == number):
raise cls.failException(cls.message) | Checks that the inspected spy was called exactly ``number`` times.
Args: SinonSpy, number |
def run(self, clock, generalLedger):
"""
Execute the activity at the current clock cycle.
:param clock: The clock containing the current execution time and
period information.
:param generalLedger: The general ledger into which to create the
transactions.
"""
if not self._meet_execution_criteria(clock.timestep_ix):
return
generalLedger.create_transaction(
self.description if self.description is not None else self.name,
description='',
tx_date=clock.get_datetime(),
dt_account=self.dt_account,
cr_account=self.cr_account,
source=self.path,
amount=self.amount) | Execute the activity at the current clock cycle.
:param clock: The clock containing the current execution time and
period information.
:param generalLedger: The general ledger into which to create the
transactions. |
def _children(self):
"""Yield all direct children of this object."""
if self.declarations:
yield self.declarations
if isinstance(self.condition, CodeExpression):
yield self.condition
if self.increment:
yield self.increment
for codeobj in self.body._children():
yield codeobj | Yield all direct children of this object. |
def protect(self, password=None, read_protect=False, protect_from=0):
"""Set lock bits to disable future memory modifications.
If *password* is None, all memory pages except the 16-bit
counter in page 41 are protected by setting the relevant lock
bits (note that lock bits can not be reset). If valid NDEF
management data is found in page 4, protect() also sets the
NDEF write flag to read-only.
The NTAG203 can not be password protected. If a *password*
argument is provided, the protect() method always returns
False.
"""
return super(NTAG203, self).protect(
password, read_protect, protect_from) | Set lock bits to disable future memory modifications.
If *password* is None, all memory pages except the 16-bit
counter in page 41 are protected by setting the relevant lock
bits (note that lock bits can not be reset). If valid NDEF
management data is found in page 4, protect() also sets the
NDEF write flag to read-only.
The NTAG203 can not be password protected. If a *password*
argument is provided, the protect() method always returns
False. |
def deprecated(msg=''):
"""Deprecate decorated method."""
@decorator.decorator
def wrap(function, *args, **kwargs):
if not kwargs.pop('disable_warning', False):
warn(msg, DeprecationWarning)
return function(*args, **kwargs)
return wrap | Deprecate decorated method. |
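A short usage sketch, assuming the `decorator` package and the `warn` import from the original module are available:
@deprecated('old_api() is deprecated; use new_api() instead')
def old_api(x, **kwargs):
    return x * 2

old_api(21)                        # emits a DeprecationWarning, returns 42
old_api(21, disable_warning=True)  # the wrapper pops this flag and skips the warning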
def clear(self):
"""
Clear out the multi-frame
:return:
"""
for slot in self._slots:
slot.grid_forget()
slot.destroy()
self._slots = [] | Clear out the multi-frame
:return: |
def match_agent_id(self, agent_id, match):
"""Matches the agent identified by the given ``Id``.
arg: agent_id (osid.id.Id): the Id of the ``Agent``
arg: match (boolean): ``true`` if a positive match, ``false``
for a negative match
raise: NullArgument - ``agent_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
self._add_match('agentId', str(agent_id), bool(match)) | Matches the agent identified by the given ``Id``.
arg: agent_id (osid.id.Id): the Id of the ``Agent``
arg: match (boolean): ``true`` if a positive match, ``false``
for a negative match
raise: NullArgument - ``agent_id`` is ``null``
*compliance: mandatory -- This method must be implemented.* |
def member_create(self, params, member_id):
"""start new mongod instances as part of replica set
Args:
params - member params
member_id - member index
return member config
"""
member_config = params.get('rsParams', {})
server_id = params.pop('server_id', None)
version = params.pop('version', self._version)
proc_params = {'replSet': self.repl_id}
proc_params.update(params.get('procParams', {}))
if self.enable_ipv6:
enable_ipv6_single(proc_params)
# Make sure that auth isn't set the first time we start the servers.
proc_params = self._strip_auth(proc_params)
# Don't pass in auth_key the first time we start the servers.
server_id = self._servers.create(
name='mongod',
procParams=proc_params,
sslParams=self.sslParams,
version=version,
server_id=server_id
)
member_config.update({"_id": member_id,
"host": self._servers.hostname(server_id)})
return member_config | start new mongod instances as part of replica set
Args:
params - member params
member_id - member index
return member config |
def sync_next_id(self):
"""
Determines the highest-numbered ID in this table, and sets
the table's .next_id attribute to the next highest ID in
sequence. If the .next_id attribute is already set to a
value greater than the highest value found, then it is left
unmodified. The return value is the ID identified by this
method. If the table's .next_id attribute is None, then
this function is a no-op.
Note that tables of the same name typically share a common
.next_id attribute (it is a class attribute, not an
attribute of each instance) so that IDs can be generated
that are unique across all tables in the document. Running
sync_next_id() on all the tables in a document that are of
the same type will have the effect of setting the ID to the
next ID higher than any ID in any of those tables.
Example:
>>> import lsctables
>>> tbl = lsctables.New(lsctables.ProcessTable)
>>> print tbl.sync_next_id()
process:process_id:0
"""
if self.next_id is not None:
if len(self):
n = max(self.getColumnByName(self.next_id.column_name)) + 1
else:
n = type(self.next_id)(0)
if n > self.next_id:
self.set_next_id(n)
return self.next_id | Determines the highest-numbered ID in this table, and sets
the table's .next_id attribute to the next highest ID in
sequence. If the .next_id attribute is already set to a
value greater than the highest value found, then it is left
unmodified. The return value is the ID identified by this
method. If the table's .next_id attribute is None, then
this function is a no-op.
Note that tables of the same name typically share a common
.next_id attribute (it is a class attribute, not an
attribute of each instance) so that IDs can be generated
that are unique across all tables in the document. Running
sync_next_id() on all the tables in a document that are of
the same type will have the effect of setting the ID to the
next ID higher than any ID in any of those tables.
Example:
>>> import lsctables
>>> tbl = lsctables.New(lsctables.ProcessTable)
>>> print tbl.sync_next_id()
process:process_id:0 |
def gp_ccX():
"""fit experimental data"""
inDir, outDir = getWorkDirs()
data, alldata = OrderedDict(), None
for infile in os.listdir(inDir):
# get key and import data
key = os.path.splitext(infile)[0].replace('_', '/')
data_import = np.loadtxt(open(os.path.join(inDir, infile), 'rb'))
# convert to log10 vs log10 plot, z=log10(y) => dz=0.434*dy/y
data_import[:,3] = 0.434*(data_import[:,3]/data_import[:,1])
data_import[:,(0,1)] = np.log10(data_import[:,(0,1)])
data_import[:,2] = 0
# fill dictionary
data[key] = data_import
alldata = data[key] if alldata is None else np.vstack((alldata, data[key]))
# fit linear part first
lindata = alldata[alldata[:,0]>2.5]
m = (lindata[-1,1]-lindata[0,1])/(lindata[-1,0]-lindata[0,0])
t = lindata[0,1] - m * lindata[0,0]
popt1, pcov = curve_fit(
linear, lindata[:,0], lindata[:,1], p0=[m, t],
sigma=lindata[:,3], absolute_sigma=True
)
# fit full range
popt2, pcov = curve_fit(
lambda x, c, d: fitfunc(x, popt1[0], popt1[1], c, d),
alldata[:,0], alldata[:,1], sigma=alldata[:,3], absolute_sigma=True,
)
popt = np.hstack((popt1, popt2))
model = lambda x: fitfunc(x, *popt)
# calculate mean standard deviation of data from parameterization
yfit = np.array([model(x) for x in alldata[:,0]])
stddev = 1.5*np.sqrt( # multiple of "sigma"
np.average((alldata[:,1]-yfit)**2, weights=1./alldata[:,3])
)
print 'stddev = %.2g' % stddev
errorband = np.array([[x, model(x), 0, 0, stddev] for x in np.linspace(1,4)])
# make plot
fitdata = np.array([[x, model(x), 0, 0, 0] for x in np.linspace(1,4)])
par_names = ['a', 'b', 'c', 'd']
energies = [19.6, 27, 39, 62.4, 200]
labels = dict(
('%s = %.3g' % (par_name, popt[i]), [3.3, 3-i*0.2, True])
for i,par_name in enumerate(par_names)
)
ccX_vals = [10**model(np.log10(energy)) for energy in energies]
ccX = [' '.join([
'%g GeV:' % energy,
'({})'.format(ufloat(ccX_vals[i], stddev/0.434*ccX_vals[i])),
'{/Symbol \155}b'
]) for i,energy in enumerate(energies)]
print ccX
#labels.update(dict(
# (cc, [1+i*0.5, 4.5+(i%2+1)*0.2, True]) for i,cc in enumerate(ccX)
#))
make_plot(
data = [errorband] + data.values() + [fitdata],
properties = [
'with filledcurves lt 1 lw 5 pt 0 lc %s' % default_colors[8]
] + [
'lc %s lw 4 lt 1 pt 18 ps 1.5' % (default_colors[i])
for i in xrange(len(data))
] + ['with lines lc 0 lw 4 lt 1'],
titles = [''] + data.keys() + ['y = ax+b - e^{-cx+d}'],
xlabel = 'x = log_{10}[{/Symbol \326}s_{NN} (GeV)]',
ylabel = 'y = log_{10}[{/Symbol \163}@_{c@^{/=18-}c}^{NN} ({/Symbol \155}b)]',
name = os.path.join(outDir, 'ccX'),
size = '11.4in,8.3in', xr = [1, 4], yr = [0.5,4.5],
key = ['bottom right', 'nobox', 'width -5'],
tmargin = 0.98, rmargin = 0.99, bmargin = 0.13,
lines = dict(
('y=%f' % (np.log10(energy)), 'lc 0 lw 2 lt 2') for energy in energies
), labels = labels,
) | fit experimental data |
def get_file_info_web(self, fname, delim='<BR>\n'):
"""
Gathers info on a Python program and formats it as an HTML string.
"""
txt = ''
f = mod_file.File(fname[0])
txt += '<sup>' + f.name + '</sup>' + delim
txt += '<sup>' + fname[1] + '</sup>' + delim
txt += '<sub><sup><span white-space:nowrap;>' + f.GetDateAsString(f.date_modified)[2:10] + '</span></sup></sub>' + delim
txt += '<sup><sup>' + str(f.size) + '</sup></sup>' + delim
return txt + '\n' | Gathers info on a Python program and formats it as an HTML string. |
def qurl(url, add=None, exclude=None, remove=None):
"""
Returns the url with changed parameters
"""
urlp = list(urlparse(url))
qp = parse_qsl(urlp[4])
# Add parameters
add = add if add else {}
for name, value in add.items():
if isinstance(value, (list, tuple)):
# Append mode
value = [smart_str(v) for v in value]
qp = [p for p in qp if p[0] != name or p[1] not in value]
qp.extend([(name, smart_str(val)) for val in value])
else:
# Set mode
qp = [p for p in qp if p[0] != name]
qp.append((name, smart_str(value)))
# Exclude parameters
exclude = exclude if exclude else {}
for name, value in exclude.items():
if not isinstance(value, (list, tuple)):
value = [value]
value = [smart_str(v) for v in value]
qp = [p for p in qp if p[0] != name or p[1] not in value]
# Remove parameters
remove = remove if remove else []
for name in remove:
qp = [p for p in qp if p[0] != name]
urlp[4] = urlencode(qp, True)
return urlunparse(urlp) | Returns the url with changed parameters |
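A usage sketch for qurl(), assuming Django's smart_str is importable as in the function above:
qurl('/search?page=2&tag=a', add={'tag': ['b']}, remove=['page'])
# -> '/search?tag=a&tag=b'  (list values append, scalar values replace, remove drops the name entirely)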
def sigmask(self, sigsetsize=None):
"""
Gets the current sigmask. If it's blank, a new one is created (of sigsetsize).
:param sigsetsize: the size (in *bytes*) of the sigmask set
:return: the sigmask
"""
if self._sigmask is None:
if sigsetsize is not None:
sc = self.state.solver.eval(sigsetsize)
self.state.add_constraints(sc == sigsetsize)
self._sigmask = self.state.solver.BVS('initial_sigmask', sc*self.state.arch.byte_width, key=('initial_sigmask',), eternal=True)
else:
self._sigmask = self.state.solver.BVS('initial_sigmask', self.sigmask_bits, key=('initial_sigmask',), eternal=True)
return self._sigmask | Gets the current sigmask. If it's blank, a new one is created (of sigsetsize).
:param sigsetsize: the size (in *bytes*) of the sigmask set
:return: the sigmask |
def monitor_module(module, summary_writer,
track_data=True,
track_grad=True,
track_update=True,
track_update_ratio=False, # this is usually unnecessary
bins=51):
""" Allows for remote monitoring of a module's params and buffers.
The following may be monitored:
1. Forward Values - Histograms of the values for parameter and buffer tensors
2. Gradient Values - Histograms of the gradients for parameter and buffer tensors
3. Update Values - Histograms of the change in parameter and buffer tensor
values from the last recorded forward pass
4. Update Ratios - Histograms of the ratio of change in value for parameter
and value tensors from the last iteration to the actual values.
I.e., what is the relative size of the update.
Generally we like to see values of about .001.
See [cite Andrej Karpathy's babysitting dnn's blog post]
"""
# The module will need additional information
module.track_data = track_data
module.track_grad = track_grad
module.track_update = track_update
module.track_update_ratio = track_update_ratio
if not hasattr(module, 'global_step'):
module.global_step = 0
if not hasattr(module, 'is_monitoring'):
module.is_monitoring = True
if not hasattr(module, 'monitoring'):
set_monitoring(module)
if not hasattr(module, 'last_state_dict'):
module.last_state_dict = dict()
if not hasattr(module, 'var_hooks'):
module.var_hooks = dict()
if not hasattr(module, 'param_hooks'):
module.param_hooks = dict()
# All submodules need to have these
for name, mod in module.named_modules():
if not hasattr(mod, 'monitor'):
set_monitor(mod)
if not hasattr(mod, 'monitored_vars'):
mod.monitored_vars = dict()
module.monitoring(True)
# remove previous grad hooks before handles go stale
module.register_forward_pre_hook(remove_grad_hooks)
# set forward hook that monitors forward activations and sets new grad hooks
monitor_forward_and_backward = get_monitor_forward_and_backward(summary_writer, bins)
module.register_forward_hook(monitor_forward_and_backward) | Allows for remote monitoring of a module's params and buffers.
The following may be monitored:
1. Forward Values - Histograms of the values for parameter and buffer tensors
2. Gradient Values - Histograms of the gradients for parameter and buffer tensors
3. Update Values - Histograms of the change in parameter and buffer tensor
values from the last recorded forward pass
4. Update Ratios - Histograms of the ratio of change in value for parameter
and value tensors from the last iteration to the actual values.
I.e., what is the relative size of the update.
Generally we like to see values of about .001.
See [cite Andrej Karpathy's babysitting dnn's blog post] |
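A minimal sketch of wiring the monitor to a PyTorch module; any SummaryWriter-compatible object should work, and the helper functions referenced by monitor_module() are assumed to live in the same module:
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter

model = nn.Linear(10, 2)
writer = SummaryWriter('runs/monitor_demo')
monitor_module(model, writer)              # registers forward/backward hooks
loss = model(torch.randn(4, 10)).sum()
loss.backward()                            # histograms are emitted during these passes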
def shell(filepath, wsgiapp, interpreter, models):
"""
Runs a python shell.
Usage:
$ wsgicli shell app.py app -i ipython
"""
model_base_classes = get_model_base_classes()
imported_objects = {}
if models and model_base_classes:
insert_import_path_to_sys_modules(filepath)
for module in find_modules_from_path(filepath):
for name in dir(module):
if name.startswith('_'):
continue
obj = getattr(module, name)
if isinstance(obj, model_base_classes):
key = name.split('.')[-1] if '.' in name else name
if key in imported_objects:
continue
imported_objects[key] = obj
module = SourceFileLoader('module', filepath).load_module()
imported_objects['app'] = getattr(module, wsgiapp)
for key in imported_objects.keys():
click.secho("import {}".format(key), fg='green')
run_python(interpreter, imported_objects) | Runs a python shell.
Usage:
$ wsgicli shell app.py app -i ipython |
def _authenticated_call_geocoder(self, url, timeout=DEFAULT_SENTINEL):
"""
Wrap self._call_geocoder, handling tokens.
"""
if self.token is None or int(time()) > self.token_expiry:
self._refresh_authentication_token()
request = Request(
"&".join((url, urlencode({"token": self.token}))),
headers={"Referer": self.referer}
)
return self._base_call_geocoder(request, timeout=timeout) | Wrap self._call_geocoder, handling tokens. |
def main(argv: Sequence[str] = SYS_ARGV) -> int:
"""Execute CLI commands."""
args = default_parser().parse_args(argv)
try:
seq = POPULATIONS[args.population] # type: Sequence
except KeyError:
try:
with open(args.population, 'r', encoding=args.encoding) as file_:
seq = list(file_)
except (OSError, UnicodeError) as ex:
print(ex, file=sys.stderr)
return 1
main_key = key(seq=seq, nteeth=args.nteeth, delimiter=args.delimiter)
print(main_key)
if args.stats:
print('*', len(main_key), 'characters')
print('*', args.nteeth, 'samples from a population of', len(seq))
print(
'* entropy {sign} {nbits} bits'.format(
sign='~' if args.delimiter else '<',
nbits=round(math.log(len(seq), 2) * args.nteeth, 2),
),
)
return 0 | Execute CLI commands. |
def map_E_to_height(self, alat, alon, height, newheight, E):
"""Performs mapping of electric field along the magnetic field.
It is assumed that the electric field is perpendicular to B.
Parameters
==========
alat : (N,) array_like or float
Modified apex latitude
alon : (N,) array_like or float
Modified apex longitude
height : (N,) array_like or float
Source altitude in km
newheight : (N,) array_like or float
Destination altitude in km
E : (3,) or (3, N) array_like
Electric field (at `alat`, `alon`, `height`) in geodetic east,
north, and up components
Returns
=======
E : (3, N) or (3,) ndarray
The electric field at `newheight` (geodetic east, north, and up
components)
"""
return self._map_EV_to_height(alat, alon, height, newheight, E, 'E') | Performs mapping of electric field along the magnetic field.
It is assumed that the electric field is perpendicular to B.
Parameters
==========
alat : (N,) array_like or float
Modified apex latitude
alon : (N,) array_like or float
Modified apex longitude
height : (N,) array_like or float
Source altitude in km
newheight : (N,) array_like or float
Destination altitude in km
E : (3,) or (3, N) array_like
Electric field (at `alat`, `alon`, `height`) in geodetic east,
north, and up components
Returns
=======
E : (3, N) or (3,) ndarray
The electric field at `newheight` (geodetic east, north, and up
components) |
def get_module_functions(modules):
"""Finds functions that do not have implemented derivatives.
Args:
modules: A list of Python modules. Functions contained in these modules
will be checked for membership in 'implemented', and if not found,
will be added to an 'unimplemented' set
implemented: A Python object containing implemented derivatives. A function
should be checkable for membership using the `fn in implemented` syntax.
Returns:
module_fns: A set of functions, builtins or ufuncs in `modules`.
"""
module_fns = set()
for module in modules:
for key in dir(module):
attr = getattr(module, key)
if isinstance(
attr, (types.BuiltinFunctionType, types.FunctionType, numpy.ufunc)):
module_fns.add(attr)
return module_fns | Finds functions that do not have implemented derivatives.
Args:
modules: A list of Python modules. Functions contained in these modules
will be checked for membership in 'implemented', and if not found,
will be added to an 'unimplemented' set
implemented: A Python object containing implemented derivatives. A function
should be checkable for membership using the `fn in implemented` syntax.
Returns:
module_fns: A set of functions, builtins or ufuncs in `modules`. |
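A quick self-contained check of what the helper collects:
import math
import numpy

fns = get_module_functions([math, numpy])
print(math.sqrt in fns)   # True -- a builtin function
print(numpy.add in fns)   # True -- a numpy ufunc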
def iter_schemas(self, schema: Schema) -> Iterable[Tuple[str, Any]]:
"""
Build zero or more JSON schemas for a marshmallow schema.
Generates: name, schema pairs.
"""
if not schema:
return
yield self.to_tuple(schema)
for name, field in self.iter_fields(schema):
if isinstance(field, Nested):
yield self.to_tuple(field.schema)
yield from self.iter_schemas(field.schema)
if isinstance(field, List) and isinstance(field.container, Nested):
yield self.to_tuple(field.container.schema)
yield from self.iter_schemas(field.container.schema) | Build zero or more JSON schemas for a marshmallow schema.
Generates: name, schema pairs. |
def get_field_label(self, field_name, field=None):
""" Return a label to display for a field """
label = None
if field is not None:
label = getattr(field, 'verbose_name', None)
if label is None:
label = getattr(field, 'name', None)
if label is None:
label = field_name
return label.capitalize() | Return a label to display for a field |
def apply(self, q, bindings, cuts):
""" Apply a set of filters, which can be given as a set of tuples in
the form (ref, operator, value), or as a string in query form. If it
is ``None``, no filter will be applied. """
info = []
for (ref, operator, value) in self.parse(cuts):
if map_is_class and isinstance(value, map):
value = list(value)
self._check_type(ref, value)
info.append({'ref': ref, 'operator': operator, 'value': value})
table, column = self.cube.model[ref].bind(self.cube)
bindings.append(Binding(table, ref))
q = q.where(column.in_(value))
return info, q, bindings | Apply a set of filters, which can be given as a set of tuples in
the form (ref, operator, value), or as a string in query form. If it
is ``None``, no filter will be applied. |
def make_url_absolute(self, url, resolve_base=False):
"""
Make url absolute using previous request url as base url.
"""
if self.config['url']:
if resolve_base:
ubody = self.doc.unicode_body()
base_url = find_base_url(ubody)
if base_url:
return urljoin(base_url, url)
return urljoin(self.config['url'], url)
else:
return url | Make url absolute using previous request url as base url. |
def _init_usrgos(self, goids):
"""Return user GO IDs which have GO Terms."""
usrgos = set()
goids_missing = set()
_go2obj = self.gosubdag.go2obj
for goid in goids:
if goid in _go2obj:
usrgos.add(goid)
else:
goids_missing.add(goid)
if goids_missing:
print("MISSING GO IDs: {GOs}".format(GOs=goids_missing))
print("{N} of {M} GO IDs ARE MISSING".format(N=len(goids_missing), M=len(goids)))
return usrgos | Return user GO IDs which have GO Terms. |
def text_height(text):
"""Return the total height of the <text> and the length from the
base point to the top of the text box."""
(d1, d2, ymin, ymax) = get_dimension(text)
return (ymax - ymin, ymax) | Return the total height of the <text> and the length from the
base point to the top of the text box. |
def read_links_file(self,file_path):
'''
Read links and associated categories for specified articles
in a text file separated by a space
Args:
file_path (str): The path to text file with news article links
and category
Returns:
articles: Array of tuples that contains article link & cateogory
ex. [('IPO','www.cs.columbia.edu')]
'''
articles = []
with open(file_path) as f:
for line in f:
line = line.strip()
#Ignore blank lines
if len(line) != 0:
link,category = line.split(' ')
articles.append((category.rstrip(),link.strip()))
return articles | Read links and associated categories for specified articles
in a text file separated by a space
Args:
file_path (str): The path to text file with news article links
and category
Returns:
articles: Array of tuples containing article link & category
ex. [('IPO','www.cs.columbia.edu')] |
def parse_keypair_lines(content, delim='|', kv_sep='='):
"""
Parses a set of entities, where each entity is a set of key-value pairs
contained all on one line. Each entity is parsed into a dictionary and
added to the list returned from this function.
"""
r = []
if content:
for row in [line for line in content if line]:
item_dict = {}
for item in row.split(delim):
key, value = [i.strip("'\"").strip() for i in item.strip().split(kv_sep)]
item_dict[key] = value
r.append(item_dict)
return r | Parses a set of entities, where each entity is a set of key-value pairs
contained all on one line. Each entity is parsed into a dictionary and
added to the list returned from this function. |
def render_pdf_file_to_image_files__ghostscript_png(pdf_file_name,
root_output_file_path,
res_x=150, res_y=150):
"""Use Ghostscript to render a PDF file to .png images. The root_output_file_path
is prepended to all the output files, which have numbers and extensions added.
Return the command output."""
# For gs commands see
# http://ghostscript.com/doc/current/Devices.htm#File_formats
# http://ghostscript.com/doc/current/Devices.htm#PNG
if not gs_executable: init_and_test_gs_executable(exit_on_fail=True)
command = [gs_executable, "-dBATCH", "-dNOPAUSE", "-sDEVICE=pnggray",
"-r"+res_x+"x"+res_y, "-sOutputFile="+root_output_file_path+"-%06d.png",
pdf_file_name]
comm_output = get_external_subprocess_output(command, env=gs_environment)
return comm_output | Use Ghostscript to render a PDF file to .png images. The root_output_file_path
is prepended to all the output files, which have numbers and extensions added.
Return the command output. |
def exchange_additional_URL(self, handle, old, new):
'''
Exchange an URL in the 10320/LOC entry against another, keeping the same id
and other attributes.
:param handle: The handle to modify.
:param old: The URL to replace.
:param new: The URL to set as new URL.
'''
LOGGER.debug('exchange_additional_URL...')
handlerecord_json = self.retrieve_handle_record_json(handle)
if handlerecord_json is None:
msg = 'Cannot exchange URLs in unexisting handle'
raise HandleNotFoundException(
handle=handle,
msg=msg
)
list_of_entries = handlerecord_json['values']
if not self.is_URL_contained_in_10320LOC(handle, old, handlerecord_json):
LOGGER.debug('exchange_additional_URL: No URLs exchanged, as the url was not in the record.')
else:
self.__exchange_URL_in_13020loc(old, new, list_of_entries, handle)
op = 'exchanging URLs'
resp, put_payload = self.__send_handle_put_request(
handle,
list_of_entries,
overwrite=True,
op=op
)
# TODO FIXME (one day): Implement overwriting by index (less risky)
if hsresponses.handle_success(resp):
pass
else:
msg = 'Could not exchange URL ' + str(old) + ' against ' + str(new)
raise GenericHandleError(
operation=op,
handle=handle,
reponse=resp,
msg=msg,
payload=put_payload
) | Exchange an URL in the 10320/LOC entry against another, keeping the same id
and other attributes.
:param handle: The handle to modify.
:param old: The URL to replace.
:param new: The URL to set as new URL. |
def render(self, template=None, additional=None):
"""
Render single model to its html representation.
You may set template path in render function argument,
or model's variable named 'template_path',
or get default name: $app_label$/models/$model_name$.html
Settings:
* MODEL_RENDER_DEFAULT_EXTENSION
set default template extension. Usable if you use jinja or others.
:param template: custom template_path
:return: rendered model html string
"""
template_path = template or self.get_template_path()
template_vars = {'model': self}
if additional:
template_vars.update(additional)
rendered = render_to_string(template_path, template_vars)
return mark_safe(rendered) | Render single model to its html representation.
You may set template path in render function argument,
or model's variable named 'template_path',
or get default name: $app_label$/models/$model_name$.html
Settings:
* MODEL_RENDER_DEFAULT_EXTENSION
set default template extension. Usable if you use jinja or others.
:param template: custom template_path
:return: rendered model html string |
def set_item(filename, item):
"""
Save entry to JSON file
"""
with atomic_write(os.fsencode(str(filename))) as temp_file:
with open(os.fsencode(str(filename))) as products_file:
# load the JSON data into memory
products_data = json.load(products_file)
# check if UUID already exists
uuid_list = [i for i in filter(
lambda z: z["uuid"] == str(item["uuid"]), products_data)]
if len(uuid_list) == 0:
# add the new item to the JSON file
products_data.append(item)
# save the new JSON to the temp file
json.dump(products_data, temp_file)
return True
return None | Save entry to JSON file |
def _get_measure_outcome(self, qubit):
"""Simulate the outcome of measurement of a qubit.
Args:
qubit (int): the qubit to measure
Return:
tuple: pair (outcome, probability) where outcome is '0' or '1' and
probability is the probability of the returned outcome.
"""
# Axis for numpy.sum to compute probabilities
axis = list(range(self._number_of_qubits))
axis.remove(self._number_of_qubits - 1 - qubit)
probabilities = np.sum(np.abs(self._statevector) ** 2, axis=tuple(axis))
# Sample the measurement outcome from the computed probabilities
random_number = self._local_random.rand()
if random_number < probabilities[0]:
return '0', probabilities[0]
# Else outcome was '1'
return '1', probabilities[1] | Simulate the outcome of measurement of a qubit.
Args:
qubit (int): the qubit to measure
Return:
tuple: pair (outcome, probability) where outcome is '0' or '1' and
probability is the probability of the returned outcome. |
def server_list_detailed(self):
'''
Detailed list of servers
'''
nt_ks = self.compute_conn
ret = {}
for item in nt_ks.servers.list():
try:
ret[item.name] = {
'OS-EXT-SRV-ATTR': {},
'OS-EXT-STS': {},
'accessIPv4': item.accessIPv4,
'accessIPv6': item.accessIPv6,
'addresses': item.addresses,
'created': item.created,
'flavor': {'id': item.flavor['id'],
'links': item.flavor['links']},
'hostId': item.hostId,
'id': item.id,
'image': {'id': item.image['id'] if item.image else 'Boot From Volume',
'links': item.image['links'] if item.image else ''},
'key_name': item.key_name,
'links': item.links,
'metadata': item.metadata,
'name': item.name,
'state': item.status,
'tenant_id': item.tenant_id,
'updated': item.updated,
'user_id': item.user_id,
}
except TypeError:
continue
ret[item.name]['progress'] = getattr(item, 'progress', '0')
if hasattr(item.__dict__, 'OS-DCF:diskConfig'):
ret[item.name]['OS-DCF'] = {
'diskConfig': item.__dict__['OS-DCF:diskConfig']
}
if hasattr(item.__dict__, 'OS-EXT-SRV-ATTR:host'):
ret[item.name]['OS-EXT-SRV-ATTR']['host'] = \
item.__dict__['OS-EXT-SRV-ATTR:host']
if hasattr(item.__dict__, 'OS-EXT-SRV-ATTR:hypervisor_hostname'):
ret[item.name]['OS-EXT-SRV-ATTR']['hypervisor_hostname'] = \
item.__dict__['OS-EXT-SRV-ATTR:hypervisor_hostname']
if hasattr(item.__dict__, 'OS-EXT-SRV-ATTR:instance_name'):
ret[item.name]['OS-EXT-SRV-ATTR']['instance_name'] = \
item.__dict__['OS-EXT-SRV-ATTR:instance_name']
if hasattr(item.__dict__, 'OS-EXT-STS:power_state'):
ret[item.name]['OS-EXT-STS']['power_state'] = \
item.__dict__['OS-EXT-STS:power_state']
if hasattr(item.__dict__, 'OS-EXT-STS:task_state'):
ret[item.name]['OS-EXT-STS']['task_state'] = \
item.__dict__['OS-EXT-STS:task_state']
if hasattr(item.__dict__, 'OS-EXT-STS:vm_state'):
ret[item.name]['OS-EXT-STS']['vm_state'] = \
item.__dict__['OS-EXT-STS:vm_state']
if hasattr(item.__dict__, 'security_groups'):
ret[item.name]['security_groups'] = \
item.__dict__['security_groups']
return ret | Detailed list of servers |
def match(self, path):
"""Matches a fully qualified path template string.
Args:
path (str): A fully qualified path template string.
Returns:
dict: Var names to matched binding values.
Raises:
ValidationException: If path can't be matched to the template.
"""
this = self.segments
that = path.split('/')
current_var = None
bindings = {}
segment_count = self.segment_count
j = 0
for i in range(0, len(this)):
if j >= len(that):
break
if this[i].kind == _TERMINAL:
if this[i].literal == '*':
bindings[current_var] = that[j]
j += 1
elif this[i].literal == '**':
until = j + len(that) - segment_count + 1
segment_count += len(that) - segment_count
bindings[current_var] = '/'.join(that[j:until])
j = until
elif this[i].literal != that[j]:
raise ValidationException(
'mismatched literal: \'%s\' != \'%s\'' % (
this[i].literal, that[j]))
else:
j += 1
elif this[i].kind == _BINDING:
current_var = this[i].literal
if j != len(that) or j != segment_count:
raise ValidationException(
'match error: could not render from the path template: {}'
.format(path))
return bindings | Matches a fully qualified path template string.
Args:
path (str): A fully qualified path template string.
Returns:
dict: Var names to matched binding values.
Raises:
ValidationException: If path can't be matched to the template. |
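A hedged usage sketch for match: it assumes the method lives on a path-template class (called PathTemplate here, which is an assumption, not a name from the source) that has already parsed the template string into segments.
template = PathTemplate('shelves/{shelf}/books/{book}')  # hypothetical constructor
print(template.match('shelves/s1/books/b2'))
# -> {'shelf': 's1', 'book': 'b2'}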
def bft(self):
""" Generator that returns each element of the tree in Breadth-first order"""
queue = deque([self])
while queue:
node = queue.pop()
yield node
if hasattr(node, "childs"):
queue.extendleft(node.childs) | Generator that returns each element of the tree in Breadth-first order |
def build_subtree_strut(self, result, *args, **kwargs):
"""
Returns a dictionary in form of
{node:Resource, children:{node_id: Resource}}
:param result:
:return:
"""
return self.service.build_subtree_strut(result=result, *args, **kwargs) | Returns a dictionary in form of
{node:Resource, children:{node_id: Resource}}
:param result:
:return: |
def get_group_summary(self, group_id, **kwargs): # noqa: E501
"""Get group information. # noqa: E501
An endpoint for getting general information about the group. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/policy-groups/{group-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_group_summary(group_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str group_id: The ID of the group to be retrieved. (required)
:return: GroupSummary
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.get_group_summary_with_http_info(group_id, **kwargs) # noqa: E501
else:
(data) = self.get_group_summary_with_http_info(group_id, **kwargs) # noqa: E501
return data | Get group information. # noqa: E501
An endpoint for getting general information about the group. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/policy-groups/{group-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_group_summary(group_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str group_id: The ID of the group to be retrieved. (required)
:return: GroupSummary
If the method is called asynchronously,
returns the request thread. |
def load_remotes(extra_path=None, load_user=True):
"""Load the YAML remotes file, which sort of combines the Accounts file with part of the
remotes sections from the main config
:return: An `AttrDict`
"""
from os.path import getmtime
try:
remotes_file = find_config_file(REMOTES_FILE, extra_path=extra_path, load_user=load_user)
except ConfigurationError:
remotes_file = None
if remotes_file is not None and os.path.exists(remotes_file):
config = AttrDict()
config.update_yaml(remotes_file)
if 'remotes' not in config:
config.remotes = AttrDict()
config.remotes.loaded = [remotes_file, getmtime(remotes_file)]
return config
else:
return None | Load the YAML remotes file, which sort of combines the Accounts file with part of the
remotes sections from the main config
:return: An `AttrDict` |
def addKwdArgsToSig(sigStr, kwArgsDict):
""" Alter the passed function signature string to add the given kewords """
retval = sigStr
if len(kwArgsDict) > 0:
retval = retval.strip(' ,)') # open up the r.h.s. for more args
for k in kwArgsDict:
if retval[-1] != '(': retval += ", "
retval += str(k)+"="+str(kwArgsDict[k])
retval += ')'
return retval | Alter the passed function signature string to add the given keywords |
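Two illustrative calls (the signatures and keyword dicts are made up) showing how the closing parenthesis is reopened and the keywords appended:
print(addKwdArgsToSig("foo(a, b)", {"timeout": 30}))  # -> foo(a, b, timeout=30)
print(addKwdArgsToSig("bar()", {"retries": 3}))       # -> bar(retries=3)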
def _generate_union(self, union_type):
"""
Emits a JSDoc @typedef for a union type.
"""
union_name = fmt_type_name(union_type)
self._emit_jsdoc_header(union_type.doc)
self.emit(' * @typedef {Object} %s' % union_name)
variant_types = []
for variant in union_type.all_fields:
variant_types.append("'%s'" % variant.name)
variant_data_type, _, _ = unwrap(variant.data_type)
# Don't emit fields for void types.
if not is_void_type(variant_data_type):
variant_doc = ' - Available if .tag is %s.' % variant.name
if variant.doc:
variant_doc += ' ' + variant.doc
self.emit_wrapped_text(
'@property {%s} [%s]%s' % (
fmt_type(variant_data_type),
variant.name,
variant_doc,
),
prefix=' * ',
)
jsdoc_tag_union = fmt_jsdoc_union(variant_types)
self.emit(' * @property {%s} .tag - Tag identifying the union variant.' % jsdoc_tag_union)
self.emit(' */') | Emits a JSDoc @typedef for a union type. |
def snapshot(self, filename="tmp.png"):
"""
Take a screenshot and save it to `tmp.png` filename by default
Args:
filename: name of file where to store the screenshot
Returns:
display the screenshot
"""
if not filename:
filename = "tmp.png"
if self.handle:
try:
screenshot(filename, self.handle)
except win32gui.error:
self.handle = None
screenshot(filename)
else:
screenshot(filename)
img = aircv.imread(filename)
os.remove(filename)
return img | Take a screenshot and save it to `tmp.png` filename by default
Args:
filename: name of file where to store the screenshot
Returns:
display the screenshot |
def render_field_errors(field):
"""
Render field errors as html.
"""
if field.errors:
html = """<p class="help-block">Error: {errors}</p>""".format(
errors='. '.join(field.errors)
)
return HTMLString(html)
return None | Render field errors as html. |
def detect(self, app):
"""
Given an app, run detect script on it to determine whether it can be
built with this pack. Return True/False.
"""
script = os.path.join(self.folder, 'bin', 'detect')
cmd = '%s %s' % (script, app.folder)
result = run(cmd)
return result.status_code == 0 | Given an app, run detect script on it to determine whether it can be
built with this pack. Return True/False. |
def get_model_spec_ting(atomic_number):
"""
X_u_template[0:2] are teff, logg, vturb in km/s
X_u_template[:,3] -> onward, put atomic number
atomic_number is 6 for C, 7 for N
"""
DATA_DIR = "/Users/annaho/Data/LAMOST/Mass_And_Age"
temp = np.load("%s/X_u_template_KGh_res=1800.npz" %DATA_DIR)
X_u_template = temp["X_u_template"]
wl = temp["wavelength"]
grad_spec = X_u_template[:,atomic_number]
return wl, grad_spec | X_u_template[0:2] are teff, logg, vturb in km/s
X_u_template[:,3] -> onward, put atomic number
atomic_number is 6 for C, 7 for N |
def event(self, *topics, **kwargs):
"""Topic callback registry.
callback func should receive two args: topic and pk, and then process
the replication job.
Note: The callback func must return True/False. When passed a list of
pks, the func should return a list of True/False with the same length
of pks.
:param topics: a list of topics
:param workers: how many workers to process this topic
:param multi: whether pass multiple pks
:param queue_limit: when queue size is larger than the limit,
the worker should run deduplicate procedure
"""
workers = kwargs.pop("workers", 1)
multi = kwargs.pop("multi", False)
queue_limit = kwargs.pop("queue_limit", 10000)
def wrapper(func):
for topic in topics:
queues = [Queue() for _ in range(workers)]
hash_ring = ketama.Continuum()
for q in queues:
hash_ring[str(hash(q))] = q
self.worker_queues[topic] = hash_ring
self.workers[topic] = WorkerPool(
queues, topic, func, multi=multi, queue_limit=queue_limit,
logger_name="%s.%s" % (self.name, topic))
self.socket.setsockopt(zmq.SUBSCRIBE, asbytes(topic))
return func
return wrapper | Topic callback registry.
callback func should receive two args: topic and pk, and then process
the replication job.
Note: The callback func must return True/False. When passed a list of
pks, the func should return a list of True/False with the same length
of pks.
:param topics: a list of topics
:param workers: how many workers to process this topic
:param multi: whether pass multiple pks
:param queue_limit: when queue size is larger than the limit,
the worker should run deduplicate procedure |
def launch_image(
# players info
player: Player,
nth_player: int,
num_players: int,
# game settings
headless: bool,
game_name: str,
map_name: str,
game_type: GameType,
game_speed: int,
timeout: Optional[int],
hide_names: bool,
random_names: bool,
drop_players: bool,
allow_input: bool,
auto_launch: bool,
# mount dirs
game_dir: str,
bot_dir: str,
map_dir: str,
bwapi_data_bwta_dir: str,
bwapi_data_bwta2_dir: str,
vnc_base_port: int,
vnc_host: int,
capture_movement: bool,
# docker
docker_image: str,
docker_opts: List[str]
) -> None:
"""
:raises docker.errors.APIError
:raises DockerException
"""
container_name = f"{game_name}_{nth_player}_{player.name.replace(' ', '_')}"
log_dir = f"{game_dir}/{game_name}/logs_{nth_player}"
crashes_dir = f"{game_dir}/{game_name}/crashes_{nth_player}"
os.makedirs(log_dir, mode=0o777, exist_ok=True) # todo: proper mode
os.makedirs(crashes_dir, mode=0o777, exist_ok=True) # todo: proper mode
volumes = {
xoscmounts(log_dir): {"bind": LOG_DIR, "mode": "rw"},
xoscmounts(map_dir): {"bind": MAP_DIR, "mode": "rw"},
xoscmounts(crashes_dir): {"bind": ERRORS_DIR, "mode": "rw"},
xoscmounts(bwapi_data_bwta_dir): {"bind": BWAPI_DATA_BWTA_DIR, "mode": "rw"},
xoscmounts(bwapi_data_bwta2_dir): {"bind": BWAPI_DATA_BWTA2_DIR, "mode": "rw"},
}
ports = {}
if not headless:
ports.update({"5900/tcp": vnc_base_port + nth_player})
env = dict(
PLAYER_NAME=player.name if not random_names else random_string(8),
PLAYER_RACE=player.race.value,
NTH_PLAYER=nth_player,
NUM_PLAYERS=num_players,
GAME_NAME=game_name,
MAP_NAME=f"/app/sc/maps/{map_name}",
GAME_TYPE=game_type.value,
SPEED_OVERRIDE=game_speed,
HIDE_NAMES="1" if hide_names else "0",
DROP_PLAYERS="1" if drop_players else "0",
TM_LOG_RESULTS=f"../logs/scores.json",
TM_LOG_FRAMETIMES=f"../logs/frames.csv",
TM_SPEED_OVERRIDE=game_speed,
TM_ALLOW_USER_INPUT="1" if isinstance(player, HumanPlayer) or allow_input else "0",
EXIT_CODE_REALTIME_OUTED=EXIT_CODE_REALTIME_OUTED,
CAPTURE_MOUSE_MOVEMENT="1" if capture_movement else "0",
HEADFUL_AUTO_LAUNCH="1" if auto_launch else "0",
JAVA_DEBUG="0"
)
if timeout is not None:
env["PLAY_TIMEOUT"] = timeout
if isinstance(player, BotPlayer):
# Only mount write directory, read and AI
# are copied from the bot directory in proper places in bwapi-data
bot_data_write_dir = f"{game_dir}/{game_name}/write_{nth_player}/"
os.makedirs(bot_data_write_dir, mode=0o777, exist_ok=True) # todo: proper mode
volumes.update({
xoscmounts(bot_data_write_dir): {"bind": BOT_DATA_WRITE_DIR, "mode": "rw"},
xoscmounts(player.bot_dir): {"bind": BOT_DIR, "mode": "ro"},
})
env["BOT_FILE"] = player.bot_basefilename
env["BOT_BWAPI"] = player.bwapi_version
env["JAVA_DEBUG"] = "0"
env["JAVA_DEBUG_PORT"] = ""
env["JAVA_OPTS"] = ""
command = ["/app/play_bot.sh"]
if player.meta.javaDebugPort is not None:
ports.update({"player.meta.javaDebugPort/tcp": player.meta.javaDebugPort})
env["JAVA_DEBUG"] = "1"
env["JAVA_DEBUG_PORT"] = player.meta.javaDebugPort
if player.meta.javaOpts is not None:
env["JAVA_OPTS"] = player.meta.javaOpts
if player.meta.port is not None:
if isinstance(player.meta.port, int) or player.meta.port.isdigit():
ports.update({str(player.meta.port) + '/tcp': int(player.meta.port)})
else:
forward, local = [int(x) for x in player.meta.port.split(':')]
ports.update({str(local) + '/tcp': forward})
else:
command = ["/app/play_human.sh"]
is_server = nth_player == 0
entrypoint_opts = ["--headful"]
if headless:
entrypoint_opts = [
"--game", game_name, "--name", player.name,
"--race", player.race.value, "--lan"
]
if is_server:
entrypoint_opts += ["--host", "--map", f"/app/sc/maps/{map_name}"]
else:
entrypoint_opts += ["--join"]
command += entrypoint_opts
logger.debug(
"\n"
f"docker_image={docker_image}\n"
f"command={pformat(command, indent=4)}\n"
f"name={container_name}\n"
f"detach={True}\n"
f"environment={pformat(env, indent=4)}\n"
f"privileged={True}\n"
f"volumes={pformat(volumes, indent=4)}\n"
f"network={DOCKER_STARCRAFT_NETWORK}\n"
f"ports={ports}\n"
)
container = docker_client.containers.run(
docker_image,
command=command,
name=container_name,
detach=True,
environment=env,
privileged=True,
volumes=volumes,
network=DOCKER_STARCRAFT_NETWORK,
ports=ports
)
if container:
container_id = running_containers(container_name)
logger.info(f"launched {player}")
logger.debug(f"container name = '{container_name}', container id = '{container_id}'")
else:
raise DockerException(f"could not launch {player} in container {container_name}") | :raises docker,errors.APIError
:raises DockerException |
def data_parallelism(daisy_chain_variables=True,
all_workers=False,
ps_replicas=0,
ps_job="/job:ps",
ps_gpu=0,
schedule="continuous_train_and_eval",
sync=False,
worker_gpu=1,
worker_replicas=1,
worker_id=0,
gpu_order="",
worker_job="/job:localhost",
no_data_parallelism=False):
"""See data_parallelism_from_flags."""
tf.logging.info("schedule=%s" % schedule)
tf.logging.info("worker_gpu=%s" % worker_gpu)
tf.logging.info("sync=%s" % sync)
def _ps_replicas(all_workers=False):
if all_workers:
return list(range(ps_replicas))
# Worker K will be using replicas {0,...n-1} + K*n if we have n replicas.
num_replicas = ps_replicas // worker_replicas
return [d + worker_id * num_replicas for d in range(num_replicas)]
def _gpu_order(num_gpus):
if gpu_order:
ret = [int(s) for s in gpu_order.split(" ")]
if len(ret) == num_gpus:
return ret
return list(range(num_gpus))
def _ps_gpus(all_workers=False):
ps_gpus = []
for d in _ps_replicas(all_workers=all_workers):
ps_gpus.extend([(d, gpu) for gpu in _gpu_order(ps_gpu)])
return ps_gpus
def ps_devices(all_workers=False):
"""List of ps devices (where to put the experts).
Args:
all_workers: whether the list is for all async workers or just this one.
Returns:
a list of device names
"""
if ps_replicas > 0:
if ps_gpu > 0:
return [
ps_job + "/task:%d/GPU:%d" % (d, gpu)
for (d, gpu) in _ps_gpus(all_workers=all_workers)
]
else:
return [
ps_job + "/task:%d" % d
for d in _ps_replicas(all_workers=all_workers)
]
else:
if worker_gpu > 0:
return ["gpu:%d" % d for d in _gpu_order(worker_gpu)]
else:
return [""]
def _replica_device_setter(worker_device):
if ps_replicas == 0:
return worker_device
return tf.train.replica_device_setter(
worker_device=worker_device,
ps_tasks=ps_replicas,
ps_device=ps_job + "/GPU:0" if ps_gpu > 0 else ps_job)
is_single_machine = ps_replicas == 0 and worker_replicas == 1
if no_data_parallelism:
datashard_devices = [""]
caching_devices = None
elif is_single_machine:
tf.logging.warn(
"Schedule=%s. Assuming that training is running on a single machine.",
schedule)
datashard_devices = ["gpu:%d" % d for d in _gpu_order(worker_gpu)]
if worker_gpu < 1:
datashard_devices += ["cpu:0"]
caching_devices = None
elif sync and ps_replicas > 0:
# compute on ps
datashard_devices = [
_replica_device_setter(d) for d in ps_devices(all_workers=all_workers)
]
if ps_gpu > 0 and ps_replicas > 1:
caching_devices = [
ps_job + "/task:%d/cpu:0" % d
for (d, _) in _ps_gpus(all_workers=all_workers)
]
else:
caching_devices = None
else:
# compute on worker - this is either a single-worker setup or asynchronous
# with parameter servers.
if worker_gpu > 1:
datashard_devices = [
_replica_device_setter(worker_job + "/GPU:%d" % d)
for d in _gpu_order(worker_gpu)
]
caching_devices = None
else:
datashard_devices = [_replica_device_setter(worker_job)]
caching_devices = None
tf.logging.info("datashard_devices: %s", datashard_devices)
tf.logging.info("caching_devices: %s", caching_devices)
tf.logging.info("ps_devices: %s", ps_devices(all_workers=all_workers))
return eu.Parallelism(
datashard_devices,
caching_devices=caching_devices,
daisy_chain_variables=daisy_chain_variables,
ps_devices=ps_devices(all_workers=all_workers)) | See data_parallelism_from_flags. |
def op(name,
data,
bucket_count=None,
display_name=None,
description=None,
collections=None):
"""Create a legacy histogram summary op.
Arguments:
name: A unique name for the generated summary node.
data: A `Tensor` of any shape. Must be castable to `float64`.
bucket_count: Optional positive `int`. The output will have this
many buckets, except in two edge cases. If there is no data, then
there are no buckets. If there is data but all points have the
same value, then there is one bucket whose left and right
endpoints are the same.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
`[GraphKeys.SUMMARIES]`.
Returns:
A TensorFlow summary op.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if display_name is None:
display_name = name
summary_metadata = metadata.create_summary_metadata(
display_name=display_name, description=description)
with tf.name_scope(name):
tensor = _buckets(data, bucket_count=bucket_count)
return tf.summary.tensor_summary(name='histogram_summary',
tensor=tensor,
collections=collections,
summary_metadata=summary_metadata) | Create a legacy histogram summary op.
Arguments:
name: A unique name for the generated summary node.
data: A `Tensor` of any shape. Must be castable to `float64`.
bucket_count: Optional positive `int`. The output will have this
many buckets, except in two edge cases. If there is no data, then
there are no buckets. If there is data but all points have the
same value, then there is one bucket whose left and right
endpoints are the same.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
`[GraphKeys.SUMMARIES]`.
Returns:
A TensorFlow summary op. |
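A hedged usage sketch for op, assuming TF1 graph mode; the tensor, tag name, and description are illustrative, not taken from the source.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
weights = tf.random.normal([1000])
summ = op("layer1/weights", weights, bucket_count=30,
          description="Distribution of layer-1 weights.")
# The resulting op can be evaluated in a session and written with a tf.summary.FileWriter.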
def _wait_for_result(self):
"""Wait for the sensor to be ready for measurement."""
basetime = 0.018 if self._low_res else 0.128
sleep(basetime * (self._mtreg / 69.0) + self._delay) | Wait for the sensor to be ready for measurement. |
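A rough worked example of the wait-time formula above, using assumed values (mtreg=69, no extra delay) rather than values from the source:
basetime = 0.128               # high-resolution mode; 0.018 when _low_res is set
mtreg, delay = 69, 0.0         # assumed defaults for illustration
print(basetime * (mtreg / 69.0) + delay)  # -> 0.128 seconds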
def del_team(self, team, sync=True):
"""
delete team from this OS instance
:param team: the team to be deleted from this OS instance
:param sync: If sync=True(default) synchronize with Ariane server. If sync=False,
add the team object on list to be removed on next save().
:return:
"""
LOGGER.debug("OSInstance.del_team")
if not sync:
self.team_2_rm.append(team)
else:
if team.id is None:
team.sync()
if self.id is not None and team.id is not None:
params = {
'id': self.id,
'teamID': team.id
}
args = {'http_operation': 'GET', 'operation_path': 'update/teams/delete', 'parameters': params}
response = OSInstanceService.requester.call(args)
if response.rc != 0:
LOGGER.warning(
'OSInstance.del_team - Problem while updating OS instance ' + self.name +
'. Reason: ' + str(response.response_content) + '-' + str(response.error_message) +
" (" + str(response.rc) + ")"
)
else:
self.team_ids.remove(team.id)
team.osi_ids.remove(self.id)
else:
LOGGER.warning(
'OSInstance.del_team - Problem while updating OS instance ' + self.name + '. Reason: application ' +
team.name + ' id is None'
) | delete team from this OS instance
:param team: the team to be deleted from this OS instance
:param sync: If sync=True(default) synchronize with Ariane server. If sync=False,
add the team object on list to be removed on next save().
:return: |
def _build_id_tuple(params, spec):
"""
Builds a 2-element tuple used to identify fields by grabbing the class_
and tag from an Asn1Value class and the params dict being passed to it
:param params:
A dict of params to pass to spec
:param spec:
An Asn1Value class
:return:
A 2-element integer tuple in the form (class_, tag)
"""
# Handle situations where the spec is not known at setup time
if spec is None:
return (None, None)
required_class = spec.class_
required_tag = spec.tag
_tag_type_to_explicit_implicit(params)
if 'explicit' in params:
if isinstance(params['explicit'], tuple):
required_class, required_tag = params['explicit']
else:
required_class = 2
required_tag = params['explicit']
elif 'implicit' in params:
if isinstance(params['implicit'], tuple):
required_class, required_tag = params['implicit']
else:
required_class = 2
required_tag = params['implicit']
if required_class is not None and not isinstance(required_class, int_types):
required_class = CLASS_NAME_TO_NUM_MAP[required_class]
required_class = params.get('class_', required_class)
required_tag = params.get('tag', required_tag)
return (required_class, required_tag) | Builds a 2-element tuple used to identify fields by grabbing the class_
and tag from an Asn1Value class and the params dict being passed to it
:param params:
A dict of params to pass to spec
:param spec:
An Asn1Value class
:return:
A 2-element integer tuple in the form (class_, tag) |
def format_output(old_maps, new_maps):
""" This function takes the returned dict from `transform` and converts
it to the same datatype as the input.
Parameters
----------
old_maps : {FieldArray, dict}
The mapping object to add new maps to.
new_maps : dict
A dict with key as parameter name and value is numpy.array.
Returns
-------
{FieldArray, dict}
The old_maps object with new keys from new_maps.
"""
# if input is FieldArray then return FieldArray
if isinstance(old_maps, record.FieldArray):
keys = new_maps.keys()
values = [new_maps[key] for key in keys]
for key, vals in zip(keys, values):
try:
old_maps = old_maps.add_fields([vals], [key])
except ValueError:
old_maps[key] = vals
return old_maps
# if input is dict then return dict
elif isinstance(old_maps, dict):
out = old_maps.copy()
out.update(new_maps)
return out
# else error
else:
raise TypeError("Input type must be FieldArray or dict.") | This function takes the returned dict from `transform` and converts
it to the same datatype as the input.
Parameters
----------
old_maps : {FieldArray, dict}
The mapping object to add new maps to.
new_maps : dict
A dict with key as parameter name and value is numpy.array.
Returns
-------
{FieldArray, dict}
The old_maps object with new keys from new_maps. |
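A small dict-based sketch of format_output; the parameter names and arrays are made up:
import numpy as np
old_maps = {"mass1": np.array([1.4, 1.6])}
new_maps = {"mchirp": np.array([1.2, 1.3])}
out = format_output(old_maps, new_maps)
print(sorted(out.keys()))  # -> ['mass1', 'mchirp']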
def subsite_upcoming_events(context):
"""
Displays a list of all upcoming events in this site.
"""
request = context['request']
home = request.site.root_page
return {'request': request,
'events': getAllUpcomingEvents(request, home=home)} | Displays a list of all upcoming events in this site. |
def get_student_item_dict(self, anonymous_user_id=None):
"""Create a student_item_dict from our surrounding context.
See also: submissions.api for details.
Args:
anonymous_user_id(str): A unique anonymous_user_id for (user, course) pair.
Returns:
(dict): The student item associated with this XBlock instance. This
includes the student id, item id, and course id.
"""
item_id = self._serialize_opaque_key(self.scope_ids.usage_id)
# This is not the real way course_ids should work, but this is a
# temporary expediency for LMS integration
if hasattr(self, "xmodule_runtime"):
course_id = self.get_course_id() # pylint:disable=E1101
if anonymous_user_id:
student_id = anonymous_user_id
else:
student_id = self.xmodule_runtime.anonymous_student_id # pylint:disable=E1101
else:
course_id = "edX/Enchantment_101/April_1"
if self.scope_ids.user_id is None:
student_id = ''
else:
student_id = unicode(self.scope_ids.user_id)
student_item_dict = dict(
student_id=student_id,
item_id=item_id,
course_id=course_id,
item_type='ubcpi'
)
return student_item_dict | Create a student_item_dict from our surrounding context.
See also: submissions.api for details.
Args:
anonymous_user_id(str): A unique anonymous_user_id for (user, course) pair.
Returns:
(dict): The student item associated with this XBlock instance. This
includes the student id, item id, and course id. |
def rotate(self, img):
""" Rotate image if exif says it needs it """
try:
exif = image2exif.get_exif(img)
except AttributeError:
# image format doesn't support exif
return img
orientation = exif.get('Orientation', 1)
landscape = img.height < img.width
if orientation == 6 and landscape:
print("ROTATING")
return img.rotate(-90)
return img | Rotate image if exif says it needs it |
def all_hosts(self):
"""List of hosts, passives, and arbiters known to this server."""
return set(imap(common.clean_node, itertools.chain(
self._doc.get('hosts', []),
self._doc.get('passives', []),
self._doc.get('arbiters', [])))) | List of hosts, passives, and arbiters known to this server. |
def _generate_union_class(self, ns, data_type):
# type: (ApiNamespace, Union) -> None
"""Defines a Python class that represents a union in Stone."""
self.emit(self._class_declaration_for_type(ns, data_type))
with self.indent():
self.emit('"""')
if data_type.doc:
self.emit_wrapped_text(
self.process_doc(data_type.doc, self._docf))
self.emit()
self.emit_wrapped_text(
'This class acts as a tagged union. Only one of the ``is_*`` '
'methods will return true. To get the associated value of a '
'tag (if one exists), use the corresponding ``get_*`` method.')
if data_type.has_documented_fields():
self.emit()
for field in data_type.fields:
if not field.doc:
continue
if is_void_type(field.data_type):
ivar_doc = ':ivar {}: {}'.format(
fmt_namespaced_var(ns.name, data_type.name, field.name),
self.process_doc(field.doc, self._docf))
elif is_user_defined_type(field.data_type):
if data_type.namespace.name != ns.name:
formatted_var = fmt_namespaced_var(ns.name, data_type.name, field.name)
else:
formatted_var = '{}.{}'.format(data_type.name, fmt_var(field.name))
ivar_doc = ':ivar {} {}: {}'.format(
fmt_class(field.data_type.name),
formatted_var,
self.process_doc(field.doc, self._docf))
else:
ivar_doc = ':ivar {} {}: {}'.format(
self._python_type_mapping(ns, field.data_type),
fmt_namespaced_var(ns.name, data_type.name, field.name), field.doc)
self.emit_wrapped_text(ivar_doc, subsequent_prefix=' ')
self.emit('"""')
self.emit()
self._generate_union_class_vars(data_type)
self._generate_union_class_variant_creators(ns, data_type)
self._generate_union_class_is_set(data_type)
self._generate_union_class_get_helpers(ns, data_type)
self._generate_union_class_custom_annotations(ns, data_type)
self._generate_union_class_repr(data_type)
self.emit('{0}_validator = bv.Union({0})'.format(
class_name_for_data_type(data_type)
))
self.emit() | Defines a Python class that represents a union in Stone. |
def aitoffImageToSphere(x, y):
"""
Inverse Hammer-Aitoff projection (deg).
"""
x = x - 360.*(x>180)
x = np.asarray(np.radians(x))
y = np.asarray(np.radians(y))
z = np.sqrt(1. - (x / 4.)**2 - (y / 2.)**2) # rad
lon = 2. * np.arctan2((2. * z**2) - 1, (z / 2.) * x)
lat = np.arcsin( y * z)
return ((180. - np.degrees(lon)) % 360.), np.degrees(lat) | Inverse Hammer-Aitoff projection (deg). |
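A quick sanity check (assuming numpy is imported as np, as the function requires): the image origin should map back to lon = 0, lat = 0 on the sphere.
lon, lat = aitoffImageToSphere(0.0, 0.0)
print(lon, lat)  # -> 0.0 0.0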
def sortedbyAge(self):
'''
Sort the population by age (oldest first)
'''
ageAll = numpy.zeros(self.length)
for i in range(self.length):
ageAll[i] = self.Ind[i].age
ageSorted = ageAll.argsort()
return ageSorted[::-1] | Sort the population by age (oldest first) |
def _print_checker_doc(checker_name, info, stream=None):
"""Helper method for print_full_documentation.
Also used by doc/exts/pylint_extensions.py.
"""
if not stream:
stream = sys.stdout
doc = info.get("doc")
module = info.get("module")
msgs = info.get("msgs")
options = info.get("options")
reports = info.get("reports")
checker_title = "%s checker" % (checker_name.replace("_", " ").title())
if module:
# Provide anchor to link against
print(".. _%s:\n" % module, file=stream)
print(checker_title, file=stream)
print("~" * len(checker_title), file=stream)
print("", file=stream)
if module:
print("This checker is provided by ``%s``." % module, file=stream)
print("Verbatim name of the checker is ``%s``." % checker_name, file=stream)
print("", file=stream)
if doc:
# Provide anchor to link against
title = "{} Documentation".format(checker_title)
print(title, file=stream)
print("^" * len(title), file=stream)
print(cleandoc(doc), file=stream)
print("", file=stream)
if options:
title = "{} Options".format(checker_title)
print(title, file=stream)
print("^" * len(title), file=stream)
_rest_format_section(stream, None, options)
print("", file=stream)
if msgs:
title = "{} Messages".format(checker_title)
print(title, file=stream)
print("^" * len(title), file=stream)
for msgid, msg in sorted(
msgs.items(), key=lambda kv: (_MSG_ORDER.index(kv[0][0]), kv[1])
):
msg = build_message_definition(checker_name, msgid, msg)
print(msg.format_help(checkerref=False), file=stream)
print("", file=stream)
if reports:
title = "{} Reports".format(checker_title)
print(title, file=stream)
print("^" * len(title), file=stream)
for report in reports:
print(":%s: %s" % report[:2], file=stream)
print("", file=stream)
print("", file=stream) | Helper method for print_full_documentation.
Also used by doc/exts/pylint_extensions.py. |
def sort_by_ref(vcf_file, data):
"""Sort a VCF file by genome reference and position, adding contig information.
"""
out_file = "%s-prep.vcf.gz" % utils.splitext_plus(vcf_file)[0]
if not utils.file_uptodate(out_file, vcf_file):
with file_transaction(data, out_file) as tx_out_file:
header_file = "%s-header.txt" % utils.splitext_plus(tx_out_file)[0]
with open(header_file, "w") as out_handle:
for region in ref.file_contigs(dd.get_ref_file(data), data["config"]):
out_handle.write("##contig=<ID=%s,length=%s>\n" % (region.name, region.size))
cat_cmd = "zcat" if vcf_file.endswith("vcf.gz") else "cat"
cmd = ("{cat_cmd} {vcf_file} | grep -v ^##contig | bcftools annotate -h {header_file} | "
"vt sort -m full -o {tx_out_file} -")
with utils.chdir(os.path.dirname(tx_out_file)):
do.run(cmd.format(**locals()), "Sort VCF by reference")
return bgzip_and_index(out_file, data["config"]) | Sort a VCF file by genome reference and position, adding contig information. |
def do_POST(self):
"""Handles the HTTP POST request.
Attempts to interpret all HTTP POST requests as XML-RPC calls,
which are forwarded to the server's _dispatch method for handling.
"""
# Check that the path is legal
if not self.is_rpc_path_valid():
self.report_404()
return
try:
# Get arguments by reading body of request.
# We read this in chunks to avoid straining
# socket.read(); around the 10 or 15Mb mark, some platforms
# begin to have problems (bug #792570).
max_chunk_size = 10 * 1024 * 1024
size_remaining = int(self.headers["content-length"])
L = []
while size_remaining:
chunk_size = min(size_remaining, max_chunk_size)
L.append(self.rfile.read(chunk_size))
size_remaining -= len(L[-1])
data = ''.join(L)
# In previous versions of SimpleXMLRPCServer, _dispatch
# could be overridden in this class, instead of in
# SimpleXMLRPCDispatcher. To maintain backwards compatibility,
# check to see if a subclass implements _dispatch and dispatch
# using that method if present.
response = self.server._marshaled_dispatch(
data, getattr(self, '_dispatch', None)
)
except: # This should only happen if the module is buggy
# internal error, report as HTTP server error
self.send_response(500)
self.end_headers()
else:
# got a valid XML RPC response
self.send_response(200)
self.send_header("Content-type", "text/xml")
self.send_header("Content-length", str(len(response)))
self.end_headers()
self.wfile.write(response)
# shut down the connection
self.wfile.flush()
self.connection.shutdown(1) | Handles the HTTP POST request.
Attempts to interpret all HTTP POST requests as XML-RPC calls,
which are forwarded to the server's _dispatch method for handling. |
def act(self, cmd_name, params=None):
""" Run the specified command with its parameters."""
command = getattr(self, cmd_name)
if params:
command(params)
else:
command() | Run the specified command with its parameters. |
def discrete_rainbow(N=7, cmap=cm.Set1, usepreset=True, shuffle=False, \
plot=False):
"""
Return a discrete colormap and the set of colors.
modified from
<http://www.scipy.org/Cookbook/Matplotlib/ColormapTransformations>
cmap: colormap instance, eg. cm.jet.
N: Number of colors.
Example
>>> x = resize(arange(100), (5,100))
>>> djet = cmap_discretize(cm.jet, 5)
>>> imshow(x, cmap=djet)
See available matplotlib colormaps at:
<http://dept.astro.lsa.umich.edu/~msshin/science/code/matplotlib_cm/>
If N>20 the sampled colors might not be very distinctive.
If you want to error and try anyway, set usepreset=False
"""
import random
from scipy import interpolate
if usepreset:
if 0 < N <= 5:
cmap = cm.gist_rainbow
elif N <= 20:
cmap = cm.Set1
else:
sys.exit(discrete_rainbow.__doc__)
cdict = cmap._segmentdata.copy()
# N colors
colors_i = np.linspace(0,1.,N)
# N+1 indices
indices = np.linspace(0,1.,N+1)
rgbs = []
for key in ('red','green','blue'):
# Find the N colors
D = np.array(cdict[key])
I = interpolate.interp1d(D[:,0], D[:,1])
colors = I(colors_i)
rgbs.append(colors)
# Place these colors at the correct indices.
A = np.zeros((N+1,3), float)
A[:,0] = indices
A[1:,1] = colors
A[:-1,2] = colors
# Create a tuple for the dictionary.
L = []
for l in A:
L.append(tuple(l))
cdict[key] = tuple(L)
palette = list(zip(*rgbs))
if shuffle:
random.shuffle(palette)
if plot:
print_colors(palette)
# Return (colormap object, RGB tuples)
return mpl.colors.LinearSegmentedColormap('colormap',cdict,1024), palette | Return a discrete colormap and the set of colors.
modified from
<http://www.scipy.org/Cookbook/Matplotlib/ColormapTransformations>
cmap: colormap instance, eg. cm.jet.
N: Number of colors.
Example
>>> x = resize(arange(100), (5,100))
>>> djet = cmap_discretize(cm.jet, 5)
>>> imshow(x, cmap=djet)
See available matplotlib colormaps at:
<http://dept.astro.lsa.umich.edu/~msshin/science/code/matplotlib_cm/>
If N>20 the sampled colors might not be very distinctive.
To skip the presets and try anyway, set usepreset=False |
def normalize_unicode(text):
"""
Normalize any unicode characters to ascii equivalent
https://docs.python.org/2/library/unicodedata.html#unicodedata.normalize
"""
if isinstance(text, six.text_type):
return unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf8')
else:
return text | Normalize any unicode characters to ascii equivalent
https://docs.python.org/2/library/unicodedata.html#unicodedata.normalize |
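An illustrative call (assuming Python 3, where str is six.text_type):
print(normalize_unicode(u"Café déjà vu"))  # -> Cafe deja vu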
def convert_tkinter_size_to_Wx(size):
"""
Converts size in characters to size in pixels
:param size: size in characters, rows
:return: size in pixels, pixels
"""
qtsize = size
if size[1] is not None and size[1] < DEFAULT_PIXEL_TO_CHARS_CUTOFF: # change from character based size to pixels (roughly)
qtsize = size[0]*DEFAULT_PIXELS_TO_CHARS_SCALING[0], size[1]*DEFAULT_PIXELS_TO_CHARS_SCALING[1]
return qtsize | Converts size in characters to size in pixels
:param size: size in characters, rows
:return: size in pixels, pixels |
def iterate(t_table, wordlist, stanzas, schemes, rprobs, maxsteps):
"""
Iterate EM and return final probabilities
"""
data_probs = numpy.zeros(len(stanzas))
old_data_probs = None
probs = None
num_words = len(wordlist)
ctr = 0
for ctr in range(maxsteps):
logging.info("Iteration {}".format(ctr))
old_data_probs = data_probs
logging.info("Expectation step")
probs = expectation_step(t_table, stanzas, schemes, rprobs)
logging.info("Maximization step")
t_table, rprobs = maximization_step(num_words, stanzas, schemes, probs)
# Warn if it did not converge
data_probs = numpy.logaddexp.reduce(probs, axis=1)
if ctr == maxsteps - 1 and not numpy.allclose(data_probs, old_data_probs):
logging.warning("Warning: EM did not converge")
logging.info("Stopped after {} interations".format(ctr))
return probs | Iterate EM and return final probabilities |