code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k)
---|---|
def expand_abbreviations(template, abbreviations):
"""Expand abbreviations in a template name.
:param template: The project template name.
:param abbreviations: Abbreviation definitions.
"""
if template in abbreviations:
return abbreviations[template]
# Split on colon. If there is no colon, rest will be empty
# and prefix will be the whole template
prefix, sep, rest = template.partition(':')
if prefix in abbreviations:
return abbreviations[prefix].format(rest)
return template | Expand abbreviations in a template name.
:param template: The project template name.
:param abbreviations: Abbreviation definitions. |
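A minimal usage sketch for the function above (the abbreviation table and repo path below are illustrative, not taken from any real project):

abbreviations = {"gh": "https://github.com/{0}.git", "demo": "https://example.org/demo.git"}
print(expand_abbreviations("demo", abbreviations))              # exact match -> full URL
print(expand_abbreviations("gh:user/repo", abbreviations))      # prefix match -> formatted with the part after the colon
print(expand_abbreviations("./local-template", abbreviations))  # no match -> returned unchanged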
def kill(self):
""" Kill the queue-consumer.
Unlike `stop()` any pending message ack or requeue-requests,
requests to remove providers, etc are lost and the consume thread is
asked to terminate as soon as possible.
"""
# greenlet has a magic attribute ``dead`` - pylint: disable=E1101
if self._gt is not None and not self._gt.dead:
# we can't just kill the thread because we have to give
# ConsumerMixin a chance to close the sockets properly.
self._providers = set()
self._pending_remove_providers = {}
self.should_stop = True
try:
self._gt.wait()
except Exception as exc:
# discard the exception since we're already being killed
_log.warn(
'QueueConsumer %s raised `%s` during kill', self, exc)
super(QueueConsumer, self).kill()
_log.debug('killed %s', self) | Kill the queue-consumer.
Unlike `stop()` any pending message ack or requeue-requests,
requests to remove providers, etc are lost and the consume thread is
asked to terminate as soon as possible. |
def __set_log_file_name(self):
"""Automatically set logFileName attribute"""
# ensure directory exists
dir, _ = os.path.split(self.__logFileBasename)
if len(dir) and not os.path.exists(dir):
os.makedirs(dir)
# create logFileName
self.__logFileName = self.__logFileBasename+"."+self.__logFileExtension
number = 0
while os.path.isfile(self.__logFileName):
if os.stat(self.__logFileName).st_size/1e6 < self.__maxlogFileSize:
break
number += 1
self.__logFileName = self.__logFileBasename+"_"+str(number)+"."+self.__logFileExtension
# create log file stream
self.__logFileStream = None | Automatically set logFileName attribute |
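The rollover rule above is easy to exercise in isolation; a standalone sketch (the helper name and arguments are hypothetical, mirroring the attributes the method reads):

import os

def next_log_file(basename, extension, max_mb):
    # Reuse basename.extension until it reaches max_mb, then fall over to basename_1, basename_2, ...
    candidate = basename + "." + extension
    number = 0
    while os.path.isfile(candidate):
        if os.stat(candidate).st_size / 1e6 < max_mb:
            break  # current file still has room
        number += 1
        candidate = basename + "_" + str(number) + "." + extension
    return candidate

print(next_log_file("app", "log", 10.0))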
def create_project(self, project_name, desc):
"""
Send POST to /projects creating a new project with the specified name and desc.
Raises DataServiceError on error.
:param project_name: str name of the project
:param desc: str description of the project
:return: requests.Response containing the successful result
"""
data = {
"name": project_name,
"description": desc
}
return self._post("/projects", data) | Send POST to /projects creating a new project with the specified name and desc.
Raises DataServiceError on error.
:param project_name: str name of the project
:param desc: str description of the project
:return: requests.Response containing the successful result |
def sanitize_win_path(winpath):
'''
Remove illegal path characters for windows
'''
intab = '<>:|?*'
if isinstance(winpath, six.text_type):
winpath = winpath.translate(dict((ord(c), '_') for c in intab))
elif isinstance(winpath, six.string_types):
outtab = '_' * len(intab)
trantab = ''.maketrans(intab, outtab) if six.PY3 else string.maketrans(intab, outtab) # pylint: disable=no-member
winpath = winpath.translate(trantab)
return winpath | Remove illegal path characters for windows |
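On Python 3 the same substitution needs no six shims, since str.translate accepts an ordinal-to-replacement mapping directly; a minimal sketch (hypothetical helper name):

ILLEGAL = '<>:|?*'

def sanitize_win_path_py3(winpath):
    # Replace each illegal character with an underscore.
    return winpath.translate({ord(c): '_' for c in ILLEGAL})

print(sanitize_win_path_py3('report: draft?.txt'))   # report_ draft_.txt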
async def get_agents(self, addr=True, agent_cls=None):
"""Get addresses of all agents in all the slave environments.
This is a managing function for
:meth:`creamas.mp.MultiEnvironment.get_agents`.
.. note::
Since :class:`aiomas.rpc.Proxy` objects do not seem to handle
(re)serialization, ``addr`` and ``agent_cls`` parameters are
omitted from the call to underlying multi-environment's
:meth:`get_agents`.
If :class:`aiomas.rpc.Proxy` objects from all the agents are
needed, call each slave environment manager's :meth:`get_agents`
directly.
"""
return await self.menv.get_agents(addr=True, agent_cls=None,
as_coro=True) | Get addresses of all agents in all the slave environments.
This is a managing function for
:meth:`creamas.mp.MultiEnvironment.get_agents`.
.. note::
Since :class:`aiomas.rpc.Proxy` objects do not seem to handle
(re)serialization, ``addr`` and ``agent_cls`` parameters are
omitted from the call to underlying multi-environment's
:meth:`get_agents`.
If :class:`aiomas.rpc.Proxy` objects from all the agents are
needed, call each slave environment manager's :meth:`get_agents`
directly. |
def add_success(self, group=None, type_='', field='', description=''):
"""parse and append a success data param"""
group = group or '(200)'
group = int(group.lower()[1:-1])
self.retcode = self.retcode or group
if group != self.retcode:
raise ValueError('Two or more retcodes!')
type_ = type_ or '{String}'
p = Param(type_, field, description)
self.params['responce'][p.field] = p | parse and append a success data param |
def unpack_rsp(cls, rsp_pb):
"""Convert from PLS response to user response"""
if rsp_pb.retType != RET_OK:
return RET_ERROR, rsp_pb.retMsg, None
order_id = str(rsp_pb.s2c.orderID)
modify_order_list = [{
'trd_env': TRADE.REV_TRD_ENV_MAP[rsp_pb.s2c.header.trdEnv],
'order_id': order_id
}]
return RET_OK, "", modify_order_list | Convert from PLS response to user response |
def get_enterprise_program_enrollment_page(self, request, enterprise_customer, program_details):
"""
Render Enterprise-specific program enrollment page.
"""
# Safely make the assumption that we can use the first authoring organization.
organizations = program_details['authoring_organizations']
organization = organizations[0] if organizations else {}
platform_name = get_configuration_value('PLATFORM_NAME', settings.PLATFORM_NAME)
program_title = program_details['title']
program_type_details = program_details['type_details']
program_type = program_type_details['name']
# Make any modifications for singular/plural-dependent text.
program_courses = program_details['courses']
course_count = len(program_courses)
course_count_text = ungettext(
'{count} Course',
'{count} Courses',
course_count,
).format(count=course_count)
effort_info_text = ungettext_min_max(
'{} hour per week, per course',
'{} hours per week, per course',
_('{}-{} hours per week, per course'),
program_details.get('min_hours_effort_per_week'),
program_details.get('max_hours_effort_per_week'),
)
length_info_text = ungettext_min_max(
'{} week per course',
'{} weeks per course',
_('{}-{} weeks per course'),
program_details.get('weeks_to_complete_min'),
program_details.get('weeks_to_complete_max'),
)
# Update some enrollment-related text requirements.
if program_details['enrolled_in_program']:
purchase_action = _('Purchase all unenrolled courses')
item = _('enrollment')
else:
purchase_action = _('Pursue the program')
item = _('program enrollment')
# Add any DSC warning messages.
program_data_sharing_consent = get_data_sharing_consent(
request.user.username,
enterprise_customer.uuid,
program_uuid=program_details['uuid'],
)
if program_data_sharing_consent.exists and not program_data_sharing_consent.granted:
messages.add_consent_declined_message(request, enterprise_customer, program_title)
discount_data = program_details.get('discount_data', {})
one_click_purchase_eligibility = program_details.get('is_learner_eligible_for_one_click_purchase', False)
# The following messages shouldn't both appear at the same time, and we prefer the eligibility message.
if not one_click_purchase_eligibility:
messages.add_unenrollable_item_message(request, 'program')
elif discount_data.get('total_incl_tax_excl_discounts') is None:
messages.add_missing_price_information_message(request, program_title)
context_data = get_global_context(request, enterprise_customer)
context_data.update({
'enrolled_in_course_and_paid_text': _('enrolled'),
'enrolled_in_course_and_unpaid_text': _('already enrolled, must pay for certificate'),
'expected_learning_items_text': _("What you'll learn"),
'expected_learning_items_show_count': 2,
'corporate_endorsements_text': _('Real Career Impact'),
'corporate_endorsements_show_count': 1,
'see_more_text': _('See More'),
'see_less_text': _('See Less'),
'confirm_button_text': _('Confirm Program'),
'summary_header': _('Program Summary'),
'price_text': _('Price'),
'length_text': _('Length'),
'effort_text': _('Effort'),
'level_text': _('Level'),
'course_full_description_text': _('About This Course'),
'staff_text': _('Course Staff'),
'close_modal_button_text': _('Close'),
'program_not_eligible_for_one_click_purchase_text': _('Program not eligible for one-click purchase.'),
'program_type_description_header': _('What is an {platform_name} {program_type}?').format(
platform_name=platform_name,
program_type=program_type,
),
'platform_description_header': _('What is {platform_name}?').format(
platform_name=platform_name
),
'organization_name': organization.get('name'),
'organization_logo': organization.get('logo_image_url'),
'organization_text': _('Presented by {organization}').format(organization=organization.get('name')),
'page_title': _('Confirm your {item}').format(item=item),
'program_type_logo': program_type_details['logo_image'].get('medium', {}).get('url', ''),
'program_type': program_type,
'program_type_description': get_program_type_description(program_type),
'program_title': program_title,
'program_subtitle': program_details['subtitle'],
'program_overview': program_details['overview'],
'program_price': get_price_text(discount_data.get('total_incl_tax_excl_discounts', 0), request),
'program_discounted_price': get_price_text(discount_data.get('total_incl_tax', 0), request),
'is_discounted': discount_data.get('is_discounted', False),
'courses': program_courses,
'item_bullet_points': [
_('Credit- and Certificate-eligible'),
_('Self-paced; courses can be taken in any order'),
],
'purchase_text': _('{purchase_action} for').format(purchase_action=purchase_action),
'expected_learning_items': program_details['expected_learning_items'],
'corporate_endorsements': program_details['corporate_endorsements'],
'course_count_text': course_count_text,
'length_info_text': length_info_text,
'effort_info_text': effort_info_text,
'is_learner_eligible_for_one_click_purchase': one_click_purchase_eligibility,
})
return render(request, 'enterprise/enterprise_program_enrollment_page.html', context=context_data) | Render Enterprise-specific program enrollment page. |
def generate_html_report(self, include_turtle=False, exclude_warning=False, list_auxiliary_line=False) -> str:
"""
Shows links to all classes and properties, a nice hierarchy of the classes, and then a nice
description of all the classes with all the properties that apply to it.
Example: http://www.cidoc-crm.org/sites/default/files/Documents/cidoc_crm_version_5.0.4.html
:param include_turtle: include turtle related to this entity.
:param exclude_warning: Exclude warning messages in HTML report
:return: HTML in raw string
"""
import os
template = os.path.dirname(os.path.abspath(__file__)) + '/../ontologies/template.html'
with open(template) as f:
# Lists
content = f.read().replace('{{{title}}}', 'Ontology Entities')
content = content.replace('{{{class_list}}}', self.__html_entities_hierarchy(self.classes))
content = content.replace('{{{dataproperty_list}}}', self.__html_entities_hierarchy(self.data_properties))
content = content.replace('{{{objectproperty_list}}}', self.__html_entities_hierarchy(self.object_properties))
# Classes
content = content.replace('{{{classes}}}', self.__html_classes(include_turtle))
# Properties
properties = self.__html_properties(include_turtle)
content = content.replace('{{{dataproperties}}}', properties[0])
content = content.replace('{{{objectproperties}}}', properties[1])
# Logging
content = content.replace('{{{logging-title}}}', '' if exclude_warning else '<h2>Logging</h2>')
logs = '' if exclude_warning else self.ontology.log_stream.getvalue()
content = content.replace('{{{logging}}}', '<pre><code>{}</code></pre>'.format(logs))
# Auxiliary line
content = content.replace('{{{list_auxiliary_line}}}', self.__show_list_auxiliary_line(list_auxiliary_line))
return content | Shows links to all classes and properties, a nice hierarchy of the classes, and then a nice
description of all the classes with all the properties that apply to it.
Example: http://www.cidoc-crm.org/sites/default/files/Documents/cidoc_crm_version_5.0.4.html
:param include_turtle: include turtle related to this entity.
:param exclude_warning: Exclude warning messages in HTML report
:return: HTML in raw string |
def uninstall_hook(ctx):
""" Uninstall gitlint commit-msg hook. """
try:
lint_config = ctx.obj[0]
hooks.GitHookInstaller.uninstall_commit_msg_hook(lint_config)
# declare victory :-)
hook_path = hooks.GitHookInstaller.commit_msg_hook_path(lint_config)
click.echo(u"Successfully uninstalled gitlint commit-msg hook from {0}".format(hook_path))
ctx.exit(0)
except hooks.GitHookInstallerError as e:
click.echo(ustr(e), err=True)
ctx.exit(GIT_CONTEXT_ERROR_CODE) | Uninstall gitlint commit-msg hook. |
def confirm_login_allowed(self, user):
"""
Controls whether the given User may log in. This is a policy setting,
independent of end-user authentication. This default behavior is to
allow login by active users, and reject login by inactive users.
If the given user cannot log in, this method should raise a
``forms.ValidationError``.
If the given user may log in, this method should return None.
"""
if not user.is_active:
raise forms.ValidationError(
self.error_messages['inactive'],
code='inactive',
) | Controls whether the given User may log in. This is a policy setting,
independent of end-user authentication. This default behavior is to
allow login by active users, and reject login by inactive users.
If the given user cannot log in, this method should raise a
``forms.ValidationError``.
If the given user may log in, this method should return None. |
def global_position_int_cov_encode(self, time_boot_ms, time_utc, estimator_type, lat, lon, alt, relative_alt, vx, vy, vz, covariance):
'''
The filtered global position (e.g. fused GPS and accelerometers). The
position is in GPS-frame (right-handed, Z-up). It is
designed as scaled integer message since the
resolution of float is not sufficient. NOTE: This
message is intended for onboard networks / companion
computers and higher-bandwidth links and optimized for
accuracy and completeness. Please use the
GLOBAL_POSITION_INT message for a minimal subset.
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
time_utc : Timestamp (microseconds since UNIX epoch) in UTC. 0 for unknown. Commonly filled by the precision time source of a GPS receiver. (uint64_t)
estimator_type : Class id of the estimator this estimate originated from. (uint8_t)
lat : Latitude, expressed as degrees * 1E7 (int32_t)
lon : Longitude, expressed as degrees * 1E7 (int32_t)
alt : Altitude in meters, expressed as * 1000 (millimeters), above MSL (int32_t)
relative_alt : Altitude above ground in meters, expressed as * 1000 (millimeters) (int32_t)
vx : Ground X Speed (Latitude), expressed as m/s (float)
vy : Ground Y Speed (Longitude), expressed as m/s (float)
vz : Ground Z Speed (Altitude), expressed as m/s (float)
covariance : Covariance matrix (first six entries are the first ROW, next six entries are the second row, etc.) (float)
'''
return MAVLink_global_position_int_cov_message(time_boot_ms, time_utc, estimator_type, lat, lon, alt, relative_alt, vx, vy, vz, covariance) | The filtered global position (e.g. fused GPS and accelerometers). The
position is in GPS-frame (right-handed, Z-up). It is
designed as scaled integer message since the
resolution of float is not sufficient. NOTE: This
message is intended for onboard networks / companion
computers and higher-bandwidth links and optimized for
accuracy and completeness. Please use the
GLOBAL_POSITION_INT message for a minimal subset.
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
time_utc : Timestamp (microseconds since UNIX epoch) in UTC. 0 for unknown. Commonly filled by the precision time source of a GPS receiver. (uint64_t)
estimator_type : Class id of the estimator this estimate originated from. (uint8_t)
lat : Latitude, expressed as degrees * 1E7 (int32_t)
lon : Longitude, expressed as degrees * 1E7 (int32_t)
alt : Altitude in meters, expressed as * 1000 (millimeters), above MSL (int32_t)
relative_alt : Altitude above ground in meters, expressed as * 1000 (millimeters) (int32_t)
vx : Ground X Speed (Latitude), expressed as m/s (float)
vy : Ground Y Speed (Longitude), expressed as m/s (float)
vz : Ground Z Speed (Altitude), expressed as m/s (float)
covariance : Covariance matrix (first six entries are the first ROW, next six entries are the second row, etc.) (float) |
def load_alias_hash(self):
"""
Load (create, if not exist) the alias hash file.
"""
# w+ creates the alias hash file if it does not exist
open_mode = 'r+' if os.path.exists(GLOBAL_ALIAS_HASH_PATH) else 'w+'
with open(GLOBAL_ALIAS_HASH_PATH, open_mode) as alias_config_hash_file:
self.alias_config_hash = alias_config_hash_file.read() | Load (create, if not exist) the alias hash file. |
def cmdline_generator(param_iter, PathToBin=None, PathToCmd=None,
PathsToInputs=None, PathToOutput=None,
PathToStderr='/dev/null', PathToStdout='/dev/null',
UniqueOutputs=False, InputParam=None,
OutputParam=None):
"""Generates command lines that can be used in a cluster environment
param_iter : ParameterIterBase subclass instance
PathToBin : Absolute location primary command (i.e. Python)
PathToCmd : Absolute location of the command
PathsToInputs : Absolute location(s) of input file(s)
PathToOutput : Absolute location of output file
PathToStderr : Path to stderr
PathToStdout : Path to stdout
UniqueOutputs : Generate unique tags for output files
InputParam : Application input parameter (if not specified, assumes
stdin is to be used)
OutputParam : Application output parameter (if not specified, assumes
stdout is to be used)
"""
# Make sure we have input(s) and output
if not PathsToInputs:
raise ValueError("No input file(s) specified.")
if not PathToOutput:
raise ValueError("No output file specified.")
if not isinstance(PathsToInputs, list):
PathsToInputs = [PathsToInputs]
# PathToBin and PathToCmd can be blank
if PathToBin is None:
PathToBin = ''
if PathToCmd is None:
PathToCmd = ''
# stdout_ and stderr_ do not have to be redirected
if PathToStdout is None:
stdout_ = ''
else:
stdout_ = '> "%s"' % PathToStdout
if PathToStderr is None:
stderr_ = ''
else:
stderr_ = '2> "%s"' % PathToStderr
# Output can be redirected to stdout or specified output argument
if OutputParam is None:
output = '> "%s"' % PathToOutput
stdout_ = ''
else:
output_param = param_iter.AppParams[OutputParam]
output_param.on('"%s"' % PathToOutput)
output = str(output_param)
output_param.off()
output_count = 0
base_command = ' '.join([PathToBin, PathToCmd])
for params in param_iter:
# Support for multiple input files
for inputfile in PathsToInputs:
cmdline = [base_command]
cmdline.extend(sorted(filter(None, map(str, params.values()))))
# Input can come from stdin or specified input argument
if InputParam is None:
input = '< "%s"' % inputfile
else:
input_param = params[InputParam]
input_param.on('"%s"' % inputfile)
input = str(input_param)
input_param.off()
cmdline.append(input)
if UniqueOutputs:
cmdline.append(''.join([output, str(output_count)]))
output_count += 1
else:
cmdline.append(output)
cmdline.append(stdout_)
cmdline.append(stderr_)
yield ' '.join(cmdline) | Generates command lines that can be used in a cluster environment
param_iter : ParameterIterBase subclass instance
PathToBin : Absolute location primary command (i.e. Python)
PathToCmd : Absolute location of the command
PathsToInputs : Absolute location(s) of input file(s)
PathToOutput : Absolute location of output file
PathToStderr : Path to stderr
PathToStdout : Path to stdout
UniqueOutputs : Generate unique tags for output files
InputParam : Application input parameter (if not specified, assumes
stdin is to be used)
OutputParam : Application output parameter (if not specified, assumes
stdout is to be used) |
def _set_show_bare_metal_state(self, v, load=False):
"""
Setter method for show_bare_metal_state, mapped from YANG variable /brocade_preprovision_rpc/show_bare_metal_state (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_show_bare_metal_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_show_bare_metal_state() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=show_bare_metal_state.show_bare_metal_state, is_leaf=True, yang_name="show-bare-metal-state", rest_name="show-bare-metal-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Retrieve bare-metal state.', u'hidden': u'rpccmd', u'actionpoint': u'get-bare-metal-state'}}, namespace='urn:brocade.com:mgmt:brocade-preprovision', defining_module='brocade-preprovision', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """show_bare_metal_state must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=show_bare_metal_state.show_bare_metal_state, is_leaf=True, yang_name="show-bare-metal-state", rest_name="show-bare-metal-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Retrieve bare-metal state.', u'hidden': u'rpccmd', u'actionpoint': u'get-bare-metal-state'}}, namespace='urn:brocade.com:mgmt:brocade-preprovision', defining_module='brocade-preprovision', yang_type='rpc', is_config=True)""",
})
self.__show_bare_metal_state = t
if hasattr(self, '_set'):
self._set() | Setter method for show_bare_metal_state, mapped from YANG variable /brocade_preprovision_rpc/show_bare_metal_state (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_show_bare_metal_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_show_bare_metal_state() directly. |
def sync_focus(self, *_):
"""
Focus the focused window from the pymux arrangement.
"""
# Pop-up displayed?
if self.display_popup:
self.app.layout.focus(self.layout_manager.popup_dialog)
return
# Confirm.
if self.confirm_text:
return
# Custom prompt.
if self.prompt_command:
return # Focus prompt
# Command mode.
if self.command_mode:
return # Focus command
# No windows left, return. We will quit soon.
if not self.pymux.arrangement.windows:
return
pane = self.pymux.arrangement.get_active_pane()
self.app.layout.focus(pane.terminal) | Focus the focused window from the pymux arrangement. |
def from_dict(cls, d):
"""
Reconstructs the MultiWeightsChemenvStrategy object from a dict representation of the
MultipleAbundanceChemenvStrategy object created using the as_dict method.
:param d: dict representation of the MultiWeightsChemenvStrategy object
:return: MultiWeightsChemenvStrategy object
"""
if d["normalized_angle_distance_weight"] is not None:
nad_w = NormalizedAngleDistanceNbSetWeight.from_dict(d["normalized_angle_distance_weight"])
else:
nad_w = None
return cls(additional_condition=d["additional_condition"],
symmetry_measure_type=d["symmetry_measure_type"],
dist_ang_area_weight=DistanceAngleAreaNbSetWeight.from_dict(d["dist_ang_area_weight"])
if d["dist_ang_area_weight"] is not None else None,
self_csm_weight=SelfCSMNbSetWeight.from_dict(d["self_csm_weight"])
if d["self_csm_weight"] is not None else None,
delta_csm_weight=DeltaCSMNbSetWeight.from_dict(d["delta_csm_weight"])
if d["delta_csm_weight"] is not None else None,
cn_bias_weight=CNBiasNbSetWeight.from_dict(d["cn_bias_weight"])
if d["cn_bias_weight"] is not None else None,
angle_weight=AngleNbSetWeight.from_dict(d["angle_weight"])
if d["angle_weight"] is not None else None,
normalized_angle_distance_weight=nad_w,
ce_estimator=d["ce_estimator"]) | Reconstructs the MultiWeightsChemenvStrategy object from a dict representation of the
MultipleAbundanceChemenvStrategy object created using the as_dict method.
:param d: dict representation of the MultiWeightsChemenvStrategy object
:return: MultiWeightsChemenvStrategy object |
def get_http_info_with_retriever(self, request, retriever):
"""
Exact method for getting http_info but with form data work around.
"""
urlparts = urlparse.urlsplit(request.url)
try:
data = retriever(request)
except Exception:
data = {}
return {
'url': '{0}://{1}{2}'.format(
urlparts.scheme, urlparts.netloc, urlparts.path),
'query_string': urlparts.query,
'method': request.method,
'data': data,
'cookies': request.cookies,
'headers': request.headers,
'env': {
'REMOTE_ADDR': request.remote_addr,
}
} | Exact method for getting http_info but with form data work around. |
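The helper above mostly repackages the pieces of urlsplit; a standard-library illustration of that decomposition:

from urllib.parse import urlsplit

parts = urlsplit('https://example.com/search?q=errors#top')
print('{0}://{1}{2}'.format(parts.scheme, parts.netloc, parts.path))   # https://example.com/search
print(parts.query)                                                     # q=errors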
def normal_cdf(x, mu=0, sigma=1):
"""Cumulative Normal Distribution Function.
:param x: scalar or array of real numbers.
:type x: numpy.ndarray, float
:param mu: Mean value. Default 0.
:type mu: float, numpy.ndarray
:param sigma: Standard deviation. Default 1.
:type sigma: float
:returns: An approximation of the cdf of the normal.
:rtype: numpy.ndarray
Note:
CDF of the normal distribution is defined as
\frac12 [1 + erf(\frac{x - \mu}{\sigma \sqrt{2}})], x \in \R
Source: http://en.wikipedia.org/wiki/Normal_distribution
"""
arg = (x - mu) / (sigma * numpy.sqrt(2))
res = (1 + erf(arg)) / 2
return res | Cumulative Normal Distribution Function.
:param x: scalar or array of real numbers.
:type x: numpy.ndarray, float
:param mu: Mean value. Default 0.
:type mu: float, numpy.ndarray
:param sigma: Standard deviation. Default 1.
:type sigma: float
:returns: An approximation of the cdf of the normal.
:rtype: numpy.ndarray
Note:
CDF of the normal distribution is defined as
\frac12 [1 + erf(\frac{x - \mu}{\sigma \sqrt{2}})], x \in \R
Source: http://en.wikipedia.org/wiki/Normal_distribution |
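A quick sanity check of the formula, restated for scalars with the standard library's erf (the original's erf import is not shown above, so this is a self-contained sketch):

from math import erf, sqrt

def normal_cdf_scalar(x, mu=0.0, sigma=1.0):
    # 1/2 * [1 + erf((x - mu) / (sigma * sqrt(2)))]
    return (1 + erf((x - mu) / (sigma * sqrt(2)))) / 2

print(normal_cdf_scalar(0.0))    # 0.5 by symmetry
print(normal_cdf_scalar(1.96))   # ~0.975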
def in_cwd():
"""
Return list of configs in current working directory.
If filename is ``.tmuxp.py``, ``.tmuxp.json``, ``.tmuxp.yaml``.
Returns
-------
list
configs in current working directory
"""
configs = []
for filename in os.listdir(os.getcwd()):
if filename.startswith('.tmuxp') and is_config_file(filename):
configs.append(filename)
return configs | Return list of configs in current working directory.
If filename is ``.tmuxp.py``, ``.tmuxp.json``, ``.tmuxp.yaml``.
Returns
-------
list
configs in current working directory |
def new(cls, user, provider, federated_id):
"""
Create a new login
:param user: AuthUser
:param provider: str - ie: facebook, twitter, ...
:param federated_id: str - an id associated to provider
:return:
"""
if cls.get_user(provider, federated_id):
raise exceptions.AuthError("Federation already")
return cls.create(user_id=user.id,
provider=provider,
federated_id=federated_id) | Create a new login
:param user: AuthUser
:param provider: str - ie: facebook, twitter, ...
:param federated_id: str - an id associated to provider
:return: |
def reset_next_ids(classes):
"""
For each class in the list, if the .next_id attribute is not None
(meaning the table has an ID generator associated with it), set
.next_id to 0. This has the effect of resetting the ID generators,
and is useful in applications that process multiple documents and
add new rows to tables in those documents. Calling this function
between documents prevents new row IDs from growing continuously
from document to document. There is no need to do this; its
purpose is merely aesthetic, but it can be confusing to open a
document and find process ID 300 in the process table and wonder
what happened to the other 299 processes.
Example:
>>> import lsctables
>>> reset_next_ids(lsctables.TableByName.values())
"""
for cls in classes:
if cls.next_id is not None:
cls.set_next_id(type(cls.next_id)(0)) | For each class in the list, if the .next_id attribute is not None
(meaning the table has an ID generator associated with it), set
.next_id to 0. This has the effect of resetting the ID generators,
and is useful in applications that process multiple documents and
add new rows to tables in those documents. Calling this function
between documents prevents new row IDs from growing continuously
from document to document. There is no need to do this; its
purpose is merely aesthetic, but it can be confusing to open a
document and find process ID 300 in the process table and wonder
what happened to the other 299 processes.
Example:
>>> import lsctables
>>> reset_next_ids(lsctables.TableByName.values()) |
def stop_processes(self):
"""Iterate through all of the consumer processes shutting them down."""
self.set_state(self.STATE_SHUTTING_DOWN)
LOGGER.info('Stopping consumer processes')
signal.signal(signal.SIGABRT, signal.SIG_IGN)
signal.signal(signal.SIGALRM, signal.SIG_IGN)
signal.signal(signal.SIGCHLD, signal.SIG_IGN)
signal.signal(signal.SIGPROF, signal.SIG_IGN)
signal.setitimer(signal.ITIMER_REAL, 0, 0)
# Send SIGABRT
LOGGER.info('Sending SIGABRT to active children')
for proc in multiprocessing.active_children():
if int(proc.pid) != os.getpid():
try:
os.kill(int(proc.pid), signal.SIGABRT)
except OSError:
pass
# Wait for them to finish up to MAX_SHUTDOWN_WAIT
for iteration in range(0, self.MAX_SHUTDOWN_WAIT):
processes = len(self.active_processes(False))
if not processes:
break
LOGGER.info('Waiting on %i active processes to shut down (%i/%i)',
processes, iteration, self.MAX_SHUTDOWN_WAIT)
try:
time.sleep(0.5)
except KeyboardInterrupt:
break
if len(self.active_processes(False)):
self.kill_processes()
LOGGER.debug('All consumer processes stopped')
self.set_state(self.STATE_STOPPED) | Iterate through all of the consumer processes shutting them down. |
def _make_params_pb(params, param_types):
"""Helper for :meth:`execute_update`.
:type params: dict, {str -> column value}
:param params: values for parameter replacement. Keys must match
the names used in ``dml``.
:type param_types: dict[str -> Union[dict, .types.Type]]
:param param_types:
(Optional) maps explicit types for one or more param values;
required if parameters are passed.
:rtype: Union[None, :class:`Struct`]
:returns: a struct message for the passed params, or None
:raises ValueError:
If ``param_types`` is None but ``params`` is not None.
:raises ValueError:
If ``params`` is None but ``param_types`` is not None.
"""
if params is not None:
if param_types is None:
raise ValueError("Specify 'param_types' when passing 'params'.")
return Struct(
fields={key: _make_value_pb(value) for key, value in params.items()}
)
else:
if param_types is not None:
raise ValueError("Specify 'params' when passing 'param_types'.")
return None | Helper for :meth:`execute_update`.
:type params: dict, {str -> column value}
:param params: values for parameter replacement. Keys must match
the names used in ``dml``.
:type param_types: dict[str -> Union[dict, .types.Type]]
:param param_types:
(Optional) maps explicit types for one or more param values;
required if parameters are passed.
:rtype: Union[None, :class:`Struct`]
:returns: a struct message for the passed params, or None
:raises ValueError:
If ``param_types`` is None but ``params`` is not None.
:raises ValueError:
If ``params`` is None but ``param_types`` is not None. |
def setup_packages():
'''
Custom setup for Corrfunc package.
Optional: Set compiler via 'CC=/path/to/compiler' or
'CC /path/to/compiler' or 'CC = /path/to/compiler'
All the CC options are removed from sys.argv after
being parsed.
'''
# protect the user in case they run python setup.py not from root directory
src_path = dirname(abspath(sys.argv[0]))
old_path = os.getcwd()
os.chdir(src_path)
sys.path.insert(0, src_path)
# create a list of the python extensions
python_dirs = ["theory/python_bindings",
"mocks/python_bindings"]
extensions = generate_extensions(python_dirs)
# check requirement for extensions and set the compiler if specified
# in command-line
common_dict = requirements_check()
# Some command options require headers/libs to be generated
# so that the following dirs_patters supplies them.
if install_required():
from distutils.sysconfig import get_config_var
if get_config_var('SHLIB_EXT') != '".so"' and version_info[0] == 2:
msg = "The extensions all get the `.so` automatically. "\
"However, python expects the extension to be `{0}`"\
.format(get_config_var('SHLIB_EXT'))
raise ValueError(msg)
# global variable compiler is set if passed in
# command-line
extra_string = ''
if compiler != '':
extra_string = 'CC={0}'.format(compiler)
command = "make libs {0}".format(extra_string)
run_command(command)
else:
# not installing. Check if creating source distribution
# in that case run distclean to delete auto-generated C
# files
if 'sdist' in sys.argv:
command = "make distclean"
run_command(command)
# find all the data-files required.
# Now the lib + associated header files have been generated
# and put in lib/ and include/
# This step must run after ``make install``
dirs_patterns = {'theory/tests/data': ['*.ff', '*.txt',
'*.txt.gz', '*.dat'],
'mocks/tests/data': ['*.ff', '*.txt',
'*.txt.gz', '*.dat'],
'theory/tests': ['Mr19*', 'bins*', 'cmass*'],
'mocks/tests': ['Mr19*', 'bins*', 'angular_bins*'],
'include': ['count*.h'],
'lib': ['libcount*.a']
}
data_files = []
for d in dirs_patterns:
patterns = dirs_patterns[d]
f = recursive_glob(d, patterns)
data_files.extend(f)
# change them to be relative to package dir rather than root
data_files = ["../{0}".format(d) for d in data_files]
long_description = read_text_file('README.rst')
min_np_major = int(common_dict['MIN_NUMPY_MAJOR'][0])
min_np_minor = int(common_dict['MIN_NUMPY_MINOR'][0])
# All book-keeping is done.
# base_url = "https://github.com/manodeep/Corrfunc"
classifiers = ['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX',
'Programming Language :: C',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6']
metadata = dict(
name=projectname,
version=version,
author='Manodeep Sinha',
author_email='[email protected]',
maintainer='Manodeep Sinha',
maintainer_email='[email protected]',
url=base_url,
download_url='{0}/archive/{1}-{2}.tar.gz'.format(
base_url, projectname, version),
description='Blazing fast correlation functions on the CPU',
long_description=long_description,
classifiers=classifiers,
license='MIT',
# Solaris might work, Windows will almost certainly not work
platforms=["Linux", "Mac OSX", "Unix"],
keywords=['correlation functions', 'simulations',
'surveys', 'galaxies'],
provides=[projectname],
packages=find_packages(),
ext_package=projectname,
ext_modules=extensions,
package_data={'': data_files},
include_package_data=True,
setup_requires=['setuptools',
'numpy>={0}.{1}'.format(min_np_major,
min_np_minor)],
install_requires=['numpy>={0}.{1}'.format(min_np_major,
min_np_minor),
'future',
'wurlitzer'],
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4',
zip_safe=False,
cmdclass={'build_ext': BuildExtSubclass})
# Now the actual setup
try:
setup(**metadata)
finally:
del sys.path[0]
os.chdir(old_path)
return | Custom setup for Corrfunc package.
Optional: Set compiler via 'CC=/path/to/compiler' or
'CC /path/to/compiler' or 'CC = /path/to/compiler'
All the CC options are removed from sys.argv after
being parsed. |
def remove(self, point, **kwargs):
"""!
@brief Remove specified point from kd-tree.
@details It removes the first found node that satisfies the input parameters. Make sure that
the pair (point, payload) is unique for each node, otherwise the first found is removed.
@param[in] point (list): Coordinates of the point of removed node.
@param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'payload').
<b>Keyword Args:</b><br>
- payload (any): Payload of the node that should be removed.
@return (node) Root if node has been successfully removed, otherwise None.
"""
# Get required node
node_for_remove = None
if 'payload' in kwargs:
node_for_remove = self.find_node_with_payload(point, kwargs['payload'], None)
else:
node_for_remove = self.find_node(point, None)
if node_for_remove is None:
return None
parent = node_for_remove.parent
minimal_node = self.__recursive_remove(node_for_remove)
if parent is None:
self.__root = minimal_node
# If all k-d tree was destroyed
if minimal_node is not None:
minimal_node.parent = None
else:
if parent.left is node_for_remove:
parent.left = minimal_node
elif parent.right is node_for_remove:
parent.right = minimal_node
return self.__root | !
@brief Remove specified point from kd-tree.
@details It removes the first found node that satisfies the input parameters. Make sure that
the pair (point, payload) is unique for each node, otherwise the first found is removed.
@param[in] point (list): Coordinates of the point of removed node.
@param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'payload').
<b>Keyword Args:</b><br>
- payload (any): Payload of the node that should be removed.
@return (node) Root if node has been successfully removed, otherwise None. |
def _Bern_to_Fierz_III_IV_V(C, qqqq):
"""From Bern to 4-quark Fierz basis for Classes III, IV and V.
`qqqq` should be of the form 'sbuc', 'sdcc', 'ucuu' etc."""
# 2nd != 4th, color-octet redundant
if qqqq in ['sbss', 'dbdd', 'dbds', 'sbsd', 'bsbd', 'dsdd']:
return {
'F' + qqqq + '1': C['1' + qqqq] + 16 * C['3' + qqqq],
'F' + qqqq + '1p': C['1p' + qqqq] + 16 * C['3p' + qqqq],
'F' + qqqq + '3': C['1' + qqqq] + 4 * C['3' + qqqq],
'F' + qqqq + '3p': C['1p' + qqqq] + 4 * C['3p' + qqqq],
'F' + qqqq + '5': C['5p' + qqqq] + 64 * C['9p' + qqqq],
'F' + qqqq + '5p': C['5' + qqqq] + 64 * C['9' + qqqq],
'F' + qqqq + '7': C['5p' + qqqq] + 16 * C['9p' + qqqq],
'F' + qqqq + '7p': C['5' + qqqq] + 16 * C['9' + qqqq],
'F' + qqqq + '9': C['7p' + qqqq] - 16 * C['9p' + qqqq],
'F' + qqqq + '9p': C['7' + qqqq] - 16 * C['9' + qqqq],
}
if qqqq in ['dbbb', 'sbbb', 'dsss']: # 2nd = 4th, color-octet redundant
return {
'F' + qqqq + '1': C['1' + qqqq] + 16 * C['3' + qqqq],
'F' + qqqq + '1p': C['1p' + qqqq] + 16 * C['3p' + qqqq],
'F' + qqqq + '3': C['1' + qqqq] + 4 * C['3' + qqqq],
'F' + qqqq + '3p': C['1p' + qqqq] + 4 * C['3p' + qqqq],
'F' + qqqq + '5': C['5' + qqqq] + 64 * C['9' + qqqq],
'F' + qqqq + '5p': C['5p' + qqqq] + 64 * C['9p' + qqqq],
'F' + qqqq + '7': C['5' + qqqq] + 16 * C['9' + qqqq],
'F' + qqqq + '7p': C['5p' + qqqq] + 16 * C['9p' + qqqq],
'F' + qqqq + '9': C['7' + qqqq] - 16 * C['9' + qqqq],
'F' + qqqq + '9p': C['7p' + qqqq] - 16 * C['9p' + qqqq],
}
# generic case
if qqqq in ['sbuu', 'sbdd', 'sbuu', 'sbuc', 'sbcu', 'sbcc',
'dbuu', 'dbss', 'dbuu', 'dbuc', 'dbcu', 'dbcc',
'dsuu', 'dsbb', 'dsuu', 'dsuc', 'dscu', 'dscc',]:
return {
'F' + qqqq + '1': C['1' + qqqq] - C['2' + qqqq] / 6 + 16 * C['3' + qqqq] - (8 * C['4' + qqqq]) / 3,
'F' + qqqq + '10': -8 * C['10' + qqqq] + C['8' + qqqq] / 2,
'F' + qqqq + '10p': -8 * C['10p' + qqqq] + C['8p' + qqqq] / 2,
'F' + qqqq + '1p': C['1p' + qqqq] - C['2p' + qqqq] / 6 + 16 * C['3p' + qqqq] - (8 * C['4p' + qqqq]) / 3,
'F' + qqqq + '2': C['2' + qqqq] / 2 + 8 * C['4' + qqqq],
'F' + qqqq + '2p': C['2p' + qqqq] / 2 + 8 * C['4p' + qqqq],
'F' + qqqq + '3': C['1' + qqqq] - C['2' + qqqq] / 6 + 4 * C['3' + qqqq] - (2 * C['4' + qqqq]) / 3,
'F' + qqqq + '3p': C['1p' + qqqq] - C['2p' + qqqq] / 6 + 4 * C['3p' + qqqq] - (2 * C['4p' + qqqq]) / 3,
'F' + qqqq + '4': C['2' + qqqq] / 2 + 2 * C['4' + qqqq],
'F' + qqqq + '4p': C['2p' + qqqq] / 2 + 2 * C['4p' + qqqq],
'F' + qqqq + '5': -((32 * C['10' + qqqq]) / 3) + C['5' + qqqq] - C['6' + qqqq] / 6 + 64 * C['9' + qqqq],
'F' + qqqq + '5p': -((32 * C['10p' + qqqq]) / 3) + C['5p' + qqqq] - C['6p' + qqqq] / 6 + 64 * C['9p' + qqqq],
'F' + qqqq + '6': 32 * C['10' + qqqq] + C['6' + qqqq] / 2,
'F' + qqqq + '6p': 32 * C['10p' + qqqq] + C['6p' + qqqq] / 2,
'F' + qqqq + '7': -((8 * C['10' + qqqq]) / 3) + C['5' + qqqq] - C['6' + qqqq] / 6 + 16 * C['9' + qqqq],
'F' + qqqq + '7p': -((8 * C['10p' + qqqq]) / 3) + C['5p' + qqqq] - C['6p' + qqqq] / 6 + 16 * C['9p' + qqqq],
'F' + qqqq + '8': 8 * C['10' + qqqq] + C['6' + qqqq] / 2,
'F' + qqqq + '8p': 8 * C['10p' + qqqq] + C['6p' + qqqq] / 2,
'F' + qqqq + '9': (8 * C['10' + qqqq]) / 3 + C['7' + qqqq] - C['8' + qqqq] / 6 - 16 * C['9' + qqqq],
'F' + qqqq + '9p': (8 * C['10p' + qqqq]) / 3 + C['7p' + qqqq] - C['8p' + qqqq] / 6 - 16 * C['9p' + qqqq],
}
raise ValueError("Case not implemented: {}".format(qqqq)) | From Bern to 4-quark Fierz basis for Classes III, IV and V.
`qqqq` should be of the form 'sbuc', 'sdcc', 'ucuu' etc. |
def clean_all(ctx, dry_run=False):
"""Clean up everything, even the precious stuff.
NOTE: clean task is executed first.
"""
cleanup_dirs(ctx.clean_all.directories or [], dry_run=dry_run)
cleanup_dirs(ctx.clean_all.extra_directories or [], dry_run=dry_run)
cleanup_files(ctx.clean_all.files or [], dry_run=dry_run)
cleanup_files(ctx.clean_all.extra_files or [], dry_run=dry_run)
execute_cleanup_tasks(ctx, cleanup_all_tasks, dry_run=dry_run)
clean(ctx, dry_run=dry_run) | Clean up everything, even the precious stuff.
NOTE: clean task is executed first. |
def __lookup_builtin(name):
"""Lookup the parameter name and default parameter values for
builtin functions.
"""
global __builtin_functions
if __builtin_functions is None:
builtins = dict()
for proto in __builtins:
pos = proto.find('(')
name, params, defaults = proto[:pos], list(), dict()
for param in proto[pos + 1:-1].split(','):
pos = param.find('=')
if not pos < 0:
param, value = param[:pos], param[pos + 1:]
try:
defaults[param] = __builtin_defaults[value]
except KeyError:
raise ValueError(
'builtin function %s: parameter %s: '
'unknown default %r' % (name, param, value))
params.append(param)
builtins[name] = (params, defaults)
__builtin_functions = builtins
try:
params, defaults = __builtin_functions[name]
except KeyError:
params, defaults = tuple(), dict()
__builtin_functions[name] = (params, defaults)
print(
"Warning: builtin function %r is missing prototype" % name,
file=sys.stderr)
return len(params), params, defaults | Lookup the parameter name and default parameter values for
builtin functions. |
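The prototype-parsing loop above can be lifted out and tested on its own; a standalone sketch (parse_prototype and known_defaults are hypothetical stand-ins for the module-level tables the original reads):

def parse_prototype(proto, known_defaults):
    # Split 'name(p1, p2=default)' into its name, parameter list and default map.
    pos = proto.find('(')
    name, params, defaults = proto[:pos], [], {}
    for param in proto[pos + 1:-1].split(','):
        param = param.strip()
        if not param:
            continue
        eq = param.find('=')
        if eq >= 0:
            param, value = param[:eq], param[eq + 1:]
            defaults[param] = known_defaults[value]
        params.append(param)
    return name, params, defaults

print(parse_prototype('round(number, ndigits=None)', {'None': None}))
# ('round', ['number', 'ndigits'], {'ndigits': None})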
def post_build_time_coverage(self):
"""Collect all of the time coverage for the bundle."""
from ambry.util.datestimes import expand_to_years
years = set()
# From the bundle about
if self.metadata.about.time:
for year in expand_to_years(self.metadata.about.time):
years.add(year)
# From the bundle name
if self.identity.btime:
for year in expand_to_years(self.identity.btime):
years.add(year)
# From all of the partitions
for p in self.partitions:
years |= set(p.time_coverage) | Collect all of the time coverage for the bundle. |
def xerrorbar(self, canvas, X, Y, error, color=None, label=None, **kwargs):
"""
Make an errorbar along the xaxis for points at (X,Y) on the canvas.
if error is two dimensional, the lower error is error[:,0] and
the upper error is error[:,1]
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library") | Make an errorbar along the xaxis for points at (X,Y) on the canvas.
if error is two dimensional, the lower error is error[:,0] and
the upper error is error[:,1]
the kwargs are plotting library specific kwargs! |
def sources(verbose=False):
'''
Return a list of available sources
verbose : boolean (False)
toggle verbose output
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' imgadm.sources
'''
ret = {}
cmd = 'imgadm sources -j'
res = __salt__['cmd.run_all'](cmd)
retcode = res['retcode']
if retcode != 0:
ret['Error'] = _exit_status(retcode)
return ret
for src in salt.utils.json.loads(res['stdout']):
ret[src['url']] = src
del src['url']
if not verbose:
ret = list(ret)
return ret | Return a list of available sources
verbose : boolean (False)
toggle verbose output
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' imgadm.sources |
def apply (self, img):
yup,uup,vup = self.getUpLimit()
ydwn,udwn,vdwn = self.getDownLimit()
''' We convert from BGR because, when given RGB,
OpenCV produces YVU instead of YUV'''
yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
minValues = np.array([ydwn,udwn,vdwn],dtype=np.uint8)
maxValues = np.array([yup,uup,vup], dtype=np.uint8)
mask = cv2.inRange(yuv, minValues, maxValues)
res = cv2.bitwise_and(img,img, mask= mask)
return res | We convert from BGR because, when given RGB,
OpenCV produces YVU instead of YUV |
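A tiny synthetic example of the same masking pipeline (requires opencv-python and numpy; the threshold values are illustrative):

import numpy as np
import cv2

img = np.zeros((2, 2, 3), dtype=np.uint8)   # black BGR image
img[0, 0] = (0, 0, 255)                     # one red pixel (BGR order)
yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
mask = cv2.inRange(yuv, np.array([0, 0, 160], np.uint8), np.array([255, 255, 255], np.uint8))
result = cv2.bitwise_and(img, img, mask=mask)
print(mask)   # the red pixel's V channel is high, so only it should survive this threshold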
def get_string(strings: Sequence[str],
prefix: str,
ignoreleadingcolon: bool = False,
precedingline: str = "") -> Optional[str]:
"""
Find a string as per :func:`get_what_follows`.
Args:
strings: see :func:`get_what_follows`
prefix: see :func:`get_what_follows`
ignoreleadingcolon: if ``True``, restrict the result to what comes
after its first colon (and whitespace-strip that)
precedingline: see :func:`get_what_follows`
Returns:
the line fragment
"""
s = get_what_follows(strings, prefix, precedingline=precedingline)
if ignoreleadingcolon:
f = s.find(":")
if f != -1:
s = s[f+1:].strip()
if len(s) == 0:
return None
return s | Find a string as per :func:`get_what_follows`.
Args:
strings: see :func:`get_what_follows`
prefix: see :func:`get_what_follows`
ignoreleadingcolon: if ``True``, restrict the result to what comes
after its first colon (and whitespace-strip that)
precedingline: see :func:`get_what_follows`
Returns:
the line fragment |
def change_weibo_header(uri, headers, body):
"""Since weibo is a rubbish server, it does not follow the standard,
we need to change the authorization header for it."""
auth = headers.get('Authorization')
if auth:
auth = auth.replace('Bearer', 'OAuth2')
headers['Authorization'] = auth
return uri, headers, body | Since weibo is a rubbish server that does not follow the standard,
we need to change the authorization header for it. |
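Because the function above is pure, it is trivial to exercise directly (illustrative values, assuming the function is in scope):

uri, headers, body = change_weibo_header(
    'https://api.weibo.com/2/statuses/update.json',
    {'Authorization': 'Bearer abc123'},
    None,
)
print(headers)   # {'Authorization': 'OAuth2 abc123'}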
def _setEndpoint(self, location):
'''
Set the endpoint once Salesforce returns the URL after a successful login()
'''
# suds 0.3.7+ supports multiple wsdl services, but breaks setlocation :(
# see https://fedorahosted.org/suds/ticket/261
try:
self._sforce.set_options(location = location)
except:
self._sforce.wsdl.service.setlocation(location)
self._location = location | Set the endpoint once Salesforce returns the URL after a successful login()
def get_market_summary(self, market):
"""
Used to get the last 24-hour summary of all active
exchanges for a specific coin
Endpoint:
1.1 /public/getmarketsummary
2.0 /pub/Market/GetMarketSummary
:param market: String literal for the market(ex: BTC-XRP)
:type market: str
:return: Summaries of active exchanges of a coin in JSON
:rtype : dict
"""
return self._api_query(path_dict={
API_V1_1: '/public/getmarketsummary',
API_V2_0: '/pub/Market/GetMarketSummary'
}, options={'market': market, 'marketname': market}, protection=PROTECTION_PUB) | Used to get the last 24-hour summary of all active
exchanges for a specific coin
Endpoint:
1.1 /public/getmarketsummary
2.0 /pub/Market/GetMarketSummary
:param market: String literal for the market(ex: BTC-XRP)
:type market: str
:return: Summaries of active exchanges of a coin in JSON
:rtype : dict |
def RENEWING(self):
"""RENEWING state."""
logger.debug('In state: RENEWING')
self.current_state = STATE_RENEWING
if self.script is not None:
self.script.script_init(self.client.lease, self.current_state)
self.script.script_go()
else:
set_net(self.client.lease) | RENEWING state. |
def contains(self, key_or_keypath):
""" Allows the 'in' operator to work for checking if a particular key (or keypath)
is inside the dictionary. """
if isinstance(key_or_keypath, list):
if len(key_or_keypath) == 0:
# empty list is root
return False
val = self
next_key = None
for next_key in key_or_keypath:
if next_key in val:
val = val[next_key]
else:
return False
return True
else:
return key_or_keypath in self.__dict__['p'] | Allows the 'in' operator to work for checking if a particular key (or keypath)
is inside the dictionary. |
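A standalone restatement of the keypath walk over plain nested dicts (contains_keypath is a hypothetical helper; the original walks its own wrapper type):

def contains_keypath(mapping, keypath):
    if not keypath:
        return False   # empty list is treated as the root, as above
    node = mapping
    for key in keypath:
        if isinstance(node, dict) and key in node:
            node = node[key]
        else:
            return False
    return True

cfg = {'db': {'host': 'localhost', 'port': 5432}}
print(contains_keypath(cfg, ['db', 'port']))   # True
print(contains_keypath(cfg, ['db', 'user']))   # False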
def save(self, filename, binary=True):
"""
Writes a ``MultiBlock`` dataset to disk.
Written file may be an ASCII or binary vtm file.
Parameters
----------
filename : str
Filename of mesh to be written. File type is inferred from
the extension of the filename unless overridden with
ftype. Can be one of the following types (.vtm or .vtmb)
binary : bool, optional
Writes the file as binary when True and ASCII when False.
Notes
-----
Binary files write much faster than ASCII and have a smaller
file size.
"""
filename = os.path.abspath(os.path.expanduser(filename))
ext = vtki.get_ext(filename)
if ext in ['.vtm', '.vtmb']:
writer = vtk.vtkXMLMultiBlockDataWriter()
else:
raise Exception('File extension must be either "vtm" or "vtmb"')
writer.SetFileName(filename)
writer.SetInputDataObject(self)
if binary:
writer.SetDataModeToBinary()
else:
writer.SetDataModeToAscii()
writer.Write()
return | Writes a ``MultiBlock`` dataset to disk.
Written file may be an ASCII or binary vtm file.
Parameters
----------
filename : str
Filename of mesh to be written. File type is inferred from
the extension of the filename unless overridden with
ftype. Can be one of the following types (.vtm or .vtmb)
binary : bool, optional
Writes the file as binary when True and ASCII when False.
Notes
-----
Binary files write much faster than ASCII and have a smaller
file size. |
def create_keep_package(cls, package_name, recursive=True):
"""Convenience constructor for a package keep rule.
Essentially equivalent to just using ``shading_keep('package_name.**')``.
:param string package_name: Package name to keep (eg, ``org.pantsbuild.example``).
:param bool recursive: Whether to keep everything under any subpackage of ``package_name``,
or just direct children of the package. (Defaults to True).
"""
return cls.create_keep(cls._format_package_glob(package_name, recursive)) | Convenience constructor for a package keep rule.
Essentially equivalent to just using ``shading_keep('package_name.**')``.
:param string package_name: Package name to keep (eg, ``org.pantsbuild.example``).
:param bool recursive: Whether to keep everything under any subpackage of ``package_name``,
or just direct children of the package. (Defaults to True). |
def _certify_int_param(value, negative=True, required=False):
"""
A private certifier (to `certifiable`) to certify integers from `certify_int`.
:param int value:
The value to certify is an integer.
:param bool negative:
If the value can be negative. Default=True.
:param bool required:
If the value is required. Default=False.
:raises CertifierParamError:
Value was not an integer (if required and non-None).
"""
if value is None and not required:
return
if not isinstance(value, int):
raise CertifierTypeError(
message="expected integer, but value is of type {cls!r}".format(
cls=value.__class__.__name__),
value=value,
required=required,
)
if not negative and value < 0:
raise CertifierValueError(
message="expected positive integer, but value is negative") | A private certifier (to `certifiable`) to certify integers from `certify_int`.
:param int value:
The value to certify is an integer.
:param bool negative:
If the value can be negative. Default=True.
:param bool required:
If the value is required. Default=False.
:raises CertifierParamError:
Value was not an integer (if required and non-None). |
def to_python(value, seen=None):
"""Reify values to their Python equivalents.
Does recursion detection, failing when that happens.
"""
seen = seen or set()
if isinstance(value, framework.TupleLike):
if value.ident in seen:
raise RecursionException('to_python: infinite recursion while evaluating %r' % value)
new_seen = seen.union([value.ident])
return {k: to_python(value[k], seen=new_seen) for k in value.exportable_keys()}
if isinstance(value, dict):
return {k: to_python(value[k], seen=seen) for k in value.keys()}
if isinstance(value, list):
return [to_python(x, seen=seen) for x in value]
return value | Reify values to their Python equivalents.
Does recursion detection, failing when that happens. |
def update_metadata(self, scaling_group, metadata):
"""
Adds the given metadata dict to the existing metadata for the scaling
group.
"""
if not isinstance(scaling_group, ScalingGroup):
scaling_group = self.get(scaling_group)
curr_meta = scaling_group.metadata
curr_meta.update(metadata)
return self.update(scaling_group, metadata=curr_meta) | Adds the given metadata dict to the existing metadata for the scaling
group. |
def rotate(l, steps=1):
r"""Rotates a list `l` `steps` to the left. Accepts
`steps` > `len(l)` or < 0.
>>> rotate([1,2,3])
[2, 3, 1]
>>> rotate([1,2,3,4],-2)
[3, 4, 1, 2]
>>> rotate([1,2,3,4],-5)
[4, 1, 2, 3]
>>> rotate([1,2,3,4],1)
[2, 3, 4, 1]
>>> l = [1,2,3]; rotate(l) is not l
True
"""
if len(l):
steps %= len(l)
if steps:
res = l[steps:]
res.extend(l[:steps])
return res | r"""Rotates a list `l` `steps` to the left. Accepts
`steps` > `len(l)` or < 0.
>>> rotate([1,2,3])
[2, 3, 1]
>>> rotate([1,2,3,4],-2)
[3, 4, 1, 2]
>>> rotate([1,2,3,4],-5)
[4, 1, 2, 3]
>>> rotate([1,2,3,4],1)
[2, 3, 4, 1]
>>> l = [1,2,3]; rotate(l) is not l
True |
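The only subtle step above is the modulo normalization, which is what makes oversized and negative step counts behave; a quick check, assuming rotate is in scope:

print(-5 % 4)                                                 # 3
print(rotate([1, 2, 3, 4], -5) == rotate([1, 2, 3, 4], 3))    # True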
def get_columns(self, df, usage, columns=None):
"""
Returns a `data_frame.columns`.
:param df: dataframe to select columns from
:param usage: should be a value from [ALL, INCLUDE, EXCLUDE].
this value only makes sense if attr `columns` is also set.
otherwise, should be used with default value ALL.
:param columns: * if `usage` is all, this value is not used.
* if `usage` is INCLUDE, the `df` is restricted to the intersection
between `columns` and the `df.columns`
* if usage is EXCLUDE, returns the `df.columns` excluding these `columns`
:return: `data_frame` columns, excluding `target_column` and `id_column` if given.
`data_frame` columns, including/excluding the `columns` depending on `usage`.
"""
columns_excluded = pd.Index([])
columns_included = df.columns
if usage == self.INCLUDE:
try:
columns_included = columns_included.intersection(pd.Index(columns))
except TypeError:
pass
elif usage == self.EXCLUDE:
try:
columns_excluded = columns_excluded.union(pd.Index(columns))
except TypeError:
pass
columns_included = columns_included.difference(columns_excluded)
return columns_included.intersection(df.columns) | Returns a `data_frame.columns`.
:param df: dataframe to select columns from
:param usage: should be a value from [ALL, INCLUDE, EXCLUDE].
this value only makes sense if attr `columns` is also set.
otherwise, should be used with default value ALL.
:param columns: * if `usage` is all, this value is not used.
* if `usage` is INCLUDE, the `df` is restricted to the intersection
between `columns` and the `df.columns`
* if usage is EXCLUDE, returns the `df.columns` excluding these `columns`
:return: `data_frame` columns, excluding `target_column` and `id_column` if given.
`data_frame` columns, including/excluding the `columns` depending on `usage`. |
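A small pandas illustration of the Index arithmetic behind INCLUDE/EXCLUDE (toy frame, not from the original project):

import pandas as pd

df = pd.DataFrame({'a': [1], 'b': [2], 'c': [3]})
wanted = pd.Index(['b', 'c', 'z'])                  # 'z' is not a real column
print(df.columns.intersection(wanted).tolist())     # ['b', 'c']  -> INCLUDE keeps only the requested columns that exist
print(df.columns.difference(wanted).tolist())       # ['a']       -> EXCLUDE drops the requested columns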
def remap_name(name_generator, names, table=None):
"""
Produces a series of variable assignments in the form of::
<obfuscated name> = <some identifier>
for each item in *names* using *name_generator* to come up with the
replacement names.
If *table* is provided, replacements will be looked up there before
generating a new unique name.
"""
out = ""
for name in names:
if table and name in table[0].keys():
replacement = table[0][name]
else:
replacement = next(name_generator)
out += "%s=%s\n" % (replacement, name)
return out | Produces a series of variable assignments in the form of::
<obfuscated name> = <some identifier>
for each item in *names* using *name_generator* to come up with the
replacement names.
If *table* is provided, replacements will be looked up there before
generating a new unique name. |
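A usage sketch, assuming remap_name above is in scope; the generator below stands in for whatever obfuscated-name source the caller normally supplies:

name_generator = ('_ob%d' % i for i in range(1000))
print(remap_name(name_generator, ['user_name', 'total_count']))
# _ob0=user_name
# _ob1=total_count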
def explain_weights_lightgbm(lgb,
vec=None,
top=20,
target_names=None, # ignored
targets=None, # ignored
feature_names=None,
feature_re=None,
feature_filter=None,
importance_type='gain',
):
"""
Return an explanation of a LightGBM estimator (via scikit-learn wrapper
LGBMClassifier or LGBMRegressor) as feature importances.
See :func:`eli5.explain_weights` for description of
``top``, ``feature_names``,
``feature_re`` and ``feature_filter`` parameters.
``target_names`` and ``targets`` parameters are ignored.
Parameters
----------
importance_type : str, optional
A way to get feature importance. Possible values are:
- 'gain' - the average gain of the feature when it is used in trees
(default)
- 'split' - the number of times a feature is used to split the data
across all trees
- 'weight' - the same as 'split', for compatibility with xgboost
"""
coef = _get_lgb_feature_importances(lgb, importance_type)
lgb_feature_names = lgb.booster_.feature_name()
return get_feature_importance_explanation(lgb, vec, coef,
feature_names=feature_names,
estimator_feature_names=lgb_feature_names,
feature_filter=feature_filter,
feature_re=feature_re,
top=top,
description=DESCRIPTION_LIGHTGBM,
num_features=coef.shape[-1],
is_regression=isinstance(lgb, lightgbm.LGBMRegressor),
) | Return an explanation of a LightGBM estimator (via scikit-learn wrapper
LGBMClassifier or LGBMRegressor) as feature importances.
See :func:`eli5.explain_weights` for description of
``top``, ``feature_names``,
``feature_re`` and ``feature_filter`` parameters.
``target_names`` and ``targets`` parameters are ignored.
Parameters
----------
importance_type : str, optional
A way to get feature importance. Possible values are:
- 'gain' - the average gain of the feature when it is used in trees
(default)
- 'split' - the number of times a feature is used to split the data
across all trees
- 'weight' - the same as 'split', for compatibility with xgboost |
def add_send_last_message(self, connection, send_last_message):
"""Adds a send_last_message function to the Dispatcher's
dictionary of functions indexed by connection.
Args:
connection (str): A locally unique identifier
provided by the receiver of messages.
send_last_message (fn): The method that should be called
by the dispatcher to respond to messages which
arrive via connection, when the connection should be closed
after the message has been sent.
"""
self._send_last_message[connection] = send_last_message
LOGGER.debug("Added send_last_message function "
"for connection %s", connection) | Adds a send_last_message function to the Dispatcher's
dictionary of functions indexed by connection.
Args:
connection (str): A locally unique identifier
provided by the receiver of messages.
send_last_message (fn): The method that should be called
by the dispatcher to respond to messages which
arrive via connection, when the connection should be closed
after the message has been sent. |
def degree_elevation(degree, ctrlpts, **kwargs):
""" Computes the control points of the rational/non-rational spline after degree elevation.
Implementation of Eq. 5.36 of The NURBS Book by Piegl & Tiller, 2nd Edition, p.205
Keyword Arguments:
* ``num``: number of degree elevations
Please note that the degree elevation algorithm can only operate on Bezier shapes, i.e. curves, surfaces, volumes.
:param degree: degree
:type degree: int
:param ctrlpts: control points
:type ctrlpts: list, tuple
:return: control points of the degree-elevated shape
:rtype: list
"""
# Get keyword arguments
num = kwargs.get('num', 1) # number of degree elevations
check_op = kwargs.get('check_num', True) # enable/disable input validation checks
if check_op:
if degree + 1 != len(ctrlpts):
raise GeomdlException("Degree elevation can only work with Bezier-type geometries")
if num <= 0:
raise GeomdlException("Cannot degree elevate " + str(num) + " times")
# Initialize variables
num_pts_elev = degree + 1 + num
pts_elev = [[0.0 for _ in range(len(ctrlpts[0]))] for _ in range(num_pts_elev)]
# Compute control points of degree-elevated 1-dimensional shape
for i in range(0, num_pts_elev):
start = max(0, (i - num))
end = min(degree, i)
for j in range(start, end + 1):
coeff = linalg.binomial_coefficient(degree, j) * linalg.binomial_coefficient(num, (i - j))
coeff /= linalg.binomial_coefficient((degree + num), i)
pts_elev[i] = [p1 + (coeff * p2) for p1, p2 in zip(pts_elev[i], ctrlpts[j])]
# Return computed control points after degree elevation
return pts_elev | Computes the control points of the rational/non-rational spline after degree elevation.
Implementation of Eq. 5.36 of The NURBS Book by Piegl & Tiller, 2nd Edition, p.205
Keyword Arguments:
* ``num``: number of degree elevations
Please note that the degree elevation algorithm can only operate on Bezier shapes, i.e. curves, surfaces, volumes.
:param degree: degree
:type degree: int
:param ctrlpts: control points
:type ctrlpts: list, tuple
:return: control points of the degree-elevated shape
:rtype: list |
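The coefficient in the inner loop is the standard Bezier degree-elevation weight; a self-contained sketch of the same Eq. 5.36 using math.comb, so it runs without the geomdl linalg helpers (the quadratic control polygon is made up):
from math import comb
degree, num = 2, 1
ctrlpts = [[0.0, 0.0], [1.0, 2.0], [2.0, 0.0]]
elevated = []
for i in range(degree + 1 + num):
    pt = [0.0, 0.0]
    for j in range(max(0, i - num), min(degree, i) + 1):
        # binomial weight from Eq. 5.36
        c = comb(degree, j) * comb(num, i - j) / comb(degree + num, i)
        pt = [p + c * q for p, q in zip(pt, ctrlpts[j])]
    elevated.append(pt)
print(elevated)
# [[0.0, 0.0], [0.666..., 1.333...], [1.333..., 1.333...], [2.0, 0.0]]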
def convert(filename,
num_questions=None,
solution=False,
pages_per_q=DEFAULT_PAGES_PER_Q,
folder='question_pdfs',
output='gradescope.pdf',
zoom=1):
"""
Public method that exports nb to PDF and pads all the questions.
If num_questions is specified, will also check the final PDF for missing
questions.
If the output font size is too small/large, increase or decrease the zoom
argument until the size looks correct.
If solution=True, we'll export solution cells instead of student cells. Use
this option to generate the solutions to upload to Gradescope.
"""
check_for_wkhtmltohtml()
save_notebook(filename)
nb = read_nb(filename, solution=solution)
pdf_names = create_question_pdfs(nb,
pages_per_q=pages_per_q,
folder=folder,
zoom=zoom)
merge_pdfs(pdf_names, output)
# The first pdf generated is the email PDF
n_questions_found = len(pdf_names) - 1
if num_questions is not None and n_questions_found != num_questions:
logging.warning(
'We expected there to be {} questions but there are only {} in '
'your final PDF. Gradescope will most likely not accept your '
'submission. Double check that you wrote your answers in the '
'cells that we provided.'
.format(num_questions, n_questions_found)
)
try:
from IPython.display import display, HTML
display(HTML(DOWNLOAD_HTML.format(output)))
except ImportError:
print('Done! The resulting PDF is located in this directory and is '
'called {}. Upload that PDF to Gradescope for grading.'
.format(output))
print()
print('If the font size of your PDF is too small/large, change the value '
'of the zoom argument when calling convert. For example, setting '
'zoom=2 makes everything twice as big.') | Public method that exports nb to PDF and pads all the questions.
If num_questions is specified, will also check the final PDF for missing
questions.
If the output font size is too small/large, increase or decrease the zoom
argument until the size looks correct.
If solution=True, we'll export solution cells instead of student cells. Use
this option to generate the solutions to upload to Gradescope. |
def add_broker(self, broker):
"""Add broker to current broker-list."""
if broker not in self._brokers:
self._brokers.add(broker)
else:
self.log.warning(
'Broker {broker_id} already present in '
'replication-group {rg_id}'.format(
broker_id=broker.id,
rg_id=self._id,
)
) | Add broker to current broker-list. |
def _update_names(self):
"""Update the derived names"""
d = dict(
table=self.table_name,
time=self.time,
space=self.space,
grain=self.grain,
variant=self.variant,
segment=self.segment
)
assert self.dataset
name = PartialPartitionName(**d).promote(self.dataset.identity.name)
self.name = str(name.name)
self.vname = str(name.vname)
self.cache_key = name.cache_key
self.fqname = str(self.identity.fqname) | Update the derived names |
def store_user_documents(user_document_gen, client, mongo_database_name, mongo_collection_name):
"""
Stores Twitter list objects that a Twitter user is a member of in different mongo collections.
Inputs: - user_document_gen: A python generator that yields a Twitter user id and an associated document list.
- client: A pymongo MongoClient object.
- mongo_database_name: The name of a Mongo database as a string.
- mongo_collection_name: The name of the mongo collection as a string.
"""
mongo_database = client[mongo_database_name]
mongo_collection = mongo_database[mongo_collection_name]
# Iterate over all users to be annotated and store the Twitter lists in mongo.
for user_twitter_id, user_document_list in user_document_gen:
document = user_document_list
document["_id"] = int(user_twitter_id)
mongo_collection.update({"_id": user_twitter_id}, document, upsert=True) | Stores Twitter list objects that a Twitter user is a member of in different mongo collections.
Inputs: - user_document_gen: A python generator that yields a Twitter user id and an associated document list.
- client: A pymongo MongoClient object.
- mongo_database_name: The name of a Mongo database as a string.
- mongo_collection_name: The name of the mongo collection as a string. |
def init_tasks():
"""
Performs basic setup before any of the tasks are run. All tasks need to
run this before continuing. It only fires once.
"""
# Make sure the required helpers and settings are present in env
if "exists" not in env:
env.exists = exists
if "run" not in env:
env.run = run
if "cd" not in env:
env.cd = cd
if "max_releases" not in env:
env.max_releases = 5
if "public_path" in env:
public_path = env.public_path.rstrip("/")
env.public_path = public_path
run_hook("init_tasks") | Performs basic setup before any of the tasks are run. All tasks needs to
run this before continuing. It only fires once. |
def get_ip_reports(self, ips):
"""Retrieves the most recent VT info for a set of ips.
Args:
ips: list of IPs.
Returns:
A dict with the IP as key and the VT report as value.
"""
api_name = 'virustotal-ip-address-reports'
(all_responses, ips) = self._bulk_cache_lookup(api_name, ips)
responses = self._request_reports("ip", ips, 'ip-address/report')
for ip, response in zip(ips, responses):
if self._cache:
self._cache.cache_value(api_name, ip, response)
all_responses[ip] = response
return all_responses | Retrieves the most recent VT info for a set of ips.
Args:
ips: list of IPs.
Returns:
A dict with the IP as key and the VT report as value. |
def is_visible(self, selector):
"""Check if an element is visible in the dom or not
This method will check if the element is displayed or not
This method might (according
to the config highlight:element_is_visible)
highlight the element if it is visible
This method won't wait until the element is visible or present
This method won't raise any exception if the element is not visible
Returns:
bool: True if the element is visible; False otherwise
"""
self.debug_log("Is visible (%s)" % selector)
element = self.find(
selector,
raise_exception=False,
wait_until_present=False,
wait_until_visible=False
)
if element:
if element.is_displayed(raise_exception=False):
element.highlight(
style=BROME_CONFIG['highlight']['element_is_visible']
)
self.debug_log("is visible (%s): True" % selector)
return True
self.debug_log("is visible (%s): False" % selector)
return False | Check if an element is visible in the dom or not
This method will check if the element is displayed or not
This method might (according
to the config highlight:element_is_visible)
highlight the element if it is visible
This method won't wait until the element is visible or present
This method won't raise any exception if the element is not visible
Returns:
bool: True if the element is visible; False otherwise |
def models(cls, api_version=DEFAULT_API_VERSION):
"""Module depends on the API version:
* 2016-09-01: :mod:`v2016_09_01.models<azure.mgmt.resource.links.v2016_09_01.models>`
"""
if api_version == '2016-09-01':
from .v2016_09_01 import models
return models
raise NotImplementedError("APIVersion {} is not available".format(api_version)) | Module depends on the API version:
* 2016-09-01: :mod:`v2016_09_01.models<azure.mgmt.resource.links.v2016_09_01.models>` |
def to_pandas_dataframe(self, sample_column=False):
"""Convert a SampleSet to a Pandas DataFrame
Returns:
:obj:`pandas.DataFrame`
Examples:
>>> samples = dimod.SampleSet.from_samples([{'a': -1, 'b': +1, 'c': -1},
... {'a': -1, 'b': -1, 'c': +1}],
... dimod.SPIN, energy=-.5)
>>> samples.to_pandas_dataframe() # doctest: +SKIP
a b c energy num_occurrences
0 -1 1 -1 -0.5 1
1 -1 -1 1 -0.5 1
>>> samples.to_pandas_dataframe(sample_column=True) # doctest: +SKIP
sample energy num_occurrences
0 {'a': -1, 'b': 1, 'c': -1} -0.5 1
1 {'a': -1, 'b': -1, 'c': 1} -0.5 1
"""
import pandas as pd
if sample_column:
df = pd.DataFrame(self.data(sorted_by=None, sample_dict_cast=True))
else:
# work directly with the record, it's much faster
df = pd.DataFrame(self.record.sample, columns=self.variables)
for field in sorted(self.record.dtype.fields): # sort for consistency
if field == 'sample':
continue
df.loc[:, field] = self.record[field]
return df | Convert a SampleSet to a Pandas DataFrame
Returns:
:obj:`pandas.DataFrame`
Examples:
>>> samples = dimod.SampleSet.from_samples([{'a': -1, 'b': +1, 'c': -1},
... {'a': -1, 'b': -1, 'c': +1}],
... dimod.SPIN, energy=-.5)
>>> samples.to_pandas_dataframe() # doctest: +SKIP
a b c energy num_occurrences
0 -1 1 -1 -0.5 1
1 -1 -1 1 -0.5 1
>>> samples.to_pandas_dataframe(sample_column=True) # doctest: +SKIP
sample energy num_occurrences
0 {'a': -1, 'b': 1, 'c': -1} -0.5 1
1 {'a': -1, 'b': -1, 'c': 1} -0.5 1 |
def vim_enter(self, filename):
"""Set up EnsimeClient when vim enters.
This is useful to start the EnsimeLauncher as soon as possible."""
success = self.setup(True, False)
if success:
self.editor.message("start_message") | Set up EnsimeClient when vim enters.
This is useful to start the EnsimeLauncher as soon as possible. |
def sample(self, withReplacement=None, fraction=None, seed=None):
"""Returns a sampled subset of this :class:`DataFrame`.
:param withReplacement: Sample with replacement or not (default False).
:param fraction: Fraction of rows to generate, range [0.0, 1.0].
:param seed: Seed for sampling (default a random seed).
.. note:: This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
.. note:: `fraction` is required, and `withReplacement` and `seed` are optional.
>>> df = spark.range(10)
>>> df.sample(0.5, 3).count()
7
>>> df.sample(fraction=0.5, seed=3).count()
7
>>> df.sample(withReplacement=True, fraction=0.5, seed=3).count()
1
>>> df.sample(1.0).count()
10
>>> df.sample(fraction=1.0).count()
10
>>> df.sample(False, fraction=1.0).count()
10
"""
# For the cases below:
# sample(True, 0.5 [, seed])
# sample(True, fraction=0.5 [, seed])
# sample(withReplacement=False, fraction=0.5 [, seed])
is_withReplacement_set = \
type(withReplacement) == bool and isinstance(fraction, float)
# For the case below:
# sample(fraction=0.5 [, seed])
is_withReplacement_omitted_kwargs = \
withReplacement is None and isinstance(fraction, float)
# For the case below:
# sample(0.5 [, seed])
is_withReplacement_omitted_args = isinstance(withReplacement, float)
if not (is_withReplacement_set
or is_withReplacement_omitted_kwargs
or is_withReplacement_omitted_args):
argtypes = [
str(type(arg)) for arg in [withReplacement, fraction, seed] if arg is not None]
raise TypeError(
"withReplacement (optional), fraction (required) and seed (optional)"
" should be a bool, float and number; however, "
"got [%s]." % ", ".join(argtypes))
if is_withReplacement_omitted_args:
if fraction is not None:
seed = fraction
fraction = withReplacement
withReplacement = None
seed = long(seed) if seed is not None else None
args = [arg for arg in [withReplacement, fraction, seed] if arg is not None]
jdf = self._jdf.sample(*args)
return DataFrame(jdf, self.sql_ctx) | Returns a sampled subset of this :class:`DataFrame`.
:param withReplacement: Sample with replacement or not (default False).
:param fraction: Fraction of rows to generate, range [0.0, 1.0].
:param seed: Seed for sampling (default a random seed).
.. note:: This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
.. note:: `fraction` is required, and `withReplacement` and `seed` are optional.
>>> df = spark.range(10)
>>> df.sample(0.5, 3).count()
7
>>> df.sample(fraction=0.5, seed=3).count()
7
>>> df.sample(withReplacement=True, fraction=0.5, seed=3).count()
1
>>> df.sample(1.0).count()
10
>>> df.sample(fraction=1.0).count()
10
>>> df.sample(False, fraction=1.0).count()
10 |
def eigenvectors(T, k=None, right=True, ncv=None, reversible=False, mu=None):
r"""Compute eigenvectors of given transition matrix.
Parameters
----------
T : scipy.sparse matrix
Transition matrix (stochastic matrix).
k : int (optional) or array-like
For integer k compute the first k eigenvalues of T
else return those eigenvectors specified by integer indices in k.
right : bool, optional
If True compute right eigenvectors, left eigenvectors otherwise
ncv : int (optional)
The number of Lanczos vectors generated, `ncv` must be greater than k;
it is recommended that ncv > 2*k
reversible : bool, optional
Indicate that transition matrix is reversible
mu : (M,) ndarray, optional
Stationary distribution of T
Returns
-------
eigvec : numpy.ndarray, shape=(d, n)
The eigenvectors of T ordered with decreasing absolute value of
the corresponding eigenvalue. If k is None then n=d, if k is
int then n=k otherwise n is the length of the given indices array.
Notes
-----
Eigenvectors are computed using the scipy interface
to the corresponding ARPACK routines.
"""
if k is None:
raise ValueError("Number of eigenvectors required for decomposition of sparse matrix")
else:
if reversible:
eigvec = eigenvectors_rev(T, k, right=right, ncv=ncv, mu=mu)
return eigvec
else:
eigvec = eigenvectors_nrev(T, k, right=right, ncv=ncv)
return eigvec | r"""Compute eigenvectors of given transition matrix.
Parameters
----------
T : scipy.sparse matrix
Transition matrix (stochastic matrix).
k : int (optional) or array-like
For integer k compute the first k eigenvalues of T
else return those eigenvector sepcified by integer indices in k.
right : bool, optional
If True compute right eigenvectors, left eigenvectors otherwise
ncv : int (optional)
The number of Lanczos vectors generated, `ncv` must be greater than k;
it is recommended that ncv > 2*k
reversible : bool, optional
Indicate that transition matrix is reversible
mu : (M,) ndarray, optional
Stationary distribution of T
Returns
-------
eigvec : numpy.ndarray, shape=(d, n)
The eigenvectors of T ordered with decreasing absolute value of
the corresponding eigenvalue. If k is None then n=d, if k is
int then n=k otherwise n is the length of the given indices array.
Notes
-----
Eigenvectors are computed using the scipy interface
to the corresponding ARPACK routines. |
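For orientation only: the non-reversible branch ultimately delegates to ARPACK through scipy. A minimal standalone illustration (not the module's own eigenvectors_nrev helper) on a small sparse stochastic matrix:
import numpy as np
import scipy.sparse as sps
from scipy.sparse.linalg import eigs
# 4-state row-stochastic transition matrix
T = sps.csr_matrix(np.array([[0.9, 0.1, 0.0, 0.0],
                             [0.1, 0.8, 0.1, 0.0],
                             [0.0, 0.1, 0.8, 0.1],
                             [0.0, 0.0, 0.1, 0.9]]))
vals, vecs = eigs(T, k=2, which='LM')  # two dominant right eigenvectors
print(vals.real)                       # leading eigenvalue is 1.0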
def path(self):
"""Return the full path of the current object."""
names = []
obj = self
while obj:
names.insert(0, obj.name)
obj = obj.parent_dir
sep = self.filesystem._path_separator(self.name)
if names[0] == sep:
names.pop(0)
dir_path = sep.join(names)
# Windows paths with drive have a root separator entry
# which should be removed
is_drive = names and len(names[0]) == 2 and names[0][1] == ':'
if not is_drive:
dir_path = sep + dir_path
else:
dir_path = sep.join(names)
dir_path = self.filesystem.absnormpath(dir_path)
return dir_path | Return the full path of the current object. |
def save(self, to_save, **kwargs):
"""save method
"""
check = kwargs.pop('check', True)
if check:
self._valid_record(to_save)
if '_id' in to_save:
self.__collect.replace_one(
{'_id': to_save['_id']}, to_save, **kwargs)
return to_save['_id']
else:
result = self.__collect.insert_one(to_save, **kwargs)
return result.inserted_id | save method |
def get_data(start, end, username=None, password=None,
data_path=os.path.abspath(".")+'/tmp_data'):
"""**Download data (badly) from Blitzorg**
Using a specified time stamp for start and end, data is downloaded at a
default frequency (10 minute intervals). If a directory called data is not
present, it will be added to the cwd as the target for the downloads.
This is probably a bad idea however. It is much better to 1) get the data
from Blitzorg directly, or 2) if you only want a small part of the data
and have an account, download a csv file via their web interface.
:parameter start: string
:parameter end: string
:parameter freq: string
:Example:
>>> get_data(start="2015-02-01T06:30", end="2015-02-01T10:05")
"""
dl_link = "http://data.blitzortung.org/Data_1/Protected/Strokes/"
if not os.path.exists(data_path):
os.makedirs(data_path)
if not username:
username = input("Username to access Blitzorg with:")
password = getpass.getpass(
prompt='Enter password for {0}:'.format(username))
auth_handler = urllib.request.HTTPBasicAuthHandler()
auth_handler.add_password(realm='Blitzortung',
uri='http://data.blitzortung.org',
user=username,
passwd=password)
opener = urllib.request.build_opener(auth_handler)
urllib.request.install_opener(opener)
time_range = pd.date_range(start, end, freq='10min')
for time_stamp in tqdm(time_range):
tmp_link = dl_link+'/'.join(return_time_elements(time_stamp))\
+ '.json.gz'
tmp_name = "./tmp_data/bz-"+'-'.join(return_time_elements(time_stamp))\
+ ".json.gz"
if os.path.isfile(tmp_name):
print("{0} exists. Aborting download attempt".format(tmp_name))
else:
try:
urllib.request.urlretrieve(tmp_link, tmp_name)
except Exception as inst:
print(inst)
print(' Encountered unknown error. Continuing.') | **Download data (badly) from Blitzorg**
Using a specified time stamp for start and end, data is downloaded at a
default frequency (10 minute intervals). If a directory called data is not
present, it will be added to the cwd as the target for the downloads.
This is probably a bad idea however. It is much better to 1) get the data
from Blitzorg directly, or 2) if you only want a small part of the data
and have an account, download a csv file via their web interface.
:parameter start: string
:parameter end: string
:parameter freq: string
:Example:
>>> get_data(start="2015-02-01T06:30", end="2015-02-01T10:05") |
def hash_folder(folder, regex='[!_]*'):
"""
Get the md5 sum of each file in the folder and return to the user
:param folder: the folder to compute the sums over
:param regex: an expression to limit the files we match
:return:
Note: by default we will hash every file in the folder
Note: we will not match anything that starts with an underscore
"""
file_hashes = {}
for path in glob.glob(os.path.join(folder, regex)):
# exclude folders
if not os.path.isfile(path):
continue
with open(path, 'rb') as fileP:  # read bytes so hashlib also works on Python 3
md5_hash = hashlib.md5(fileP.read()).digest()
file_name = os.path.basename(path)
file_hashes[file_name] = urlsafe_b64encode(md5_hash)
return file_hashes | Get the md5 sum of each file in the folder and return to the user
:param folder: the folder to compute the sums over
:param regex: an expression to limit the files we match
:return:
Note: by default we will hash every file in the folder
Note: we will not match anything that starts with an underscore |
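A usage sketch, assuming hash_folder above is importable (file names and contents are made up; the shown digest is the base64-encoded md5 of b'hello'):
import os, tempfile
tmp = tempfile.mkdtemp()
with open(os.path.join(tmp, "a.txt"), "w") as f:
    f.write("hello")
with open(os.path.join(tmp, "_ignored.txt"), "w") as f:
    f.write("skipped")        # leading underscore -> excluded by the '[!_]*' glob
print(hash_folder(tmp))        # {'a.txt': b'XUFAKrxLKna5cZ2REBfFkg=='}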
def convert_to_duckling_language_id(cls, lang):
"""Ensure a language identifier has the correct duckling format and is supported."""
if lang is not None and cls.is_supported(lang):
return lang
elif lang is not None and cls.is_supported(lang + "$core"): # Support ISO 639-1 Language Codes (e.g. "en")
return lang + "$core"
else:
raise ValueError("Unsupported language '{}'. Supported languages: {}".format(
lang, ", ".join(cls.SUPPORTED_LANGUAGES))) | Ensure a language identifier has the correct duckling format and is supported. |
def align_and_build_tree(seqs, moltype, best_tree=False, params=None):
"""Returns an alignment and a tree from Sequences object seqs.
seqs: a cogent.core.alignment.SequenceCollection object, or data that can
be used to build one.
moltype: cogent.core.moltype.MolType object
best_tree: if True (default:False), uses a slower but more accurate
algorithm to build the tree.
params: dict of parameters to pass in to the Muscle app controller.
The result will be a dict (keys 'Align' and 'Tree') containing a cogent.core.alignment.Alignment
and a cogent.core.tree.PhyloNode object (or None for the alignment
and/or tree if either fails).
"""
aln = align_unaligned_seqs(seqs, moltype=moltype, params=params)
tree = build_tree_from_alignment(aln, moltype, best_tree, params)
return {'Align':aln, 'Tree':tree} | Returns an alignment and a tree from Sequences object seqs.
seqs: a cogent.core.alignment.SequenceCollection object, or data that can
be used to build one.
moltype: cogent.core.moltype.MolType object
best_tree: if True (default:False), uses a slower but more accurate
algorithm to build the tree.
params: dict of parameters to pass in to the Muscle app controller.
The result will be a dict (keys 'Align' and 'Tree') containing a cogent.core.alignment.Alignment
and a cogent.core.tree.PhyloNode object (or None for the alignment
and/or tree if either fails). |
def year(columns, name=None):
"""
Creates the grammar for a field containing a year.
:param columns: the number of columns for the year
:param name: the name of the field
:return:
"""
if columns < 0:
# Can't have negative size
raise BaseException()
field = numeric(columns, name)
# Parse action
field.addParseAction(_to_year)
return field | Creates the grammar for a field containing a year.
:param columns: the number of columns for the year
:param name: the name of the field
:return: |
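The numeric helper and _to_year parse action belong to the surrounding module; a rough standalone sketch of the same fixed-width field with plain pyparsing (assumed available) looks like this:
import pyparsing as pp
year_field = pp.Word(pp.nums, exact=4).setParseAction(lambda t: int(t[0]))
print(year_field.parseString("2015")[0])   # 2015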
def load_configuration_from_file(directory, args):
"""Return new ``args`` with configuration loaded from file."""
args = copy.copy(args)
directory_or_file = directory
if args.config is not None:
directory_or_file = args.config
options = _get_options(directory_or_file, debug=args.debug)
args.report = options.get('report', args.report)
threshold_dictionary = docutils.frontend.OptionParser.thresholds
args.report = int(threshold_dictionary.get(args.report, args.report))
args.ignore_language = get_and_split(
options, 'ignore_language', args.ignore_language)
args.ignore_messages = options.get(
'ignore_messages', args.ignore_messages)
args.ignore_directives = get_and_split(
options, 'ignore_directives', args.ignore_directives)
args.ignore_substitutions = get_and_split(
options, 'ignore_substitutions', args.ignore_substitutions)
args.ignore_roles = get_and_split(
options, 'ignore_roles', args.ignore_roles)
return args | Return new ``args`` with configuration loaded from file. |
def _send(self):
""" Send all queued messages to the server.
"""
data = self.output_buffer.view()
if not data:
return
if self.closed():
raise self.Error("Failed to write to closed connection {!r}".format(self.server.address))
if self.defunct():
raise self.Error("Failed to write to defunct connection {!r}".format(self.server.address))
self.socket.sendall(data)
self.output_buffer.clear() | Send all queued messages to the server. |
def label_contours(self, intervals, window=150, hop=30):
"""
In a very flowy contour, it is not trivial to say which pitch value corresponds
to what interval. This function labels pitch contours with intervals by guessing
from the characteristics of the contour and its melodic context.
:param window: the size of window over which the context is gauged, in milliseconds.
:param hop: hop size in milliseconds.
"""
window /= 1000.0
hop /= 1000.0
exposure = int(window / hop)
boundary = window - hop
final_index = utils.find_nearest_index(self.pitch_obj.timestamps,
self.pitch_obj.timestamps[-1] - boundary)
interval = np.median(np.diff(self.pitch_obj.timestamps))
#interval = 0.00290254832393
window_step = window / interval
hop_step = hop / interval
start_index = 0
end_index = window_step
contour_labels = {}
means = []
while end_index < final_index:
temp = self.pitch_obj.pitch[start_index:end_index][self.pitch_obj.pitch[start_index:end_index] > -10000]
means.append(np.mean(temp))
start_index = start_index + hop_step
end_index = start_index + window_step
for i in xrange(exposure, len(means) - exposure + 1):
_median = np.median(means[i - exposure:i])
if _median < -5000:
continue
ind = utils.find_nearest_index(_median, intervals)
contour_end = (i - exposure) * hop_step + window_step
contour_start = contour_end - hop_step
#print sliceBegin, sliceEnd, JICents[ind]
#newPitch[sliceBegin:sliceEnd] = JICents[ind]
if intervals[ind] in contour_labels.keys():
contour_labels[intervals[ind]].append([contour_start, contour_end])
else:
contour_labels[intervals[ind]] = [[contour_start, contour_end]]
self.contour_labels = contour_labels | In a very flowy contour, it is not trivial to say which pitch value corresponds
to what interval. This function labels pitch contours with intervals by guessing
from the characteristics of the contour and its melodic context.
:param window: the size of window over which the context is gauged, in milliseconds.
:param hop: hop size in milliseconds. |
def subdomain_row_factory(cls, cursor, row):
"""
Dict row factory for subdomains
"""
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d | Dict row factory for subdomains |
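The same dict row-factory pattern works with the sqlite3 standard library; a self-contained illustration (table name and values are made up):
import sqlite3
def dict_row_factory(cursor, row):
    # map each column name from cursor.description to its value in the row
    return {col[0]: row[idx] for idx, col in enumerate(cursor.description)}
con = sqlite3.connect(":memory:")
con.row_factory = dict_row_factory
con.execute("CREATE TABLE subdomains (name TEXT, owner TEXT)")
con.execute("INSERT INTO subdomains VALUES ('foo', 'alice')")
print(con.execute("SELECT * FROM subdomains").fetchone())
# {'name': 'foo', 'owner': 'alice'}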
def ShouldRetry(self, exception):
"""Returns true if should retry based on the passed-in exception.
:param (errors.HTTPFailure instance) exception:
:rtype:
boolean
"""
if self.current_retry_attempt_count < self._max_retry_attempt_count:
self.current_retry_attempt_count += 1
self.retry_after_in_milliseconds = 0
if self._fixed_retry_interval_in_milliseconds:
self.retry_after_in_milliseconds = self._fixed_retry_interval_in_milliseconds
elif http_constants.HttpHeaders.RetryAfterInMilliseconds in exception.headers:
self.retry_after_in_milliseconds = int(exception.headers[http_constants.HttpHeaders.RetryAfterInMilliseconds])
if self.cummulative_wait_time_in_milliseconds < self._max_wait_time_in_milliseconds:
self.cummulative_wait_time_in_milliseconds += self.retry_after_in_milliseconds
return True
return False | Returns True if the caller should retry, based on the passed-in exception.
:param (errors.HTTPFailure instance) exception:
:rtype:
boolean |
def orientation_angle(im, approxangle=None, *, isshiftdft=False, truesize=None,
rotateAngle=None):
"""Give the highest contribution to the orientation
Parameters
----------
im: 2d array
The image
approxangle: number, optional
The approximate angle (None if unknown)
isshiftdft: Boolean, default False
True if the image has been processed (DFT, fftshift)
truesize: 2 numbers, optional
Truesize of the image if isshiftdft is True
rotateAngle: number, optional
The diagonals are more sensitive than the axes.
rotate the image to avoid pixel orientation (flat or diagonal)
Returns
-------
angle: number
The orientation of the image
Notes
-----
if approxangle is specified, search only within +- pi/4
"""
im = np.asarray(im)
# If we rotate the image first
if rotateAngle is not None and not isshiftdft:
# compute the scale corresponding to the image
scale = np.sqrt(.5 * (1 + (np.tan(rotateAngle) - 1)**2 /
(np.tan(rotateAngle) + 1)**2))
# rotate the image
im = rotate_scale(im, rotateAngle, scale)
# compute log fft (nearest interpolation as line go between pixels)
lp = polar_fft(im, isshiftdft=isshiftdft,
logoutput=False, interpolation='nearest',
truesize=truesize)
# get distribution
adis = lp.sum(-1)
if approxangle is not None:
#-np.pi/2 as we are in fft. +- pi/4 is search window
amin = clamp_angle(approxangle - np.pi / 4 - np.pi / 2)
amax = clamp_angle(approxangle + np.pi / 4 - np.pi / 2)
angles = np.linspace(-np.pi / 2, np.pi / 2,
lp.shape[0], endpoint=False)
if amin > amax:
adis[np.logical_and(angles > amax, angles < amin)] = adis.min()
else:
adis[np.logical_or(angles > amax, angles < amin)] = adis.min()
# get peak pos
ret = get_peak_pos(adis, wrap=True)
anglestep = np.pi / lp.shape[0]
"""
import matplotlib.pyplot as plt
plt.figure()
plt.plot(anglestep*np.arange(len(adis)),adis)
#"""
# return max (not -pi/2 as 0 is along y and we want 0 alonx x.)
# the transformation is basically "For Free"
ret = clamp_angle(ret * anglestep)
# Substract rotateAngle to get the original image
if rotateAngle is not None:
ret = clamp_angle(ret - rotateAngle)
return ret | Give the highest contribution to the orientation
Parameters
----------
im: 2d array
The image
approxangle: number, optional
The approximate angle (None if unknown)
isshiftdft: Boolean, default False
True if the image has been processed (DFT, fftshift)
truesize: 2 numbers, optional
Truesize of the image if isshiftdft is True
rotateAngle: number, optional
The diagonals are more sensitive than the axes.
rotate the image to avoid pixel orientation (flat or diagonal)
Returns
-------
angle: number
The orientation of the image
Notes
-----
if approxangle is specified, search only within +- pi/4 |
def irods_filepath(det_id, run_id):
"""Generate the iRODS filepath for given detector (O)ID and run ID"""
data_path = "/in2p3/km3net/data/raw/sea"
from km3pipe.db import DBManager
if not isinstance(det_id, int):
dts = DBManager().detectors
det_id = int(dts[dts.OID == det_id].SERIALNUMBER.values[0])
return data_path + "/KM3NeT_{0:08}/{2}/KM3NeT_{0:08}_{1:08}.root" \
.format(det_id, run_id, run_id//1000) | Generate the iRODS filepath for given detector (O)ID and run ID |
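A usage sketch assuming irods_filepath above is importable; with an integer detector ID the DB lookup is skipped and the result is pure string formatting:
print(irods_filepath(14, 5056))
# /in2p3/km3net/data/raw/sea/KM3NeT_00000014/5/KM3NeT_00000014_00005056.root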
def restore(self, workspace_uuid):
"""
Restore the workspace to the given workspace_uuid.
If workspace_uuid is None then create a new workspace and use it.
"""
workspace = next((workspace for workspace in self.document_model.workspaces if workspace.uuid == workspace_uuid), None)
if workspace is None:
workspace = self.new_workspace()
self._change_workspace(workspace) | Restore the workspace to the given workspace_uuid.
If workspace_uuid is None then create a new workspace and use it. |
def create_manage_py(self, apps):
"""Creates manage.py file, with a given list of installed apps.
:param list apps:
"""
self.logger.debug('Creating manage.py ...')
with open(self._get_manage_py_path(), mode='w') as f:
south_migration_modules = []
for app in apps:
south_migration_modules.append("'%(app)s': '%(app)s.south_migrations'" % {'app': app})
f.write(MANAGE_PY % {
'apps_available': "', '".join(apps),
'apps_path': self.apps_path,
'south_migration_modules': ", ".join(south_migration_modules)
}) | Creates manage.py file, with a given list of installed apps.
:param list apps: |
def subgrid_kernel(kernel, subgrid_res, odd=False, num_iter=100):
"""
creates a higher resolution kernel with subgrid resolution as an interpolation of the original kernel in an
iterative approach
:param kernel: initial kernel
:param subgrid_res: subgrid resolution required
:return: kernel with higher resolution (larger)
"""
subgrid_res = int(subgrid_res)
if subgrid_res == 1:
return kernel
nx, ny = np.shape(kernel)
d_x = 1. / nx
x_in = np.linspace(d_x/2, 1-d_x/2, nx)
d_y = 1. / nx
y_in = np.linspace(d_y/2, 1-d_y/2, ny)
nx_new = nx * subgrid_res
ny_new = ny * subgrid_res
if odd is True:
if nx_new % 2 == 0:
nx_new -= 1
if ny_new % 2 == 0:
ny_new -= 1
d_x_new = 1. / nx_new
d_y_new = 1. / ny_new
x_out = np.linspace(d_x_new/2., 1-d_x_new/2., nx_new)
y_out = np.linspace(d_y_new/2., 1-d_y_new/2., ny_new)
kernel_input = copy.deepcopy(kernel)
kernel_subgrid = image_util.re_size_array(x_in, y_in, kernel_input, x_out, y_out)
kernel_subgrid = kernel_norm(kernel_subgrid)
for i in range(max(num_iter, 1)):
# given a proposition, re-size it to original pixel size
if subgrid_res % 2 == 0:
kernel_pixel = averaging_even_kernel(kernel_subgrid, subgrid_res)
else:
kernel_pixel = util.averaging(kernel_subgrid, numGrid=nx_new, numPix=nx)
delta = kernel - kernel_pixel
#plt.matshow(delta)
#plt.colorbar()
#plt.show()
temp_kernel = kernel_input + delta
kernel_subgrid = image_util.re_size_array(x_in, y_in, temp_kernel, x_out, y_out)#/norm_subgrid
kernel_subgrid = kernel_norm(kernel_subgrid)
kernel_input = temp_kernel
#from scipy.ndimage import zoom
#ratio = subgrid_res
#kernel_subgrid = zoom(kernel, ratio, order=4) / ratio ** 2
#print(np.shape(kernel_subgrid))
# whatever has not been matched is added to zeroth order (in squares of the undersampled PSF)
if subgrid_res % 2 == 0:
return kernel_subgrid
kernel_pixel = util.averaging(kernel_subgrid, numGrid=nx_new, numPix=nx)
kernel_pixel = kernel_norm(kernel_pixel)
delta_kernel = kernel_pixel - kernel_norm(kernel)
id = np.ones((subgrid_res, subgrid_res))
delta_kernel_sub = np.kron(delta_kernel, id)/subgrid_res**2
return kernel_norm(kernel_subgrid - delta_kernel_sub) | creates a higher resolution kernel with subgrid resolution as an interpolation of the original kernel in an
iterative approach
:param kernel: initial kernel
:param subgrid_res: subgrid resolution required
:return: kernel with higher resolution (larger) |
async def fetch(self, method, url, params=None, headers=None, data=None):
"""Make an HTTP request.
Automatically uses configured HTTP proxy, and adds Google authorization
header and cookies.
Failures will be retried MAX_RETRIES times before raising NetworkError.
Args:
method (str): Request method.
url (str): Request URL.
params (dict): (optional) Request query string parameters.
headers (dict): (optional) Request headers.
data (str): (optional) Request body data.
Returns:
FetchResponse: Response data.
Raises:
NetworkError: If the request fails.
"""
logger.debug('Sending request %s %s:\n%r', method, url, data)
for retry_num in range(MAX_RETRIES):
try:
async with self.fetch_raw(method, url, params=params,
headers=headers, data=data) as res:
async with async_timeout.timeout(REQUEST_TIMEOUT):
body = await res.read()
logger.debug('Received response %d %s:\n%r',
res.status, res.reason, body)
except asyncio.TimeoutError:
error_msg = 'Request timed out'
except aiohttp.ServerDisconnectedError as err:
error_msg = 'Server disconnected error: {}'.format(err)
except (aiohttp.ClientError, ValueError) as err:
error_msg = 'Request connection error: {}'.format(err)
else:
break
logger.info('Request attempt %d failed: %s', retry_num, error_msg)
else:
logger.info('Request failed after %d attempts', MAX_RETRIES)
raise exceptions.NetworkError(error_msg)
if res.status != 200:
logger.info('Request returned unexpected status: %d %s',
res.status, res.reason)
raise exceptions.NetworkError(
'Request return unexpected status: {}: {}'
.format(res.status, res.reason)
)
return FetchResponse(res.status, body) | Make an HTTP request.
Automatically uses configured HTTP proxy, and adds Google authorization
header and cookies.
Failures will be retried MAX_RETRIES times before raising NetworkError.
Args:
method (str): Request method.
url (str): Request URL.
params (dict): (optional) Request query string parameters.
headers (dict): (optional) Request headers.
data (str): (optional) Request body data.
Returns:
FetchResponse: Response data.
Raises:
NetworkError: If the request fails. |
def com_google_fonts_check_font_copyright(ttFont):
"""Copyright notices match canonical pattern in fonts"""
import re
from fontbakery.utils import get_name_entry_strings
failed = False
for string in get_name_entry_strings(ttFont, NameID.COPYRIGHT_NOTICE):
does_match = re.search(r'Copyright [0-9]{4} The .* Project Authors \([^\@]*\)',
string)
if does_match:
yield PASS, ("Name Table entry: Copyright field '{}'"
" matches canonical pattern.").format(string)
else:
failed = True
yield FAIL, ("Name Table entry: Copyright notices should match"
" a pattern similar to:"
" 'Copyright 2017 The Familyname"
" Project Authors (git url)'\n"
"But instead we have got:"
" '{}'").format(string)
if not failed:
yield PASS, "Name table copyright entries are good" | Copyright notices match canonical pattern in fonts |
def _get_name(self):
""" Property getter.
"""
if (self.tail_node is not None) and (self.head_node is not None):
return "%s %s %s" % (self.tail_node.ID, self.conn,
self.head_node.ID)
else:
return "Edge" | Property getter. |
def convex_hull(features):
"""Returns points on convex hull of an array of points in CCW order."""
points = sorted([s.point() for s in features])
l = reduce(_keep_left, points, [])
u = reduce(_keep_left, reversed(points), [])
return l.extend(u[i] for i in xrange(1, len(u) - 1)) or l | Returns points on convex hull of an array of points in CCW order. |
def validate_event_type(sender, event, created):
"""Verify that the Event's code is a valid one."""
if event.code not in sender.event_codes():
raise ValueError("The Event.code '{}' is not a valid Event "
"code.".format(event.code)) | Verify that the Event's code is a valid one. |
def _create_buffer(self):
"""
Create the `Buffer` for the Python input.
"""
python_buffer = Buffer(
name=DEFAULT_BUFFER,
complete_while_typing=Condition(lambda: self.complete_while_typing),
enable_history_search=Condition(lambda: self.enable_history_search),
tempfile_suffix='.py',
history=self.history,
completer=ThreadedCompleter(self._completer),
validator=ConditionalValidator(
self._validator,
Condition(lambda: self.enable_input_validation)),
auto_suggest=ConditionalAutoSuggest(
ThreadedAutoSuggest(AutoSuggestFromHistory()),
Condition(lambda: self.enable_auto_suggest)),
accept_handler=self._accept_handler,
on_text_changed=self._on_input_timeout)
return python_buffer | Create the `Buffer` for the Python input. |
def unlock(arguments):
"""Unlock the database."""
import redis
u = coil.utils.ask("Redis URL", "redis://localhost:6379/0")
db = redis.StrictRedis.from_url(u)
db.set('site:lock', 0)
print("Database unlocked.")
return 0 | Unlock the database. |
def get_gui_hint(self, hint):
"""Returns the value for specified gui hint (or a sensible default value,
if this argument doesn't specify the hint).
Args:
hint: name of the hint to get value for
Returns:
value of the hint specified in yaml or a sensible default
"""
if hint == 'type':
# 'self.kwargs.get('nargs') == 0' is there for default_iff_used, which may
# have nargs: 0, so that it works similarly to 'store_const'
if self.kwargs.get('action') == 'store_true' or self.kwargs.get('nargs') == 0:
return 'bool'
# store_const is represented by checkbox, but computes default differently
elif self.kwargs.get('action') == 'store_const':
return 'const'
return self.gui_hints.get('type', 'str')
elif hint == 'default':
hint_type = self.get_gui_hint('type')
hint_default = self.gui_hints.get('default', None)
arg_default = self.kwargs.get('default', None)
preserved_value = None
if 'preserved' in self.kwargs:
preserved_value = config_manager.get_config_value(self.kwargs['preserved'])
if hint_type == 'path':
if preserved_value is not None:
default = preserved_value
elif hint_default is not None:
default = hint_default.replace('$(pwd)', utils.get_cwd_or_homedir())
else:
default = arg_default or '~'
return os.path.abspath(os.path.expanduser(default))
elif hint_type == 'bool':
return hint_default or arg_default or False
elif hint_type == 'const':
return hint_default or arg_default
else:
if hint_default == '$(whoami)':
hint_default = getpass.getuser()
return preserved_value or hint_default or arg_default or '' | Returns the value for specified gui hint (or a sensible default value,
if this argument doesn't specify the hint).
Args:
hint: name of the hint to get value for
Returns:
value of the hint specified in yaml or a sensible default |
def hessian(self, x, y, n_sersic, R_sersic, k_eff, center_x=0, center_y=0):
"""
returns Hessian matrix of function d^2f/dx^2, d^2f/dy^2, d^2f/dxdy
"""
x_ = x - center_x
y_ = y - center_y
r = np.sqrt(x_**2 + y_**2)
if isinstance(r, int) or isinstance(r, float):
r = max(self._s, r)
else:
r[r < self._s] = self._s
d_alpha_dr = self.d_alpha_dr(x, y, n_sersic, R_sersic, k_eff, center_x, center_y)
alpha = -self.alpha_abs(x, y, n_sersic, R_sersic, k_eff, center_x, center_y)
#f_xx_ = d_alpha_dr * calc_util.d_r_dx(x_, y_) * x_/r + alpha * calc_util.d_x_diffr_dx(x_, y_)
#f_yy_ = d_alpha_dr * calc_util.d_r_dy(x_, y_) * y_/r + alpha * calc_util.d_y_diffr_dy(x_, y_)
#f_xy_ = d_alpha_dr * calc_util.d_r_dy(x_, y_) * x_/r + alpha * calc_util.d_x_diffr_dy(x_, y_)
f_xx = -(d_alpha_dr/r + alpha/r**2) * x_**2/r + alpha/r
f_yy = -(d_alpha_dr/r + alpha/r**2) * y_**2/r + alpha/r
f_xy = -(d_alpha_dr/r + alpha/r**2) * x_*y_/r
return f_xx, f_yy, f_xy | returns Hessian matrix of function d^2f/dx^2, d^2f/dy^2, d^2f/dxdy
def create_release():
"""Creates a new release candidate for a build."""
build = g.build
release_name = request.form.get('release_name')
utils.jsonify_assert(release_name, 'release_name required')
url = request.form.get('url')
utils.jsonify_assert(url, 'url required')
release = models.Release(
name=release_name,
url=url,
number=1,
build_id=build.id)
last_candidate = (
models.Release.query
.filter_by(build_id=build.id, name=release_name)
.order_by(models.Release.number.desc())
.first())
if last_candidate:
release.number += last_candidate.number
if last_candidate.status == models.Release.PROCESSING:
canceled_task_count = work_queue.cancel(
release_id=last_candidate.id)
logging.info('Canceling %d tasks for previous attempt '
'build_id=%r, release_name=%r, release_number=%d',
canceled_task_count, build.id, last_candidate.name,
last_candidate.number)
last_candidate.status = models.Release.BAD
db.session.add(last_candidate)
db.session.add(release)
db.session.commit()
signals.release_updated_via_api.send(app, build=build, release=release)
logging.info('Created release: build_id=%r, release_name=%r, url=%r, '
'release_number=%d', build.id, release.name,
url, release.number)
return flask.jsonify(
success=True,
build_id=build.id,
release_name=release.name,
release_number=release.number,
url=url) | Creates a new release candidate for a build. |
def add_event(self, rule, callback):
"""Adds an event to the algorithm's EventManager.
Parameters
----------
rule : EventRule
The rule for when the callback should be triggered.
callback : callable[(context, data) -> None]
The function to execute when the rule is triggered.
"""
self.event_manager.add_event(
zipline.utils.events.Event(rule, callback),
) | Adds an event to the algorithm's EventManager.
Parameters
----------
rule : EventRule
The rule for when the callback should be triggered.
callback : callable[(context, data) -> None]
The function to execute when the rule is triggered. |
def _calculate_unpack_filter(cls, includes=None, excludes=None, spec=None):
"""Take regex patterns and return a filter function.
:param list includes: List of include patterns to pass to _file_filter.
:param list excludes: List of exclude patterns to pass to _file_filter.
"""
include_patterns = cls.compile_patterns(includes or [],
field_name='include_patterns',
spec=spec)
logger.debug('include_patterns: {}'
.format(list(p.pattern for p in include_patterns)))
exclude_patterns = cls.compile_patterns(excludes or [],
field_name='exclude_patterns',
spec=spec)
logger.debug('exclude_patterns: {}'
.format(list(p.pattern for p in exclude_patterns)))
return lambda f: cls._file_filter(f, include_patterns, exclude_patterns) | Take regex patterns and return a filter function.
:param list includes: List of include patterns to pass to _file_filter.
:param list excludes: List of exclude patterns to pass to _file_filter. |
def deployAll(self):
'''
Deploys all the items from the vault. Useful after a format
'''
targets = [Target.getTarget(iid) for iid, n, p in self.db.listTargets()]
for target in targets:
target.deploy()
verbose('Deploy all complete') | Deploys all the items from the vault. Useful after a format |
def _separate_epochs(activity_data, epoch_list):
""" create data epoch by epoch
Separate data into epochs of interest specified in epoch_list
and z-score them for computing correlation
Parameters
----------
activity_data: list of 2D array in shape [nVoxels, nTRs]
the masked activity data organized in voxel*TR formats of all subjects
epoch_list: list of 3D array in shape [condition, nEpochs, nTRs]
specification of epochs and conditions
assuming all subjects have the same number of epochs
len(epoch_list) equals the number of subjects
Returns
-------
raw_data: list of 2D array in shape [epoch length, nVoxels]
the data organized in epochs
and z-scored in preparation of correlation computation
len(raw_data) equals the number of epochs
labels: list of 1D array
the condition labels of the epochs
len(labels) equals the number of epochs
"""
time1 = time.time()
raw_data = []
labels = []
for sid in range(len(epoch_list)):
epoch = epoch_list[sid]
for cond in range(epoch.shape[0]):
sub_epoch = epoch[cond, :, :]
for eid in range(epoch.shape[1]):
r = np.sum(sub_epoch[eid, :])
if r > 0: # there is an epoch in this condition
# mat is row-major
# regardless of the order of activity_data[sid]
mat = activity_data[sid][:, sub_epoch[eid, :] == 1]
mat = np.ascontiguousarray(mat.T)
mat = zscore(mat, axis=0, ddof=0)
# if zscore fails (standard deviation is zero),
# set all values to be zero
mat = np.nan_to_num(mat)
mat = mat / math.sqrt(r)
raw_data.append(mat)
labels.append(cond)
time2 = time.time()
logger.debug(
'epoch separation done, takes %.2f s' %
(time2 - time1)
)
return raw_data, labels | create data epoch by epoch
Separate data into epochs of interest specified in epoch_list
and z-score them for computing correlation
Parameters
----------
activity_data: list of 2D array in shape [nVoxels, nTRs]
the masked activity data organized in voxel*TR formats of all subjects
epoch_list: list of 3D array in shape [condition, nEpochs, nTRs]
specification of epochs and conditions
assuming all subjects have the same number of epochs
len(epoch_list) equals the number of subjects
Returns
-------
raw_data: list of 2D array in shape [epoch length, nVoxels]
the data organized in epochs
and z-scored in preparation of correlation computation
len(raw_data) equals the number of epochs
labels: list of 1D array
the condition labels of the epochs
len(labels) equals the number of epochs
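The per-epoch normalisation is column masking plus z-scoring; a tiny standalone illustration with fake data (shapes are made up and the function itself is not called):
import numpy as np
from scipy.stats import zscore
activity = np.random.rand(10, 6)            # nVoxels x nTRs
epoch_mask = np.array([0, 1, 1, 1, 0, 0])   # one epoch covering TRs 1-3
mat = activity[:, epoch_mask == 1].T        # epoch length x nVoxels
mat = np.nan_to_num(zscore(mat, axis=0, ddof=0)) / np.sqrt(epoch_mask.sum())
print(mat.shape)                            # (3, 10)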
def fetch_url(url, method='GET', user_agent='django-oembed', timeout=SOCKET_TIMEOUT):
"""
Fetch response headers and data from a URL, raising a generic exception
for any kind of failure.
"""
sock = httplib2.Http(timeout=timeout)
request_headers = {
'User-Agent': user_agent,
'Accept-Encoding': 'gzip'}
try:
headers, raw = sock.request(url, headers=request_headers, method=method)
except:
raise OEmbedHTTPException('Error fetching %s' % url)
return headers, raw | Fetch response headers and data from a URL, raising a generic exception
for any kind of failure. |
def exports(self):
"""
:rtype: twilio.rest.preview.bulk_exports.export.ExportList
"""
if self._exports is None:
self._exports = ExportList(self)
return self._exports | :rtype: twilio.rest.preview.bulk_exports.export.ExportList |
def create_dashboard(self, name):
'''
**Description**
Creates an empty dashboard. You can then add panels by using ``add_dashboard_panel``.
**Arguments**
- **name**: the name of the dashboard that will be created.
**Success Return Value**
A dictionary showing the details of the new dashboard.
**Example**
`examples/dashboard.py <https://github.com/draios/python-sdc-client/blob/master/examples/dashboard.py>`_
'''
dashboard_configuration = {
'name': name,
'schema': 2,
'items': []
}
#
# Create the new dashboard
#
res = requests.post(self.url + self._dashboards_api_endpoint, headers=self.hdrs, data=json.dumps({'dashboard': dashboard_configuration}),
verify=self.ssl_verify)
return self._request_result(res) | **Description**
Creates an empty dashboard. You can then add panels by using ``add_dashboard_panel``.
**Arguments**
- **name**: the name of the dashboard that will be created.
**Success Return Value**
A dictionary showing the details of the new dashboard.
**Example**
`examples/dashboard.py <https://github.com/draios/python-sdc-client/blob/master/examples/dashboard.py>`_ |
def wx_menu(self):
'''return a wx.Menu() for this menu'''
from MAVProxy.modules.lib.wx_loader import wx
menu = wx.Menu()
for i in range(len(self.items)):
m = self.items[i]
m._append(menu)
return menu | return a wx.Menu() for this menu |
def sign_up(self):
"""Signs up a participant for the experiment.
This is done using a POST request to the /participant/ endpoint.
"""
self.log("Bot player signing up.")
self.subscribe_to_quorum_channel()
while True:
url = (
"{host}/participant/{self.worker_id}/"
"{self.hit_id}/{self.assignment_id}/"
"debug?fingerprint_hash={hash}&recruiter=bots:{bot_name}".format(
host=self.host,
self=self,
hash=uuid.uuid4().hex,
bot_name=self.__class__.__name__,
)
)
try:
result = requests.post(url)
result.raise_for_status()
except RequestException:
self.stochastic_sleep()
continue
if result.json()["status"] == "error":
self.stochastic_sleep()
continue
self.on_signup(result.json())
return True | Signs up a participant for the experiment.
This is done using a POST request to the /participant/ endpoint. |
def dict_to_vtk(data, path='./dictvtk', voxel_size=1, origin=(0, 0, 0)):
r"""
Accepts multiple images as a dictionary and compiles them into a vtk file
Parameters
----------
data : dict
A dictionary of *key: value* pairs, where the *key* is the name of the
scalar property stored in each voxel of the array stored in the
corresponding *value*.
path : string
Path to output file
voxel_size : int
The side length of the voxels (voxels are cubic)
origin : float
data origin (according to selected voxel size)
Notes
-----
Outputs a vtk, vtp or vti file that can be opened in ParaView
"""
vs = voxel_size
for entry in data:
if data[entry].dtype == bool:
data[entry] = data[entry].astype(np.int8)
if not data[entry].flags['C_CONTIGUOUS']:  # only copy when the array is not already C-contiguous
data[entry] = np.ascontiguousarray(data[entry])
imageToVTK(path, cellData=data, spacing=(vs, vs, vs), origin=origin) | r"""
Accepts multiple images as a dictionary and compiles them into a vtk file
Parameters
----------
data : dict
A dictionary of *key: value* pairs, where the *key* is the name of the
scalar property stored in each voxel of the array stored in the
corresponding *value*.
path : string
Path to output file
voxel_size : int
The side length of the voxels (voxels are cubic)
origin : float
data origin (according to selected voxel size)
Notes
-----
Outputs a vtk, vtp or vti file that can be opened in ParaView