def kvectors(self, norm=False, form='broadcast', real=False, shift=False):
    """
    Return the kvectors associated with this tile, given the standard form
    of -0.5 to 0.5. The `norm` and `form` arguments are the same as those
    passed to `Tile.coords`.
    Parameters
    ----------
    real : boolean
        whether to return kvectors associated with the real fft instead
    """
    if norm is False:
        norm = 1
    if norm is True:
        norm = np.array(self.shape)
    norm = aN(norm, self.dim, dtype='float')
    v = list(np.fft.fftfreq(self.shape[i])/norm[i] for i in range(self.dim))
    if shift:
        v = list(np.fft.fftshift(t) for t in v)
    if real:
        v[-1] = v[-1][:(self.shape[-1]+1)//2]
    return self._format_vector(v, form=form)
def new(self):
    # type: () -> None
    '''
    A method to create a new UDF Anchor Volume Structure.
    Parameters:
     None.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('UDF Anchor Volume Structure already initialized')
    self.desc_tag = UDFTag()
    self.desc_tag.new(2)  # FIXME: we should let the user set serial_number
    self.main_vd_length = 32768
    self.main_vd_extent = 0  # This will get set later.
    self.reserve_vd_length = 32768
    self.reserve_vd_extent = 0  # This will get set later.
    self._initialized = True
def _load_ssh_files(self):
    """
    Trigger loading of configured SSH config file paths.
    Expects that ``base_ssh_config`` has already been set to an
    `~paramiko.config.SSHConfig` object.
    :returns: ``None``.
    """
    # TODO: does this want to more closely ape the behavior of
    # InvokeConfig.load_files? re: having a _found attribute for each that
    # determines whether to load or skip
    if self._runtime_ssh_path is not None:
        path = self._runtime_ssh_path
        # Manually blow up like open() (_load_ssh_file normally doesn't)
        if not os.path.exists(path):
            msg = "No such file or directory: {!r}".format(path)
            raise IOError(errno.ENOENT, msg)
        self._load_ssh_file(os.path.expanduser(path))
    elif self.load_ssh_configs:
        for path in (self._user_ssh_path, self._system_ssh_path):
            self._load_ssh_file(os.path.expanduser(path))
def get_vmpolicy_macaddr_output_vmpolicy_macaddr_name(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_vmpolicy_macaddr = ET.Element("get_vmpolicy_macaddr")
    config = get_vmpolicy_macaddr
    output = ET.SubElement(get_vmpolicy_macaddr, "output")
    vmpolicy_macaddr = ET.SubElement(output, "vmpolicy-macaddr")
    name = ET.SubElement(vmpolicy_macaddr, "name")
    name.text = kwargs.pop('name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def prune_loop_for_kic(self, loops_segments, search_radius, expected_min_loop_length = None, expected_max_loop_length = None, generate_pymol_session = False):
    '''A wrapper for prune_structure_according_to_loop_definitions suitable for the Rosetta kinematic closure (KIC) loop modeling method.'''
    return self.prune_structure_according_to_loop_definitions(loops_segments, search_radius, expected_min_loop_length = expected_min_loop_length, expected_max_loop_length = expected_max_loop_length, generate_pymol_session = generate_pymol_session, check_sequence = True, keep_Ca_buttress_atoms = True)
def _validate_config(self, config):
    """
    Validates that all necessary config parameters are specified.
    :type config: dict[str, dict[str, Any] | str]
    :param config: the module config
    """
    if config is None:
        raise ValueError("OIDCFrontend conf can't be 'None'.")
    for k in {"signing_key_path", "provider"}:
        if k not in config:
            raise ValueError("Missing configuration parameter '{}' for OpenID Connect frontend.".format(k))
def get(self, **params):
    """
    Return a list of commands that were affected by a recent change
    (following a poll() return for the controlling file descriptor).
    Each list element is a tuple:
        (name, command_path, module_list)
    The event queue will be read multiple times and reads continue
    until a timeout occurs.
    """
    log = self._getparam('log', self._discard, **params)
    changes = {}
    paths = self._watch.get(**params)
    # On each event, de-invert the tree to produce a
    # list of changes by command name.
    #
    for path in paths:
        if path in self.modules:
            for name in self.modules[path]:
                if name in changes:
                    if path not in changes[name]:
                        changes[name].append(path)
                else:
                    changes[name] = [path]
        else:
            log.warning("Path %r had no matching watch entry", path)
    names = list(changes)
    log.debug("Change was to %d name%s", len(names), ses(len(names)))
    names.sort()
    resp = []
    for name in names:
        resp.append((name, self.names.get(name), changes[name]))
    return resp
def help_center_user_segment_create(self, data, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/help_center/user_segments#create-user-segment"
    api_path = "/api/v2/help_center/user_segments.json"
    return self.call(api_path, method="POST", data=data, **kwargs)
def numericise(value, empty2zero=False, default_blank="", allow_underscores_in_numeric_literals=False):
    """Returns a value that depends on the input string:
    - Float if input can be converted to Float
    - Integer if input can be converted to integer
    - Zero if the input string is empty and empty2zero flag is set
    - The same input string, empty or not, otherwise.
    Executable examples:
    >>> numericise("faa")
    'faa'
    >>> numericise("3")
    3
    >>> numericise("3_2", allow_underscores_in_numeric_literals=False)
    '3_2'
    >>> numericise("3_2", allow_underscores_in_numeric_literals=True)
    '32'
    >>> numericise("3.1")
    3.1
    >>> numericise("", empty2zero=True)
    0
    >>> numericise("", empty2zero=False)
    ''
    >>> numericise("", default_blank=None)
    >>>
    >>> numericise("", default_blank="foo")
    'foo'
    >>> numericise("")
    ''
    >>> numericise(None)
    >>>
    """
    if value is not None:
        if "_" in value and not allow_underscores_in_numeric_literals:
            return value
        try:
            value = int(value)
        except ValueError:
            try:
                value = float(value)
            except ValueError:
                if value == "":
                    if empty2zero:
                        value = 0
                    else:
                        value = default_blank
    return value
def on_zijd_mark(self, event):
    """
    Get mouse position on double right click, find the interpretation in
    range of the mouse position, then mark that interpretation bad or good
    Parameters
    ----------
    event : the wx MouseEvent for that click
    Alters
    ------
    current_fit
    """
    if not array(self.CART_rot).any():
        return
    pos = event.GetPosition()
    width, height = self.canvas1.get_width_height()
    pos[1] = height - pos[1]
    xpick_data, ypick_data = pos
    xdata_org = list(self.CART_rot[:, 0]) + list(self.CART_rot[:, 0])
    ydata_org = list(-1*self.CART_rot[:, 1]) + list(-1*self.CART_rot[:, 2])
    data_corrected = self.zijplot.transData.transform(
        vstack([xdata_org, ydata_org]).T)
    xdata, ydata = data_corrected.T
    xdata = list(map(float, xdata))
    ydata = list(map(float, ydata))
    e = 4e0
    index = None
    for i, (x, y) in enumerate(zip(xdata, ydata)):
        if 0 < sqrt((x-xpick_data)**2. + (y-ypick_data)**2.) < e:
            index = i
            break
    if index is not None:
        steps = self.Data[self.s]['zijdblock']
        if self.Data[self.s]['measurement_flag'][index % len(steps)] == "g":
            self.mark_meas_bad(index % len(steps))
        else:
            self.mark_meas_good(index % len(steps))
        pmag.magic_write(os.path.join(
            self.WD, "magic_measurements.txt"), self.mag_meas_data, "magic_measurements")
        self.recalculate_current_specimen_interpreatations()
        if self.ie_open:
            self.ie.update_current_fit_data()
        self.calculate_high_levels_data()
        self.update_selection()
def word_frequency(word, lang, wordlist='best', minimum=0.):
    """
    Get the frequency of `word` in the language with code `lang`, from the
    specified `wordlist`.
    These wordlists can be specified:
    - 'large': a wordlist built from at least 5 sources, containing word
      frequencies of 10^-8 and higher
    - 'small': a wordlist built from at least 3 sources, containing word
      frequencies of 10^-6 and higher
    - 'best': uses 'large' if available, and 'small' otherwise
    The value returned will always be at least as large as `minimum`.
    You could set this value to 10^-8, for example, to return 10^-8 for
    unknown words in the 'large' list instead of 0, avoiding a discontinuity.
    """
    args = (word, lang, wordlist, minimum)
    try:
        return _wf_cache[args]
    except KeyError:
        if len(_wf_cache) >= CACHE_SIZE:
            _wf_cache.clear()
        _wf_cache[args] = _word_frequency(*args)
        return _wf_cache[args]
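# A brief usage sketch, not part of the original source: the function returns a
# proportion of word occurrences, 0 for unknown words unless `minimum` is raised.
# The specific words and the rough magnitude in the comment are illustrative.
common = word_frequency('the', 'en')                                    # roughly 5e-2
rare = word_frequency('zymurgy', 'en', wordlist='large', minimum=1e-8)  # floored at 1e-8
print(common, rare)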
def load_objects(self, addr, num_bytes, ret_on_segv=False):
    """
    Load memory objects from paged memory.
    :param addr: Address to start loading.
    :param num_bytes: Number of bytes to load.
    :param bool ret_on_segv: True if you want load_bytes to return directly when a SIGSEGV is triggered, otherwise
                             a SimSegfaultError will be raised.
    :return: list of tuples of (addr, memory_object)
    :rtype: tuple
    """
    result = []
    end = addr + num_bytes
    for page_addr in self._containing_pages(addr, end):
        try:
            #print "Getting page %x" % (page_addr // self._page_size)
            page = self._get_page(page_addr // self._page_size)
            #print "... got it"
        except KeyError:
            #print "... missing"
            #print "... SEGV"
            # missing page
            if self.allow_segv:
                if ret_on_segv:
                    break
                raise SimSegfaultError(addr, 'read-miss')
            else:
                continue
        if self.allow_segv and not page.concrete_permissions & DbgPage.PROT_READ:
            #print "... SEGV"
            if ret_on_segv:
                break
            raise SimSegfaultError(addr, 'non-readable')
        result.extend(page.load_slice(self.state, addr, end))
    return result
def fromJSON(jsonValue):
    """returns a featureset from a JSON string"""
    jd = json.loads(jsonValue)
    features = []
    if 'fields' in jd:
        fields = jd['fields']
    else:
        fields = {'fields': []}
    if 'features' in jd:
        for feat in jd['features']:
            wkid = None
            spatialReference = None
            if 'spatialReference' in jd:
                spatialReference = jd['spatialReference']
                if 'wkid' in jd['spatialReference']:
                    wkid = jd['spatialReference']['wkid']
                elif 'latestWkid' in jd['spatialReference']:  # kept for compatibility
                    wkid = jd['spatialReference']['latestWkid']
            features.append(Feature(json_string=feat, wkid=wkid, spatialReference=spatialReference))
    return FeatureSet(fields,
                      features,
                      hasZ=jd['hasZ'] if 'hasZ' in jd else False,
                      hasM=jd['hasM'] if 'hasM' in jd else False,
                      geometryType=jd['geometryType'] if 'geometryType' in jd else None,
                      objectIdFieldName=jd['objectIdFieldName'] if 'objectIdFieldName' in jd else None,
                      globalIdFieldName=jd['globalIdFieldName'] if 'globalIdFieldName' in jd else None,
                      displayFieldName=jd['displayFieldName'] if 'displayFieldName' in jd else None,
                      spatialReference=jd['spatialReference'] if 'spatialReference' in jd else None)
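# A minimal usage sketch, not part of the original source: the payload below follows
# the usual Esri featureset JSON layout this parser walks (fields, features,
# geometryType, spatialReference); the field name and coordinates are invented.
import json

fs_json = json.dumps({
    "geometryType": "esriGeometryPoint",
    "spatialReference": {"wkid": 4326},
    "fields": [{"name": "OBJECTID", "type": "esriFieldTypeOID", "alias": "OBJECTID"}],
    "features": [
        {"attributes": {"OBJECTID": 1},
         "geometry": {"x": -95.36, "y": 29.76}}
    ],
})
fs = fromJSON(fs_json)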
def _get_ln_a_n_max(self, C, n_sites, idx, rup):
    """
    Defines the rock site amplification defined in equations 10a and 10b
    """
    ln_a_n_max = C["lnSC1AM"] * np.ones(n_sites)
    for i in [2, 3, 4]:
        if np.any(idx[i]):
            ln_a_n_max[idx[i]] += C["S{:g}".format(i)]
    return ln_a_n_max
def calc_contours(data, num_contours):
    """Get sets of contour points for numpy array `data`.
    `num_contours` specifies the number (int) of contours to make.
    Returns a list of numpy arrays of points--each array makes a polygon
    if plotted as such.
    """
    mn = np.nanmean(data)
    top = np.nanmax(data)
    levels = np.linspace(mn, top, num_contours)
    return get_contours(data, levels)
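# A short usage sketch, not from the original source: a synthetic 2-D Gaussian bump
# gives a handful of nested contour levels. `calc_contours` delegates to a
# `get_contours(data, levels)` helper defined elsewhere in the same module.
import numpy as np

yy, xx = np.mgrid[-3:3:200j, -3:3:200j]
data = np.exp(-(xx**2 + yy**2))
polygons = calc_contours(data, num_contours=5)   # list of point arrays, one per level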
def acquire_direct(self, address):
    """ Acquire a connection to a given address from the pool.
    The address supplied should always be an IP address, not
    a host name.
    This method is thread safe.
    """
    if self.closed():
        raise ServiceUnavailable("Connection pool closed")
    with self.lock:
        try:
            connections = self.connections[address]
        except KeyError:
            connections = self.connections[address] = deque()
        connection_acquisition_start_timestamp = perf_counter()
        while True:
            # try to find a free connection in pool
            for connection in list(connections):
                if connection.closed() or connection.defunct() or connection.timedout():
                    connections.remove(connection)
                    continue
                if not connection.in_use:
                    connection.in_use = True
                    return connection
            # all connections in pool are in-use
            infinite_connection_pool = (self._max_connection_pool_size < 0 or
                                        self._max_connection_pool_size == float("inf"))
            can_create_new_connection = infinite_connection_pool or len(connections) < self._max_connection_pool_size
            if can_create_new_connection:
                try:
                    connection = self.connector(address)
                except ServiceUnavailable:
                    self.remove(address)
                    raise
                else:
                    connection.pool = self
                    connection.in_use = True
                    connections.append(connection)
                    return connection
            # failed to obtain a connection from pool because the pool is full and no free connection in the pool
            span_timeout = self._connection_acquisition_timeout - (perf_counter() - connection_acquisition_start_timestamp)
            if span_timeout > 0:
                self.cond.wait(span_timeout)
                # if timed out, then we throw error. This time computation is needed, as with python 2.7, we cannot
                # tell if the condition is notified or timed out when we come to this line
                if self._connection_acquisition_timeout <= (perf_counter() - connection_acquisition_start_timestamp):
                    raise ClientError("Failed to obtain a connection from pool within {!r}s".format(
                        self._connection_acquisition_timeout))
            else:
                raise ClientError("Failed to obtain a connection from pool within {!r}s".format(self._connection_acquisition_timeout))
def sync_client(self):
    """Synchronous OAuth 2.0 Bearer client"""
    if not self._sync_client:
        self._sync_client = AlfSyncClient(
            token_endpoint=self.config.get('OAUTH_TOKEN_ENDPOINT'),
            client_id=self.config.get('OAUTH_CLIENT_ID'),
            client_secret=self.config.get('OAUTH_CLIENT_SECRET')
        )
    return self._sync_client
def TimeFromTicks(ticks, tz=None):
    """Construct a DB-API time value from the given ticks value.
    :type ticks: float
    :param ticks:
        a number of seconds since the epoch; see the documentation of the
        standard Python time module for details.
    :type tz: :class:`datetime.tzinfo`
    :param tz: (Optional) time zone to use for conversion
    :rtype: :class:`datetime.time`
    :returns: time represented by ticks.
    """
    dt = datetime.datetime.fromtimestamp(ticks, tz=tz)
    return dt.timetz()
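# A small usage sketch, not part of the original source. With an explicit UTC
# timezone the result is deterministic; without one it follows the local zone.
import datetime

t = TimeFromTicks(0, tz=datetime.timezone.utc)
assert t == datetime.time(0, 0, tzinfo=datetime.timezone.utc)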
def open_file(self, store=current_store, use_seek=False):
    """The shorthand of :meth:`~Image.open_file()` for
    the :attr:`original`.
    :param store: the storage which contains the image files
                  :data:`~sqlalchemy_imageattach.context.current_store`
                  by default
    :type store: :class:`~sqlalchemy_imageattach.store.Store`
    :param use_seek: whether the file should be seekable.
                     if :const:`True` it may be buffered in the memory.
                     default is :const:`False`
    :type use_seek: :class:`bool`
    :returns: the file-like object of the image, which is a context
              manager (plus, also seekable only if ``use_seek``
              is :const:`True`)
    :rtype: :class:`file`,
            :class:`~sqlalchemy_imageattach.file.FileProxy`,
            file-like object
    """
    original = self.require_original()
    return original.open_file(store, use_seek)
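# A usage sketch, not from the original source: `user.picture` stands in for any
# image set declared with sqlalchemy-imageattach (the attribute name is invented).
# The returned file-like object is used as a context manager over the original bytes.
with user.picture.open_file() as f:
    data = f.read()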
def unpack_rsp(cls, rsp_pb):
    """Convert from PLS response to user response"""
    if rsp_pb.retType != RET_OK:
        return RET_ERROR, rsp_pb.retMsg, None
    order_id = str(rsp_pb.s2c.orderID)
    modify_order_list = [{
        'trd_env': TRADE.REV_TRD_ENV_MAP[rsp_pb.s2c.header.trdEnv],
        'order_id': order_id
    }]
    return RET_OK, "", modify_order_list
def com_google_fonts_check_name_trailing_spaces(ttFont):
    """Name table records must not have trailing spaces."""
    failed = False
    for name_record in ttFont['name'].names:
        name_string = name_record.toUnicode()
        if name_string != name_string.strip():
            failed = True
            name_key = tuple([name_record.platformID, name_record.platEncID,
                              name_record.langID, name_record.nameID])
            shortened_str = name_record.toUnicode()
            if len(shortened_str) > 20:
                shortened_str = shortened_str[:10] + "[...]" + shortened_str[-10:]
            yield FAIL, (f"Name table record with key = {name_key} has"
                         " trailing spaces that must be removed:"
                         f" '{shortened_str}'")
    if not failed:
        yield PASS, ("No trailing spaces on name table entries.")
def DbGetClassPropertyList(self, argin):
    """ Get property list for a given Tango class with a specified filter
    :param argin: The filter
    :type: tango.DevString
    :return: Property name list
    :rtype: tango.DevVarStringArray """
    self._log.debug("In DbGetClassPropertyList()")
    if not argin:
        argin = "%"
    else:
        argin = replace_wildcard(argin)
    return self.db.get_class_property_list(argin)
def _to_string(self):
    """Implements the logic shared by __str__ and __repr__, while
    preventing infinite recursion when migrating to Python 3"""
    if self.sections:
        start = "/" if self.bound_start else "**/"
        sections = "/**/".join(str(section) for section in self.sections)
        end = "" if self.bound_end else "/**"
    else:
        start = ""
        sections = ""
        end = "" if self.bound_end else "**"
    return "{0}{1}{2}/{3}".format(start, sections, end, str(self.file_pattern))
def _remove_string_from_commastring(self, field, string):
    # type: (str, str) -> bool
    """Remove a string from a comma separated list of strings
    Args:
        field (str): Field containing comma separated list
        string (str): String to remove
    Returns:
        bool: True if string removed or False if not
    """
    commastring = self.data.get(field, '')
    if string in commastring:
        self.data[field] = commastring.replace(string, '')
        return True
    return False
def _retstr(ins):
    """ Returns from a procedure / function a string pointer (16bits) value
    """
    tmp, output = _str_oper(ins.quad[1], no_exaf=True)
    if not tmp:
        output.append('call __LOADSTR')
        REQUIRES.add('loadstr.asm')
    output.append('#pragma opt require hl')
    output.append('jp %s' % str(ins.quad[2]))
    return output
def _write(self, session, openFile, replaceParamFile):
    """
    Storm Pipe Network File Write to File Method
    """
    # Retrieve Connection objects and write to file
    connections = self.connections
    self._writeConnections(connections=connections,
                           fileObject=openFile)
    # Retrieve SuperJunction objects and write to file
    sjuncs = self.superJunctions
    self._writeSuperJunctions(superJunctions=sjuncs,
                              fileObject=openFile)
    # Retrieve SuperLink objects and write to file
    slinks = self.superLinks
    self._writeSuperLinks(superLinks=slinks,
                          fileObject=openFile)
def _create_pipe(self):
    """
    Creates a new pipe and returns the child end of the connection.
    To request an account from the pipe, use::

        pipe = queue._create_pipe()
        # Let the account manager choose an account.
        pipe.send(('acquire-account-for-host', host))
        account = pipe.recv()
        ...
        pipe.send(('release-account', account.id()))
        # Or acquire a specific account.
        pipe.send(('acquire-account', account.id()))
        account = pipe.recv()
        ...
        pipe.send(('release-account', account.id()))
        pipe.close()
    """
    child = _PipeHandler(self.account_manager)
    self.pipe_handlers[id(child)] = child
    child.start()
    return child.to_parent
def compute_samples(self):
    """ Sample from a Normal distribution with inferred mu and std """
    eps = tf.random_normal([self.batch_size, self.eq_samples, self.iw_samples, self.num_latent])
    z = tf.reshape(eps * self.std + self.mu, [-1, self.num_latent])
    return z
def oauth_token_show(self, id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/oauth_tokens#show-token"
    api_path = "/api/v2/oauth/tokens/{id}.json"
    api_path = api_path.format(id=id)
    return self.call(api_path, **kwargs)
def _authenticate_ssh(org):
    """Try authenticating via ssh; if successful, returns a User, otherwise returns None."""
    # Try to get username from git config
    username = os.environ.get(f"{org.upper()}_USERNAME")
    # Require ssh-agent
    child = pexpect.spawn("ssh -T git@github.com", encoding="utf8")
    # GitHub prints 'Hi {username}!...' when attempting to get shell access
    i = child.expect(["Hi (.+)! You've successfully authenticated",
                      "Enter passphrase for key",
                      "Permission denied",
                      "Are you sure you want to continue connecting"])
    child.close()
    if i == 0:
        if username is None:
            username = child.match.groups()[0]
    else:
        return None
    return User(name=username,
                repo=f"git@github.com:{org}/{username}")
def d(msg, *args, **kwargs):
    '''
    log a message at debug level;
    '''
    return logging.log(DEBUG, msg, *args, **kwargs)
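# A one-line usage sketch, not part of the original source: positional arguments are
# passed through to logging.log, so %-style lazy formatting works as usual.
d("loaded %d records from %s", 42, "cache.db")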
def appdata_roaming_dir():
    """Returns the roaming AppData directory for the installed ArcGIS Desktop."""
    install = arcpy.GetInstallInfo('desktop')
    app_data = arcpy.GetSystemEnvironment("APPDATA")
    product_dir = ''.join((install['ProductName'], major_version()))
    return os.path.join(app_data, 'ESRI', product_dir)
return os.path.join(app_data, 'ESRI', product_dir) | Returns the roaming AppData directory for the installed ArcGIS Desktop. | Below is the the instruction that describes the task:
### Input:
Returns the roaming AppData directory for the installed ArcGIS Desktop.
### Response:
def appdata_roaming_dir():
"""Returns the roaming AppData directory for the installed ArcGIS Desktop."""
install = arcpy.GetInstallInfo('desktop')
app_data = arcpy.GetSystemEnvironment("APPDATA")
product_dir = ''.join((install['ProductName'], major_version()))
return os.path.join(app_data, 'ESRI', product_dir) |
def terminate(self):
    """Terminate the child process.
    It is not an error to call this method when the child has already exited.
    """
    try:
        self.send_signal(signal.SIGTERM)
    except pyuv.error.ProcessError as e:
        if e.args[0] != pyuv.errno.UV_ESRCH:
            raise
def all_status():
    '''
    Return a composite of all status data and info for this minion.
    Warning: There is a LOT here!
    CLI Example:
    .. code-block:: bash
        salt '*' status.all_status
    '''
    return {'cpuinfo': cpuinfo(),
            'cpustats': cpustats(),
            'diskstats': diskstats(),
            'diskusage': diskusage(),
            'loadavg': loadavg(),
            'meminfo': meminfo(),
            'netdev': netdev(),
            'netstats': netstats(),
            'uptime': uptime(),
            'vmstats': vmstats(),
            'w': w()}
def get_file_list(file_dir, regex=''):
    """
    Creates a list of files that match the regex within file_dir.
    The list of files will have file_dir as path prefix.
    Parameters
    ----------
    file_dir: str
    regex: str
    Returns
    -------
    List of paths to files that match the regex
    """
    file_list = os.listdir(file_dir)
    file_list.sort()
    if regex:
        file_list = search_list(file_list, regex)
    file_list = [op.join(file_dir, fname) for fname in file_list]
    return file_list
The list of files will have file_dir as path prefix.
Parameters
----------
@param file_dir:
@param search_regex:
Returns:
--------
    List of paths to files that match the search_regex | Below is the instruction that describes the task:
### Input:
Creates a list of files that match the search_regex within file_dir.
The list of files will have file_dir as path prefix.
Parameters
----------
@param file_dir:
@param search_regex:
Returns:
--------
List of paths to files that match the search_regex
### Response:
def get_file_list(file_dir, regex=''):
"""
Creates a list of files that match the search_regex within file_dir.
The list of files will have file_dir as path prefix.
Parameters
----------
@param file_dir:
@param search_regex:
Returns:
--------
List of paths to files that match the search_regex
"""
file_list = os.listdir(file_dir)
file_list.sort()
if regex:
file_list = search_list(file_list, regex)
file_list = [op.join(file_dir, fname) for fname in file_list]
return file_list |
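As a point of comparison, here is a minimal standalone sketch of the same list-sort-filter-prefix idea using only the standard library; the directory and pattern are hypothetical, and this is not the module's own implementation (which relies on its search_list helper).
import os
import re

def list_matching(file_dir, regex=''):
    # list, sort, optionally filter by regex, then prefix with the directory
    names = sorted(os.listdir(file_dir))
    if regex:
        names = [n for n in names if re.search(regex, n)]
    return [os.path.join(file_dir, n) for n in names]

# e.g. list_matching('/tmp/session01', r'\.csv$') -> ['/tmp/session01/run1.csv', ...]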
def drop(self, filter_func=None, stash='active'):
"""
Drops states from a stash. This is an alias for move(), with defaults for the stashes.
:param filter_func: Drop states that match this filter. Should be a function that takes
a state and returns True or False. (default: drop all states)
:param stash: Drop matching states from this stash. (default: 'active')
:returns: The simulation manager, for chaining.
:rtype: SimulationManager
"""
return self.move(stash, self.DROP, filter_func=filter_func) | Drops states from a stash. This is an alias for move(), with defaults for the stashes.
:param filter_func: Drop states that match this filter. Should be a function that takes
a state and returns True or False. (default: drop all states)
:param stash: Drop matching states from this stash. (default: 'active')
:returns: The simulation manager, for chaining.
    :rtype: SimulationManager | Below is the instruction that describes the task:
### Input:
Drops states from a stash. This is an alias for move(), with defaults for the stashes.
:param filter_func: Drop states that match this filter. Should be a function that takes
a state and returns True or False. (default: drop all states)
:param stash: Drop matching states from this stash. (default: 'active')
:returns: The simulation manager, for chaining.
:rtype: SimulationManager
### Response:
def drop(self, filter_func=None, stash='active'):
"""
Drops states from a stash. This is an alias for move(), with defaults for the stashes.
:param filter_func: Drop states that match this filter. Should be a function that takes
a state and returns True or False. (default: drop all states)
:param stash: Drop matching states from this stash. (default: 'active')
:returns: The simulation manager, for chaining.
:rtype: SimulationManager
"""
return self.move(stash, self.DROP, filter_func=filter_func) |
def plot_data():
'''Plot sample data up with the fancy colormaps.
'''
var = ['temp', 'oxygen', 'salinity', 'fluorescence-ECO', 'density', 'PAR', 'turbidity', 'fluorescence-CDOM']
# colorbar limits for each property
lims = np.array([[26, 33], [0, 10], [0, 36], [0, 6], [1005, 1025], [0, 0.6], [0, 2], [0, 9]]) # reasonable values
# lims = np.array([[20,36], [26,33], [1.5,5.6], [0,4], [0,9], [0,1.5]]) # values to show colormaps
for fname in fnames:
fig, axes = plt.subplots(nrows=4, ncols=2)
fig.set_size_inches(20, 10)
fig.subplots_adjust(top=0.95, bottom=0.01, left=0.2, right=0.99, wspace=0.0, hspace=0.07)
i = 0
for ax, Var, cmap in zip(axes.flat, var, cmaps): # loop through data to plot up
# get variable data
lat, lon, z, data = test.read(Var, fname)
map1 = ax.scatter(lat, -z, c=data, cmap=cmap, s=10, linewidths=0., vmin=lims[i, 0], vmax=lims[i, 1])
# no stupid offset
y_formatter = mpl.ticker.ScalarFormatter(useOffset=False)
ax.xaxis.set_major_formatter(y_formatter)
if i == 6:
ax.set_xlabel('Latitude [degrees]')
ax.set_ylabel('Depth [m]')
else:
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_ylim(-z.max(), 0)
ax.set_xlim(lat.min(), lat.max())
cb = plt.colorbar(map1, ax=ax, pad=0.02)
cb.set_label(cmap.name + ' [' + '$' + cmap.units + '$]')
i += 1
        fig.savefig('figures/' + fname.split('.')[0] + '.png', bbox_inches='tight') | Plot sample data up with the fancy colormaps. | Below is the instruction that describes the task:
### Input:
Plot sample data up with the fancy colormaps.
### Response:
def plot_data():
'''Plot sample data up with the fancy colormaps.
'''
var = ['temp', 'oxygen', 'salinity', 'fluorescence-ECO', 'density', 'PAR', 'turbidity', 'fluorescence-CDOM']
# colorbar limits for each property
lims = np.array([[26, 33], [0, 10], [0, 36], [0, 6], [1005, 1025], [0, 0.6], [0, 2], [0, 9]]) # reasonable values
# lims = np.array([[20,36], [26,33], [1.5,5.6], [0,4], [0,9], [0,1.5]]) # values to show colormaps
for fname in fnames:
fig, axes = plt.subplots(nrows=4, ncols=2)
fig.set_size_inches(20, 10)
fig.subplots_adjust(top=0.95, bottom=0.01, left=0.2, right=0.99, wspace=0.0, hspace=0.07)
i = 0
for ax, Var, cmap in zip(axes.flat, var, cmaps): # loop through data to plot up
# get variable data
lat, lon, z, data = test.read(Var, fname)
map1 = ax.scatter(lat, -z, c=data, cmap=cmap, s=10, linewidths=0., vmin=lims[i, 0], vmax=lims[i, 1])
# no stupid offset
y_formatter = mpl.ticker.ScalarFormatter(useOffset=False)
ax.xaxis.set_major_formatter(y_formatter)
if i == 6:
ax.set_xlabel('Latitude [degrees]')
ax.set_ylabel('Depth [m]')
else:
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_ylim(-z.max(), 0)
ax.set_xlim(lat.min(), lat.max())
cb = plt.colorbar(map1, ax=ax, pad=0.02)
cb.set_label(cmap.name + ' [' + '$' + cmap.units + '$]')
i += 1
fig.savefig('figures/' + fname.split('.')[0] + '.png', bbox_inches='tight') |
def matrix_chain_mult(M):
"""Matrix chain multiplication
:param M: list of matrices
:returns: M[0] * ... * M[-1], computed in time optimal order
:complexity: whatever is needed by the multiplications
"""
opt, arg = matrix_mult_opt_order(M)
return _apply_order(M, arg, 0, len(M)-1) | Matrix chain multiplication
:param M: list of matrices
:returns: M[0] * ... * M[-1], computed in time optimal order
    :complexity: whatever is needed by the multiplications | Below is the instruction that describes the task:
### Input:
Matrix chain multiplication
:param M: list of matrices
:returns: M[0] * ... * M[-1], computed in time optimal order
:complexity: whatever is needed by the multiplications
### Response:
def matrix_chain_mult(M):
"""Matrix chain multiplication
:param M: list of matrices
:returns: M[0] * ... * M[-1], computed in time optimal order
:complexity: whatever is needed by the multiplications
"""
opt, arg = matrix_mult_opt_order(M)
return _apply_order(M, arg, 0, len(M)-1) |
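The helpers matrix_mult_opt_order and _apply_order are not shown in this record; below is a standalone sketch of the classic O(n^3) dynamic program such a helper typically implements, computing only the optimal scalar-multiplication cost for a chain described by its dimensions (the dimensions here are made up).
def chain_order_cost(dims):
    # dims[i], dims[i+1] are the shape of matrix i in the chain
    n = len(dims) - 1                      # number of matrices
    cost = [[0] * n for _ in range(n)]     # cost[i][j]: best cost for M[i..j]
    for length in range(2, n + 1):         # grow chain length
        for i in range(n - length + 1):
            j = i + length - 1
            cost[i][j] = min(
                cost[i][k] + cost[k + 1][j] + dims[i] * dims[k + 1] * dims[j + 1]
                for k in range(i, j)
            )
    return cost[0][n - 1]

# (10x30)(30x5)(5x60): multiplying the first pair first costs 1500 + 3000 = 4500
assert chain_order_cost([10, 30, 5, 60]) == 4500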
def delete(self, item, dry_run=None):
"""Deletes item in file."""
logger.debug('Deleting item. Item: {item} Table: {namespace}'.format(
item=item,
namespace=self.namespace
))
if not dry_run:
self.table.delete_item(Key={'id': item['id']})
    return item | Deletes item in file. | Below is the instruction that describes the task:
### Input:
Deletes item in file.
### Response:
def delete(self, item, dry_run=None):
"""Deletes item in file."""
logger.debug('Deleting item. Item: {item} Table: {namespace}'.format(
item=item,
namespace=self.namespace
))
if not dry_run:
self.table.delete_item(Key={'id': item['id']})
return item |
def get(self, section, name, default=None, **kw):
"""Replacement for :func:`boto.pyami.config.Config.get()`."""
try:
return self.unbound_method(self.instance, section, name, **kw)
except Exception:
        return default | Replacement for :func:`boto.pyami.config.Config.get()`. | Below is the instruction that describes the task:
### Input:
Replacement for :func:`boto.pyami.config.Config.get()`.
### Response:
def get(self, section, name, default=None, **kw):
"""Replacement for :func:`boto.pyami.config.Config.get()`."""
try:
return self.unbound_method(self.instance, section, name, **kw)
except Exception:
return default |
def permission_required(perm, queryset_or_model=None,
login_url=None, raise_exception=False):
"""
Permission check decorator for classbased/functional generic view
This decorator works as class, method or function decorator without any
modification.
DO NOT use ``method_decorator`` or whatever while this decorator will use
``self`` argument for method of classbased generic view.
Parameters
----------
perm : string
A permission string
queryset_or_model : queryset or model
A queryset or model for finding object.
With classbased generic view, ``None`` for using view default queryset.
When the view does not define ``get_queryset``, ``queryset``,
``get_object``, or ``object`` then ``obj=None`` is used to check
permission.
With functional generic view, ``None`` for using passed queryset.
When non queryset was passed then ``obj=None`` is used to check
permission.
Examples
--------
>>> # As class decorator
>>> @permission_required('auth.change_user')
>>> class UpdateAuthUserView(UpdateView):
... pass
>>> # As method decorator
>>> class UpdateAuthUserView(UpdateView):
... @permission_required('auth.change_user')
... def dispatch(self, request, *args, **kwargs):
... pass
>>> # As function decorator
>>> @permission_required('auth.change_user')
>>> def update_auth_user(request, *args, **kwargs):
... pass
.. Note::
Classbased generic view is recommended while you can regulate the queryset
with ``get_queryset()`` method.
Detecting object from passed kwargs may not work correctly.
"""
# convert model to queryset
if queryset_or_model and issubclass(queryset_or_model, Model):
queryset_or_model = queryset_or_model._default_manager.all()
def wrapper(class_or_method):
if inspect.isclass(class_or_method):
from permission.decorators.classbase import \
permission_required as decorator
else:
# method_permission_required can handle method or function
# correctly.
from permission.decorators.methodbase import \
permission_required as decorator
return decorator(perm, queryset_or_model,
login_url, raise_exception)(class_or_method)
return wrapper | Permission check decorator for classbased/functional generic view
This decorator works as class, method or function decorator without any
modification.
DO NOT use ``method_decorator`` or whatever while this decorator will use
``self`` argument for method of classbased generic view.
Parameters
----------
perm : string
A permission string
queryset_or_model : queryset or model
A queryset or model for finding object.
With classbased generic view, ``None`` for using view default queryset.
When the view does not define ``get_queryset``, ``queryset``,
``get_object``, or ``object`` then ``obj=None`` is used to check
permission.
With functional generic view, ``None`` for using passed queryset.
When non queryset was passed then ``obj=None`` is used to check
permission.
Examples
--------
>>> # As class decorator
>>> @permission_required('auth.change_user')
>>> class UpdateAuthUserView(UpdateView):
... pass
>>> # As method decorator
>>> class UpdateAuthUserView(UpdateView):
... @permission_required('auth.change_user')
... def dispatch(self, request, *args, **kwargs):
... pass
>>> # As function decorator
>>> @permission_required('auth.change_user')
>>> def update_auth_user(request, *args, **kwargs):
... pass
.. Note::
Classbased generic view is recommended while you can regulate the queryset
with ``get_queryset()`` method.
    Detecting object from passed kwargs may not work correctly. | Below is the instruction that describes the task:
### Input:
Permission check decorator for classbased/functional generic view
This decorator works as class, method or function decorator without any
modification.
DO NOT use ``method_decorator`` or whatever while this decorator will use
``self`` argument for method of classbased generic view.
Parameters
----------
perm : string
A permission string
queryset_or_model : queryset or model
A queryset or model for finding object.
With classbased generic view, ``None`` for using view default queryset.
When the view does not define ``get_queryset``, ``queryset``,
``get_object``, or ``object`` then ``obj=None`` is used to check
permission.
With functional generic view, ``None`` for using passed queryset.
When non queryset was passed then ``obj=None`` is used to check
permission.
Examples
--------
>>> # As class decorator
>>> @permission_required('auth.change_user')
>>> class UpdateAuthUserView(UpdateView):
... pass
>>> # As method decorator
>>> class UpdateAuthUserView(UpdateView):
... @permission_required('auth.change_user')
... def dispatch(self, request, *args, **kwargs):
... pass
>>> # As function decorator
>>> @permission_required('auth.change_user')
>>> def update_auth_user(request, *args, **kwargs):
... pass
.. Note::
Classbased generic view is recommended while you can regulate the queryset
with ``get_queryset()`` method.
Detecting object from passed kwargs may not work correctly.
### Response:
def permission_required(perm, queryset_or_model=None,
login_url=None, raise_exception=False):
"""
Permission check decorator for classbased/functional generic view
This decorator works as class, method or function decorator without any
modification.
DO NOT use ``method_decorator`` or whatever while this decorator will use
``self`` argument for method of classbased generic view.
Parameters
----------
perm : string
A permission string
queryset_or_model : queryset or model
A queryset or model for finding object.
With classbased generic view, ``None`` for using view default queryset.
When the view does not define ``get_queryset``, ``queryset``,
``get_object``, or ``object`` then ``obj=None`` is used to check
permission.
With functional generic view, ``None`` for using passed queryset.
When non queryset was passed then ``obj=None`` is used to check
permission.
Examples
--------
>>> # As class decorator
>>> @permission_required('auth.change_user')
>>> class UpdateAuthUserView(UpdateView):
... pass
>>> # As method decorator
>>> class UpdateAuthUserView(UpdateView):
... @permission_required('auth.change_user')
... def dispatch(self, request, *args, **kwargs):
... pass
>>> # As function decorator
>>> @permission_required('auth.change_user')
>>> def update_auth_user(request, *args, **kwargs):
... pass
.. Note::
Classbased generic view is recommended while you can regulate the queryset
with ``get_queryset()`` method.
Detecting object from passed kwargs may not work correctly.
"""
# convert model to queryset
if queryset_or_model and issubclass(queryset_or_model, Model):
queryset_or_model = queryset_or_model._default_manager.all()
def wrapper(class_or_method):
if inspect.isclass(class_or_method):
from permission.decorators.classbase import \
permission_required as decorator
else:
# method_permission_required can handle method or function
# correctly.
from permission.decorators.methodbase import \
permission_required as decorator
return decorator(perm, queryset_or_model,
login_url, raise_exception)(class_or_method)
return wrapper |
def serialize_message(self, message: SegmentSequence) -> bytes:
"""Serialize a message (as SegmentSequence, list of FinTS3Segment, or FinTS3Segment) into a byte array"""
if isinstance(message, FinTS3Segment):
message = SegmentSequence([message])
if isinstance(message, (list, tuple, Iterable)):
message = SegmentSequence(list(message))
result = []
for segment in message.segments:
result.append(self.serialize_segment(segment))
    return self.implode_segments(result) | Serialize a message (as SegmentSequence, list of FinTS3Segment, or FinTS3Segment) into a byte array | Below is the instruction that describes the task:
### Input:
Serialize a message (as SegmentSequence, list of FinTS3Segment, or FinTS3Segment) into a byte array
### Response:
def serialize_message(self, message: SegmentSequence) -> bytes:
"""Serialize a message (as SegmentSequence, list of FinTS3Segment, or FinTS3Segment) into a byte array"""
if isinstance(message, FinTS3Segment):
message = SegmentSequence([message])
if isinstance(message, (list, tuple, Iterable)):
message = SegmentSequence(list(message))
result = []
for segment in message.segments:
result.append(self.serialize_segment(segment))
return self.implode_segments(result) |
def mkdir(self, astr_dirSpec):
"""
Given an <astr_dirSpec> in form '/a/b/c/d/.../f',
create that path in the internal stree, creating all
intermediate nodes as necessary
:param astr_dirSpec:
:return:
"""
if astr_dirSpec != '/' and astr_dirSpec != "//":
str_currentPath = self.cwd()
l_pathSpec = astr_dirSpec.split('/')
if not len(l_pathSpec[0]):
self.cd('/')
l_nodesDepth = l_pathSpec[1:]
else:
l_nodesDepth = l_pathSpec
for d in l_nodesDepth:
self.mkcd(d)
self.cd(str_currentPath) | Given an <astr_dirSpec> in form '/a/b/c/d/.../f',
create that path in the internal stree, creating all
intermediate nodes as necessary
:param astr_dirSpec:
    :return: | Below is the instruction that describes the task:
### Input:
Given an <astr_dirSpec> in form '/a/b/c/d/.../f',
create that path in the internal stree, creating all
intermediate nodes as necessary
:param astr_dirSpec:
:return:
### Response:
def mkdir(self, astr_dirSpec):
"""
Given an <astr_dirSpec> in form '/a/b/c/d/.../f',
create that path in the internal stree, creating all
intermediate nodes as necessary
:param astr_dirSpec:
:return:
"""
if astr_dirSpec != '/' and astr_dirSpec != "//":
str_currentPath = self.cwd()
l_pathSpec = astr_dirSpec.split('/')
if not len(l_pathSpec[0]):
self.cd('/')
l_nodesDepth = l_pathSpec[1:]
else:
l_nodesDepth = l_pathSpec
for d in l_nodesDepth:
self.mkcd(d)
self.cd(str_currentPath) |
def _can_compute(self, _id, persistence):
"""
Return true if this feature stored, or is unstored, but can be computed
from stored dependencies
"""
if self.store and self._stored(_id, persistence):
return True
if self.is_root:
return False
return all(
[n._can_compute(_id, persistence) for n in self.dependencies]) | Return true if this feature stored, or is unstored, but can be computed
    from stored dependencies | Below is the instruction that describes the task:
### Input:
Return true if this feature stored, or is unstored, but can be computed
from stored dependencies
### Response:
def _can_compute(self, _id, persistence):
"""
Return true if this feature stored, or is unstored, but can be computed
from stored dependencies
"""
if self.store and self._stored(_id, persistence):
return True
if self.is_root:
return False
return all(
[n._can_compute(_id, persistence) for n in self.dependencies]) |
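A standalone sketch of the same recursive rule on a plain dependency mapping (the node names are made up): a node is computable if it is stored, or if it is not a root and every dependency is itself computable.
def can_compute(node, stored, deps):
    if node in stored:
        return True
    children = deps.get(node, [])
    if not children:            # an unstored root cannot be recomputed
        return False
    return all(can_compute(c, stored, deps) for c in children)

deps = {'spectrogram': ['audio'], 'embedding': ['spectrogram']}
assert can_compute('embedding', stored={'audio'}, deps=deps)
assert not can_compute('embedding', stored=set(), deps=deps)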
def load_plugin(self, name):
"""This automatically loads a plugin by the given name from the
current source and returns the module. This is a convenient
alternative to the import statement and saves you from invoking
``__import__`` or a similar function yourself.
:param name: the name of the plugin to load.
"""
if '.' in name:
raise ImportError('Plugin names cannot contain dots.')
with self:
return __import__(self.base.package + '.' + name,
globals(), {}, ['__name__']) | This automatically loads a plugin by the given name from the
current source and returns the module. This is a convenient
alternative to the import statement and saves you from invoking
``__import__`` or a similar function yourself.
    :param name: the name of the plugin to load. | Below is the instruction that describes the task:
### Input:
This automatically loads a plugin by the given name from the
current source and returns the module. This is a convenient
alternative to the import statement and saves you from invoking
``__import__`` or a similar function yourself.
:param name: the name of the plugin to load.
### Response:
def load_plugin(self, name):
"""This automatically loads a plugin by the given name from the
current source and returns the module. This is a convenient
alternative to the import statement and saves you from invoking
``__import__`` or a similar function yourself.
:param name: the name of the plugin to load.
"""
if '.' in name:
raise ImportError('Plugin names cannot contain dots.')
with self:
return __import__(self.base.package + '.' + name,
globals(), {}, ['__name__']) |
def generate_phase_1(dim = 40):
"""
The first step in creating datapoints in the Poirazi & Mel model.
This returns a vector of dimension dim, with the last four values set to
1 and the rest drawn from a normal distribution.
"""
phase_1 = numpy.random.normal(0, 1, dim)
for i in range(dim - 4, dim):
phase_1[i] = 1.0
return phase_1 | The first step in creating datapoints in the Poirazi & Mel model.
This returns a vector of dimension dim, with the last four values set to
    1 and the rest drawn from a normal distribution. | Below is the instruction that describes the task:
### Input:
The first step in creating datapoints in the Poirazi & Mel model.
This returns a vector of dimension dim, with the last four values set to
1 and the rest drawn from a normal distribution.
### Response:
def generate_phase_1(dim = 40):
"""
The first step in creating datapoints in the Poirazi & Mel model.
This returns a vector of dimension dim, with the last four values set to
1 and the rest drawn from a normal distribution.
"""
phase_1 = numpy.random.normal(0, 1, dim)
for i in range(dim - 4, dim):
phase_1[i] = 1.0
return phase_1 |
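A quick sanity check on the construction above, assuming the function is in scope: the result is a NumPy vector whose last four entries are pinned to 1 and whose remaining entries are standard-normal draws.
v = generate_phase_1(dim=10)
assert v.shape == (10,)
assert all(v[i] == 1.0 for i in range(6, 10))   # the last four positions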
def plane_xz(size=(10, 10), resolution=(10, 10)) -> VAO:
"""
Generates a plane on the xz axis of a specific size and resolution.
Normals and texture coordinates are also included.
Args:
size: (x, y) tuple
resolution: (x, y) tuple
Returns:
A :py:class:`demosys.opengl.vao.VAO` instance
"""
sx, sz = size
rx, rz = resolution
dx, dz = sx / rx, sz / rz # step
ox, oz = -sx / 2, -sz / 2 # start offset
def gen_pos():
for z in range(rz):
for x in range(rx):
yield ox + x * dx
yield 0
yield oz + z * dz
def gen_uv():
for z in range(rz):
for x in range(rx):
yield x / (rx - 1)
yield 1 - z / (rz - 1)
def gen_normal():
for _ in range(rx * rz):
yield 0.0
yield 1.0
yield 0.0
def gen_index():
for z in range(rz - 1):
for x in range(rx - 1):
# quad poly left
yield z * rz + x + 1
yield z * rz + x
yield z * rz + x + rx
# quad poly right
yield z * rz + x + 1
yield z * rz + x + rx
yield z * rz + x + rx + 1
pos_data = numpy.fromiter(gen_pos(), dtype=numpy.float32)
uv_data = numpy.fromiter(gen_uv(), dtype=numpy.float32)
normal_data = numpy.fromiter(gen_normal(), dtype=numpy.float32)
index_data = numpy.fromiter(gen_index(), dtype=numpy.uint32)
vao = VAO("plane_xz", mode=moderngl.TRIANGLES)
vao.buffer(pos_data, '3f', ['in_position'])
vao.buffer(uv_data, '2f', ['in_uv'])
vao.buffer(normal_data, '3f', ['in_normal'])
vao.index_buffer(index_data, index_element_size=4)
return vao | Generates a plane on the xz axis of a specific size and resolution.
Normals and texture coordinates are also included.
Args:
size: (x, y) tuple
resolution: (x, y) tuple
Returns:
        A :py:class:`demosys.opengl.vao.VAO` instance | Below is the instruction that describes the task:
### Input:
Generates a plane on the xz axis of a specific size and resolution.
Normals and texture coordinates are also included.
Args:
size: (x, y) tuple
resolution: (x, y) tuple
Returns:
A :py:class:`demosys.opengl.vao.VAO` instance
### Response:
def plane_xz(size=(10, 10), resolution=(10, 10)) -> VAO:
"""
Generates a plane on the xz axis of a specific size and resolution.
Normals and texture coordinates are also included.
Args:
size: (x, y) tuple
resolution: (x, y) tuple
Returns:
A :py:class:`demosys.opengl.vao.VAO` instance
"""
sx, sz = size
rx, rz = resolution
dx, dz = sx / rx, sz / rz # step
ox, oz = -sx / 2, -sz / 2 # start offset
def gen_pos():
for z in range(rz):
for x in range(rx):
yield ox + x * dx
yield 0
yield oz + z * dz
def gen_uv():
for z in range(rz):
for x in range(rx):
yield x / (rx - 1)
yield 1 - z / (rz - 1)
def gen_normal():
for _ in range(rx * rz):
yield 0.0
yield 1.0
yield 0.0
def gen_index():
for z in range(rz - 1):
for x in range(rx - 1):
# quad poly left
yield z * rz + x + 1
yield z * rz + x
yield z * rz + x + rx
# quad poly right
yield z * rz + x + 1
yield z * rz + x + rx
yield z * rz + x + rx + 1
pos_data = numpy.fromiter(gen_pos(), dtype=numpy.float32)
uv_data = numpy.fromiter(gen_uv(), dtype=numpy.float32)
normal_data = numpy.fromiter(gen_normal(), dtype=numpy.float32)
index_data = numpy.fromiter(gen_index(), dtype=numpy.uint32)
vao = VAO("plane_xz", mode=moderngl.TRIANGLES)
vao.buffer(pos_data, '3f', ['in_position'])
vao.buffer(uv_data, '2f', ['in_uv'])
vao.buffer(normal_data, '3f', ['in_normal'])
vao.index_buffer(index_data, index_element_size=4)
return vao |
def outspiral_loop(N):
"""
Return a list of points that will loop outwards in a 2D lattice in terms
of distance from a central point. So if N=2 this will be [0,0], [0,1],
[0,-1],[1,0],[-1,0],[1,1] .... This is useful when you want to loop over
a number of bins, but want to start in the center and work outwards.
"""
# Create a 2D lattice of all points
X,Y = numpy.meshgrid(numpy.arange(-N,N+1), numpy.arange(-N,N+1))
# Flatten it
X = numpy.ndarray.flatten(X)
Y = numpy.ndarray.flatten(Y)
# Force to an integer
X = numpy.array(X, dtype=int)
Y = numpy.array(Y, dtype=int)
# Calculate distances
G = numpy.sqrt(X**2+Y**2)
# Combine back into an array
out_arr = numpy.array([X,Y,G])
# And order correctly
sorted_out_arr = out_arr[:,out_arr[2].argsort()]
return sorted_out_arr[:2,:].T | Return a list of points that will loop outwards in a 2D lattice in terms
of distance from a central point. So if N=2 this will be [0,0], [0,1],
[0,-1],[1,0],[-1,0],[1,1] .... This is useful when you want to loop over
    a number of bins, but want to start in the center and work outwards. | Below is the instruction that describes the task:
### Input:
Return a list of points that will loop outwards in a 2D lattice in terms
of distance from a central point. So if N=2 this will be [0,0], [0,1],
[0,-1],[1,0],[-1,0],[1,1] .... This is useful when you want to loop over
a number of bins, but want to start in the center and work outwards.
### Response:
def outspiral_loop(N):
"""
Return a list of points that will loop outwards in a 2D lattice in terms
of distance from a central point. So if N=2 this will be [0,0], [0,1],
[0,-1],[1,0],[-1,0],[1,1] .... This is useful when you want to loop over
a number of bins, but want to start in the center and work outwards.
"""
# Create a 2D lattice of all points
X,Y = numpy.meshgrid(numpy.arange(-N,N+1), numpy.arange(-N,N+1))
# Flatten it
X = numpy.ndarray.flatten(X)
Y = numpy.ndarray.flatten(Y)
# Force to an integer
X = numpy.array(X, dtype=int)
Y = numpy.array(Y, dtype=int)
# Calculate distances
G = numpy.sqrt(X**2+Y**2)
# Combine back into an array
out_arr = numpy.array([X,Y,G])
# And order correctly
sorted_out_arr = out_arr[:,out_arr[2].argsort()]
return sorted_out_arr[:2,:].T |
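For illustration, the N=1 case (assuming the function and numpy are in scope): the nine lattice points come back ordered by distance from the origin, so the centre point is first, the four axis neighbours next, and the four diagonal points last.
pts = outspiral_loop(1)
assert len(pts) == 9
assert tuple(pts[0]) == (0, 0)                        # centre first
assert {abs(x) + abs(y) for x, y in pts[1:5]} == {1}  # then the four axis neighbours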
def sample_given_context(self, c, c_dims):
'''
Sample the region with max progress among regions that have the same context
c: context value on c_dims dimensions
c_dims: w.r.t sensory space dimensions
'''
index = self.discrete_progress.sample_given_context(c, c_dims, self.space)
return self.space.rand_value(index).flatten()[list(set(range(len(self.space.cardinalities))) - set(c_dims))] | Sample the region with max progress among regions that have the same context
c: context value on c_dims dimensions
    c_dims: w.r.t sensory space dimensions | Below is the instruction that describes the task:
### Input:
Sample the region with max progress among regions that have the same context
c: context value on c_dims dimensions
c_dims: w.r.t sensory space dimensions
### Response:
def sample_given_context(self, c, c_dims):
'''
Sample the region with max progress among regions that have the same context
c: context value on c_dims dimensions
c_dims: w.r.t sensory space dimensions
'''
index = self.discrete_progress.sample_given_context(c, c_dims, self.space)
return self.space.rand_value(index).flatten()[list(set(range(len(self.space.cardinalities))) - set(c_dims))] |
def iterdays(self, d1, d2):
"""
Date iterator returning dates in d1 <= x < d2
"""
curr = datetime.datetime.combine(d1, datetime.time())
end = datetime.datetime.combine(d2, datetime.time())
if d1.date() == d2.date():
yield curr
return
while curr < end:
yield curr
        curr = curr + datetime.timedelta(days=1) | Date iterator returning dates in d1 <= x < d2 | Below is the instruction that describes the task:
### Input:
Date iterator returning dates in d1 <= x < d2
### Response:
def iterdays(self, d1, d2):
"""
Date iterator returning dates in d1 <= x < d2
"""
curr = datetime.datetime.combine(d1, datetime.time())
end = datetime.datetime.combine(d2, datetime.time())
if d1.date() == d2.date():
yield curr
return
while curr < end:
yield curr
curr = curr + datetime.timedelta(days=1) |
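A simplified standalone sketch of the same half-open iteration; it takes plain dates and omits the same-day special case handled by the method above.
import datetime

def iter_days(d1, d2):
    # yield midnight datetimes for every day with d1 <= day < d2
    curr = datetime.datetime.combine(d1, datetime.time())
    end = datetime.datetime.combine(d2, datetime.time())
    while curr < end:
        yield curr
        curr += datetime.timedelta(days=1)

days = list(iter_days(datetime.date(2020, 1, 1), datetime.date(2020, 1, 4)))
assert len(days) == 3   # Jan 1, 2 and 3; the end date is excluded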
def _parse_port_ranges(pool_str):
"""Given a 'N-P,X-Y' description of port ranges, return a set of ints."""
ports = set()
for range_str in pool_str.split(','):
try:
a, b = range_str.split('-', 1)
start, end = int(a), int(b)
except ValueError:
log.error('Ignoring unparsable port range %r.', range_str)
continue
if start < 1 or end > 65535:
log.error('Ignoring out of bounds port range %r.', range_str)
continue
ports.update(set(range(start, end + 1)))
    return ports | Given a 'N-P,X-Y' description of port ranges, return a set of ints. | Below is the instruction that describes the task:
### Input:
Given a 'N-P,X-Y' description of port ranges, return a set of ints.
### Response:
def _parse_port_ranges(pool_str):
"""Given a 'N-P,X-Y' description of port ranges, return a set of ints."""
ports = set()
for range_str in pool_str.split(','):
try:
a, b = range_str.split('-', 1)
start, end = int(a), int(b)
except ValueError:
log.error('Ignoring unparsable port range %r.', range_str)
continue
if start < 1 or end > 65535:
log.error('Ignoring out of bounds port range %r.', range_str)
continue
ports.update(set(range(start, end + 1)))
return ports |
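Hypothetical usage, assuming the module-level log used above is configured; note that a single port still has to be written as a one-element range such as '9000-9000'.
ports = _parse_port_ranges('8000-8002,9000-9000')
assert ports == {8000, 8001, 8002, 9000}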
def reverse_shortlex(end, other, excludeend=False):
"""Yield all intersections of end with other in reverse shortlex order.
>>> ['{:03b}'.format(s) for s in reverse_shortlex(0b111, [0b011, 0b101, 0b110])]
['111', '011', '101', '110', '001', '010', '100', '000']
>>> ', '.join(''.join(sorted(s))
... for s in reverse_shortlex({'a', 'b', 'c', 'd'},
... [{'b', 'c', 'd'}, {'a', 'c', 'd'}, {'a', 'b', 'd'}, {'a', 'b', 'c'}]))
'abcd, bcd, acd, abd, abc, cd, bd, bc, ad, ac, ab, d, c, b, a, '
>>> assert list(reverse_shortlex({1, 2}, [{1}, {2}], excludeend=True)) == \
[{1}, {2}, set()]
"""
if not excludeend:
yield end
queue = collections.deque([(end, other)])
while queue:
current, other = queue.popleft()
while other:
first, other = other[0], other[1:]
result = current & first
yield result
if other:
queue.append((result, other)) | Yield all intersections of end with other in reverse shortlex order.
>>> ['{:03b}'.format(s) for s in reverse_shortlex(0b111, [0b011, 0b101, 0b110])]
['111', '011', '101', '110', '001', '010', '100', '000']
>>> ', '.join(''.join(sorted(s))
... for s in reverse_shortlex({'a', 'b', 'c', 'd'},
... [{'b', 'c', 'd'}, {'a', 'c', 'd'}, {'a', 'b', 'd'}, {'a', 'b', 'c'}]))
'abcd, bcd, acd, abd, abc, cd, bd, bc, ad, ac, ab, d, c, b, a, '
>>> assert list(reverse_shortlex({1, 2}, [{1}, {2}], excludeend=True)) == \
    [{1}, {2}, set()] | Below is the instruction that describes the task:
### Input:
Yield all intersections of end with other in reverse shortlex order.
>>> ['{:03b}'.format(s) for s in reverse_shortlex(0b111, [0b011, 0b101, 0b110])]
['111', '011', '101', '110', '001', '010', '100', '000']
>>> ', '.join(''.join(sorted(s))
... for s in reverse_shortlex({'a', 'b', 'c', 'd'},
... [{'b', 'c', 'd'}, {'a', 'c', 'd'}, {'a', 'b', 'd'}, {'a', 'b', 'c'}]))
'abcd, bcd, acd, abd, abc, cd, bd, bc, ad, ac, ab, d, c, b, a, '
>>> assert list(reverse_shortlex({1, 2}, [{1}, {2}], excludeend=True)) == \
[{1}, {2}, set()]
### Response:
def reverse_shortlex(end, other, excludeend=False):
"""Yield all intersections of end with other in reverse shortlex order.
>>> ['{:03b}'.format(s) for s in reverse_shortlex(0b111, [0b011, 0b101, 0b110])]
['111', '011', '101', '110', '001', '010', '100', '000']
>>> ', '.join(''.join(sorted(s))
... for s in reverse_shortlex({'a', 'b', 'c', 'd'},
... [{'b', 'c', 'd'}, {'a', 'c', 'd'}, {'a', 'b', 'd'}, {'a', 'b', 'c'}]))
'abcd, bcd, acd, abd, abc, cd, bd, bc, ad, ac, ab, d, c, b, a, '
>>> assert list(reverse_shortlex({1, 2}, [{1}, {2}], excludeend=True)) == \
[{1}, {2}, set()]
"""
if not excludeend:
yield end
queue = collections.deque([(end, other)])
while queue:
current, other = queue.popleft()
while other:
first, other = other[0], other[1:]
result = current & first
yield result
if other:
queue.append((result, other)) |
def resolve_interpreter(exe):
"""
If the executable given isn't an absolute path, search $PATH for the interpreter
"""
if os.path.abspath(exe) != exe:
paths = os.environ.get('PATH', '').split(os.pathsep)
for path in paths:
if os.path.exists(os.path.join(path, exe)):
exe = os.path.join(path, exe)
break
if not os.path.exists(exe):
logger.fatal('The executable %s (from --python=%s) does not exist' % (exe, exe))
sys.exit(3)
    return exe | If the executable given isn't an absolute path, search $PATH for the interpreter | Below is the instruction that describes the task:
### Input:
If the executable given isn't an absolute path, search $PATH for the interpreter
### Response:
def resolve_interpreter(exe):
"""
If the executable given isn't an absolute path, search $PATH for the interpreter
"""
if os.path.abspath(exe) != exe:
paths = os.environ.get('PATH', '').split(os.pathsep)
for path in paths:
if os.path.exists(os.path.join(path, exe)):
exe = os.path.join(path, exe)
break
if not os.path.exists(exe):
logger.fatal('The executable %s (from --python=%s) does not exist' % (exe, exe))
sys.exit(3)
return exe |
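For comparison, a minimal standard-library equivalent of the same $PATH lookup is shutil.which; unlike the helper above it simply returns None rather than exiting, and the interpreter name below is only an example.
import shutil

exe = shutil.which('python3')
if exe is None:
    print('python3 not found on $PATH')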
def context(self):
"""Get the xpathContext from an xpathParserContext """
ret = libxml2mod.xmlXPathParserGetContext(self._o)
if ret is None:raise xpathError('xmlXPathParserGetContext() failed')
__tmp = xpathContext(_obj=ret)
    return __tmp | Get the xpathContext from an xpathParserContext | Below is the instruction that describes the task:
### Input:
Get the xpathContext from an xpathParserContext
### Response:
def context(self):
"""Get the xpathContext from an xpathParserContext """
ret = libxml2mod.xmlXPathParserGetContext(self._o)
if ret is None:raise xpathError('xmlXPathParserGetContext() failed')
__tmp = xpathContext(_obj=ret)
return __tmp |
def session(self):
"""
Get session object to benefit from connection pooling.
http://docs.python-requests.org/en/master/user/advanced/#session-objects
:rtype: requests.Session
"""
if self._session is None:
self._session = requests.Session()
self._session.headers.update(self._headers)
return self._session | Get session object to benefit from connection pooling.
http://docs.python-requests.org/en/master/user/advanced/#session-objects
    :rtype: requests.Session | Below is the instruction that describes the task:
### Input:
Get session object to benefit from connection pooling.
http://docs.python-requests.org/en/master/user/advanced/#session-objects
:rtype: requests.Session
### Response:
def session(self):
"""
Get session object to benefit from connection pooling.
http://docs.python-requests.org/en/master/user/advanced/#session-objects
:rtype: requests.Session
"""
if self._session is None:
self._session = requests.Session()
self._session.headers.update(self._headers)
return self._session |
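An illustrative sketch (the header value and URLs are made up) of why the cached Session is worth keeping: repeated calls reuse one pooled connection and carry the same default headers.
import requests

s = requests.Session()
s.headers.update({'User-Agent': 'example-client/1.0'})
for path in ('/a', '/b'):
    s.get('https://api.example.com' + path)   # hypothetical endpoint, same pooled connection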
def searchNamesIndex(self, nameIndex, nameData, searchString, category="", math=False, game=False, extension="", searchFiles=False):
"""Search the names index for a string and returns the namedata"""
nameData = {}
try:
nameFile = open(nameIndex, 'rt')
except IOError:
self.repo.printd("Error: Unable to read index file " + self.fileIndex)
return None
count = 1
for line in nameFile:
count += 1
if searchString.lower() in line.lower():
#Extension argument
if extension in line:
nameData[count] = line[:len(line) - 1]
else:
nameData[count] = None
#category arg
if category in line and extension in line:
nameData[count] = line[:len(line) - 1]
else:
nameData[count] = None
#Both game and math
if (game and math):
if ("/games/" in line or "/math/" in line or "/science" in line):
nameData[count] = line[:len(line) - 1]
else:
nameData[count] = None
#Game option switch
elif game:
if "/games/" in line:
nameData[count] = line[:len(line) - 1]
else:
nameData[count] = None
#Math option switch
elif math:
if ("/math/" in line or "/science/" in line):
nameData[count] = line[:len(line) - 1]
else:
nameData[count] = None
#Close the name index and return
nameFile.close()
    return nameData | Search the names index for a string and returns the namedata | Below is the instruction that describes the task:
### Input:
Search the names index for a string and returns the namedata
### Response:
def searchNamesIndex(self, nameIndex, nameData, searchString, category="", math=False, game=False, extension="", searchFiles=False):
"""Search the names index for a string and returns the namedata"""
nameData = {}
try:
nameFile = open(nameIndex, 'rt')
except IOError:
self.repo.printd("Error: Unable to read index file " + self.fileIndex)
return None
count = 1
for line in nameFile:
count += 1
if searchString.lower() in line.lower():
#Extension argument
if extension in line:
nameData[count] = line[:len(line) - 1]
else:
nameData[count] = None
#category arg
if category in line and extension in line:
nameData[count] = line[:len(line) - 1]
else:
nameData[count] = None
#Both game and math
if (game and math):
if ("/games/" in line or "/math/" in line or "/science" in line):
nameData[count] = line[:len(line) - 1]
else:
nameData[count] = None
#Game option switch
elif game:
if "/games/" in line:
nameData[count] = line[:len(line) - 1]
else:
nameData[count] = None
#Math option switch
elif math:
if ("/math/" in line or "/science/" in line):
nameData[count] = line[:len(line) - 1]
else:
nameData[count] = None
#Close the name index and return
nameFile.close()
return nameData |
def fetch_github_token(self):
"""
Fetch GitHub token. First try to use variable provided
by --token option, otherwise try to fetch it from git config
and last CHANGELOG_GITHUB_TOKEN env variable.
:returns: Nothing
"""
if not self.options.token:
try:
for v in GH_CFG_VARS:
cmd = ['git', 'config', '--get', '{0}'.format(v)]
self.options.token = subprocess.Popen(
cmd, stdout=subprocess.PIPE).communicate()[0].strip()
if self.options.token:
break
except (subprocess.CalledProcessError, WindowsError):
pass
if not self.options.token:
self.options.token = os.environ.get(CHANGELOG_GITHUB_TOKEN)
if not self.options.token:
print(NO_TOKEN_PROVIDED) | Fetch GitHub token. First try to use variable provided
by --token option, otherwise try to fetch it from git config
and last CHANGELOG_GITHUB_TOKEN env variable.
    :returns: Nothing | Below is the instruction that describes the task:
### Input:
Fetch GitHub token. First try to use variable provided
by --token option, otherwise try to fetch it from git config
and last CHANGELOG_GITHUB_TOKEN env variable.
:returns: Nothing
### Response:
def fetch_github_token(self):
"""
Fetch GitHub token. First try to use variable provided
by --token option, otherwise try to fetch it from git config
and last CHANGELOG_GITHUB_TOKEN env variable.
:returns: Nothing
"""
if not self.options.token:
try:
for v in GH_CFG_VARS:
cmd = ['git', 'config', '--get', '{0}'.format(v)]
self.options.token = subprocess.Popen(
cmd, stdout=subprocess.PIPE).communicate()[0].strip()
if self.options.token:
break
except (subprocess.CalledProcessError, WindowsError):
pass
if not self.options.token:
self.options.token = os.environ.get(CHANGELOG_GITHUB_TOKEN)
if not self.options.token:
print(NO_TOKEN_PROVIDED) |
def get_below_right_key_rect(self):
"""Returns tuple key rect of below right cell"""
key_below_right = self.row + 1, self.col + 1, self.tab
border_width_right = \
float(self.cell_attributes[self.key]["borderwidth_right"]) / 2.0
border_width_bottom = \
float(self.cell_attributes[self.key]["borderwidth_bottom"]) / 2.0
rect_below_right = (self.x+self.width, self.y-self.height,
border_width_right, border_width_bottom)
    return key_below_right, rect_below_right | Returns tuple key rect of below right cell | Below is the instruction that describes the task:
### Input:
Returns tuple key rect of below right cell
### Response:
def get_below_right_key_rect(self):
"""Returns tuple key rect of below right cell"""
key_below_right = self.row + 1, self.col + 1, self.tab
border_width_right = \
float(self.cell_attributes[self.key]["borderwidth_right"]) / 2.0
border_width_bottom = \
float(self.cell_attributes[self.key]["borderwidth_bottom"]) / 2.0
rect_below_right = (self.x+self.width, self.y-self.height,
border_width_right, border_width_bottom)
return key_below_right, rect_below_right |
def getObject(ID, jd, lat, lon):
""" Returns an object for a specific date and
location.
"""
if ID == const.SOUTH_NODE:
obj = swe.sweObject(const.NORTH_NODE, jd)
obj.update({
'id': const.SOUTH_NODE,
'lon': angle.norm(obj['lon'] + 180)
})
elif ID == const.PARS_FORTUNA:
pflon = tools.pfLon(jd, lat, lon)
obj = {
'id': ID,
'lon': pflon,
'lat': 0,
'lonspeed': 0,
'latspeed': 0
}
elif ID == const.SYZYGY:
szjd = tools.syzygyJD(jd)
obj = swe.sweObject(const.MOON, szjd)
obj['id'] = const.SYZYGY
else:
obj = swe.sweObject(ID, jd)
_signInfo(obj)
return obj | Returns an object for a specific date and
    location. | Below is the instruction that describes the task:
### Input:
Returns an object for a specific date and
location.
### Response:
def getObject(ID, jd, lat, lon):
""" Returns an object for a specific date and
location.
"""
if ID == const.SOUTH_NODE:
obj = swe.sweObject(const.NORTH_NODE, jd)
obj.update({
'id': const.SOUTH_NODE,
'lon': angle.norm(obj['lon'] + 180)
})
elif ID == const.PARS_FORTUNA:
pflon = tools.pfLon(jd, lat, lon)
obj = {
'id': ID,
'lon': pflon,
'lat': 0,
'lonspeed': 0,
'latspeed': 0
}
elif ID == const.SYZYGY:
szjd = tools.syzygyJD(jd)
obj = swe.sweObject(const.MOON, szjd)
obj['id'] = const.SYZYGY
else:
obj = swe.sweObject(ID, jd)
_signInfo(obj)
return obj |
def cmd(send, msg, args):
"""'Inspects a bot attribute.
Syntax: {command} <attr>
"""
if not hasattr(args['handler'], msg):
send("That attribute was not found in the handler.")
return
send(str(getattr(args['handler'], msg))) | Inspects a bot attribute.
    Syntax: {command} <attr> | Below is the instruction that describes the task:
### Input:
Inspects a bot attribute.
Syntax: {command} <attr>
### Response:
def cmd(send, msg, args):
"""'Inspects a bot attribute.
Syntax: {command} <attr>
"""
if not hasattr(args['handler'], msg):
send("That attribute was not found in the handler.")
return
send(str(getattr(args['handler'], msg))) |
def single_valued(self, e):
"""
Returns True whether `e` is a concrete value or is a value set with
only 1 possible value. This differs from `unique` in that this *does*
not query the constraint solver.
"""
if self.state.mode == 'static':
if type(e) in (int, bytes, float, bool):
return True
else:
return e.cardinality <= 1
else:
# All symbolic expressions are not single-valued
return not self.symbolic(e) | Returns True whether `e` is a concrete value or is a value set with
only 1 possible value. This differs from `unique` in that this *does*
    not query the constraint solver. | Below is the instruction that describes the task:
### Input:
Returns True whether `e` is a concrete value or is a value set with
only 1 possible value. This differs from `unique` in that this *does*
not query the constraint solver.
### Response:
def single_valued(self, e):
"""
Returns True whether `e` is a concrete value or is a value set with
only 1 possible value. This differs from `unique` in that this *does*
not query the constraint solver.
"""
if self.state.mode == 'static':
if type(e) in (int, bytes, float, bool):
return True
else:
return e.cardinality <= 1
else:
# All symbolic expressions are not single-valued
return not self.symbolic(e) |
def process_kwargs(obj, kwargs):
'''
Convenience wrapper around binwalk.core.module.Modules.kwargs.
@obj - The class object (an instance of a sub-class of binwalk.core.module.Module).
@kwargs - The kwargs provided to the object's __init__ method.
Returns None.
'''
with Modules() as m:
kwargs = m.kwargs(obj, kwargs)
return kwargs | Convenience wrapper around binwalk.core.module.Modules.kwargs.
@obj - The class object (an instance of a sub-class of binwalk.core.module.Module).
@kwargs - The kwargs provided to the object's __init__ method.
    Returns None. | Below is the instruction that describes the task:
### Input:
Convenience wrapper around binwalk.core.module.Modules.kwargs.
@obj - The class object (an instance of a sub-class of binwalk.core.module.Module).
@kwargs - The kwargs provided to the object's __init__ method.
Returns None.
### Response:
def process_kwargs(obj, kwargs):
'''
Convenience wrapper around binwalk.core.module.Modules.kwargs.
@obj - The class object (an instance of a sub-class of binwalk.core.module.Module).
@kwargs - The kwargs provided to the object's __init__ method.
Returns None.
'''
with Modules() as m:
kwargs = m.kwargs(obj, kwargs)
return kwargs |
def update_vm(vm_ref, vm_config_spec):
'''
Updates the virtual machine configuration with the given object
vm_ref
Virtual machine managed object reference
vm_config_spec
Virtual machine config spec object to update
'''
vm_name = get_managed_object_name(vm_ref)
log.trace('Updating vm \'%s\'', vm_name)
try:
task = vm_ref.ReconfigVM_Task(vm_config_spec)
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task')
return vm_ref | Updates the virtual machine configuration with the given object
vm_ref
Virtual machine managed object reference
vm_config_spec
        Virtual machine config spec object to update | Below is the instruction that describes the task:
### Input:
Updates the virtual machine configuration with the given object
vm_ref
Virtual machine managed object reference
vm_config_spec
Virtual machine config spec object to update
### Response:
def update_vm(vm_ref, vm_config_spec):
'''
Updates the virtual machine configuration with the given object
vm_ref
Virtual machine managed object reference
vm_config_spec
Virtual machine config spec object to update
'''
vm_name = get_managed_object_name(vm_ref)
log.trace('Updating vm \'%s\'', vm_name)
try:
task = vm_ref.ReconfigVM_Task(vm_config_spec)
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task')
return vm_ref |
def connect_delete_namespaced_service_proxy_with_path(self, name, namespace, path, **kwargs):
"""
connect DELETE requests to proxy of Service
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_delete_namespaced_service_proxy_with_path(name, namespace, path, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ServiceProxyOptions (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str path: path to the resource (required)
:param str path2: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.connect_delete_namespaced_service_proxy_with_path_with_http_info(name, namespace, path, **kwargs)
else:
(data) = self.connect_delete_namespaced_service_proxy_with_path_with_http_info(name, namespace, path, **kwargs)
return data | connect DELETE requests to proxy of Service
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_delete_namespaced_service_proxy_with_path(name, namespace, path, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ServiceProxyOptions (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str path: path to the resource (required)
:param str path2: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
:return: str
If the method is called asynchronously,
        returns the request thread. | Below is the instruction that describes the task:
### Input:
connect DELETE requests to proxy of Service
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_delete_namespaced_service_proxy_with_path(name, namespace, path, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ServiceProxyOptions (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str path: path to the resource (required)
:param str path2: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
:return: str
If the method is called asynchronously,
returns the request thread.
### Response:
def connect_delete_namespaced_service_proxy_with_path(self, name, namespace, path, **kwargs):
"""
connect DELETE requests to proxy of Service
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_delete_namespaced_service_proxy_with_path(name, namespace, path, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ServiceProxyOptions (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str path: path to the resource (required)
:param str path2: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.connect_delete_namespaced_service_proxy_with_path_with_http_info(name, namespace, path, **kwargs)
else:
(data) = self.connect_delete_namespaced_service_proxy_with_path_with_http_info(name, namespace, path, **kwargs)
return data |
def from_series(cls, series, offset=0):
"""
Creates and return a Series from a Series
:param series: raccoon Series
:param offset: offset value must be provided as there is no equivalent for a DataFrame
:return: Series
"""
return cls(data=series.data, index=series.index, data_name=series.data_name, index_name=series.index_name,
sort=series.sort, offset=offset) | Creates and return a Series from a Series
:param series: raccoon Series
:param offset: offset value must be provided as there is no equivalent for a DataFrame
    :return: Series | Below is the instruction that describes the task:
### Input:
Creates and return a Series from a Series
:param series: raccoon Series
:param offset: offset value must be provided as there is no equivalent for a DataFrame
:return: Series
### Response:
def from_series(cls, series, offset=0):
"""
Creates and return a Series from a Series
:param series: raccoon Series
:param offset: offset value must be provided as there is no equivalent for a DataFrame
:return: Series
"""
return cls(data=series.data, index=series.index, data_name=series.data_name, index_name=series.index_name,
sort=series.sort, offset=offset) |
def get_loader(config_uri, protocols=None):
"""
Find a :class:`plaster.ILoader` object capable of handling ``config_uri``.
:param config_uri: Anything that can be parsed by
:func:`plaster.parse_uri`.
:param protocols: Zero or more :term:`loader protocol` identifiers that
the loader must implement to match the desired ``config_uri``.
:returns: A :class:`plaster.ILoader` object.
:raises plaster.LoaderNotFound: If no loader could be found.
:raises plaster.MultipleLoadersFound: If multiple loaders match the
requested criteria. If this happens, you can disambiguate the lookup
by appending the package name to the scheme for the loader you wish
to use. For example if ``ini`` is ambiguous then specify
``ini+myapp`` to use the ini loader from the ``myapp`` package.
"""
config_uri = parse_uri(config_uri)
requested_scheme = config_uri.scheme
matched_loaders = find_loaders(requested_scheme, protocols=protocols)
if len(matched_loaders) < 1:
raise LoaderNotFound(requested_scheme, protocols=protocols)
if len(matched_loaders) > 1:
raise MultipleLoadersFound(
requested_scheme, matched_loaders, protocols=protocols)
loader_info = matched_loaders[0]
loader = loader_info.load(config_uri)
return loader | Find a :class:`plaster.ILoader` object capable of handling ``config_uri``.
:param config_uri: Anything that can be parsed by
:func:`plaster.parse_uri`.
:param protocols: Zero or more :term:`loader protocol` identifiers that
the loader must implement to match the desired ``config_uri``.
:returns: A :class:`plaster.ILoader` object.
:raises plaster.LoaderNotFound: If no loader could be found.
:raises plaster.MultipleLoadersFound: If multiple loaders match the
requested criteria. If this happens, you can disambiguate the lookup
by appending the package name to the scheme for the loader you wish
to use. For example if ``ini`` is ambiguous then specify
        ``ini+myapp`` to use the ini loader from the ``myapp`` package. | Below is the instruction that describes the task:
### Input:
Find a :class:`plaster.ILoader` object capable of handling ``config_uri``.
:param config_uri: Anything that can be parsed by
:func:`plaster.parse_uri`.
:param protocols: Zero or more :term:`loader protocol` identifiers that
the loader must implement to match the desired ``config_uri``.
:returns: A :class:`plaster.ILoader` object.
:raises plaster.LoaderNotFound: If no loader could be found.
:raises plaster.MultipleLoadersFound: If multiple loaders match the
requested criteria. If this happens, you can disambiguate the lookup
by appending the package name to the scheme for the loader you wish
to use. For example if ``ini`` is ambiguous then specify
``ini+myapp`` to use the ini loader from the ``myapp`` package.
### Response:
def get_loader(config_uri, protocols=None):
"""
Find a :class:`plaster.ILoader` object capable of handling ``config_uri``.
:param config_uri: Anything that can be parsed by
:func:`plaster.parse_uri`.
:param protocols: Zero or more :term:`loader protocol` identifiers that
the loader must implement to match the desired ``config_uri``.
:returns: A :class:`plaster.ILoader` object.
:raises plaster.LoaderNotFound: If no loader could be found.
:raises plaster.MultipleLoadersFound: If multiple loaders match the
requested criteria. If this happens, you can disambiguate the lookup
by appending the package name to the scheme for the loader you wish
to use. For example if ``ini`` is ambiguous then specify
``ini+myapp`` to use the ini loader from the ``myapp`` package.
"""
config_uri = parse_uri(config_uri)
requested_scheme = config_uri.scheme
matched_loaders = find_loaders(requested_scheme, protocols=protocols)
if len(matched_loaders) < 1:
raise LoaderNotFound(requested_scheme, protocols=protocols)
if len(matched_loaders) > 1:
raise MultipleLoadersFound(
requested_scheme, matched_loaders, protocols=protocols)
loader_info = matched_loaders[0]
loader = loader_info.load(config_uri)
return loader |
def keyword_search(rows, **kwargs):
"""
Takes a list of dictionaries and finds all the dictionaries where the
keys and values match those found in the keyword arguments.
Keys in the row data have ' ' and '-' replaced with '_', so they can
match the keyword argument parsing. For example, the keyword argument
'fix_up_path' will match a key named 'fix-up path'.
In addition, several suffixes can be added to the key name to do partial
matching of values:
* '__contains' will test whether the data value contains the given
value.
* '__startswith' tests if the data value starts with the given value
* '__lower_value' compares the lower-case version of the data and given
values.
Arguments:
rows (list): A list of dictionaries representing the data to be
searched.
**kwargs (dict): keyword-value pairs corresponding to the fields that
need to be found and their required values in the data rows.
Returns:
(list): The list of rows that match the search keywords. If no
keyword arguments are given, no rows are returned.
Examples:
>>> rows = [
... {'domain': 'oracle', 'type': 'soft', 'item': 'nofile', 'value': 1024},
... {'domain': 'oracle', 'type': 'hard', 'item': 'nofile', 'value': 65536},
... {'domain': 'oracle', 'type': 'soft', 'item': 'stack', 'value': 10240},
... {'domain': 'oracle', 'type': 'hard', 'item': 'stack', 'value': 3276},
... {'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}]
...
>>> keyword_search(rows, domain='root')
[{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}]
>>> keyword_search(rows, item__contains='c')
[{'domain': 'oracle', 'type': 'soft', 'item': 'stack', 'value': 10240},
{'domain': 'oracle', 'type': 'hard', 'item': 'stack', 'value': 3276},
{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}]
>>> keyword_search(rows, domain__startswith='r')
[{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}]
"""
results = []
if not kwargs:
return results
# Allows us to transform the key and do lookups like __contains and
# __startswith
matchers = {
'default': lambda s, v: s == v,
'contains': lambda s, v: v in s,
'startswith': lambda s, v: s.startswith(v),
'lower_value': lambda s, v: s.lower() == v.lower(),
}
def key_match(row, key, value):
# Translate ' ' and '-' of keys in dict to '_' to match keyword arguments.
my_row = {}
for my_key, val in row.items():
my_row[my_key.replace(' ', '_').replace('-', '_')] = val
matcher_fn = matchers['default']
if '__' in key:
key, matcher = key.split('__', 1)
if matcher not in matchers:
# put key back the way we found it, matcher fn unchanged
key = key + '__' + matcher
else:
matcher_fn = matchers[matcher]
return key in my_row and matcher_fn(my_row[key], value)
data = []
for row in rows:
if all(map(lambda kv: key_match(row, kv[0], kv[1]), kwargs.items())):
data.append(row)
return data | Takes a list of dictionaries and finds all the dictionaries where the
keys and values match those found in the keyword arguments.
Keys in the row data have ' ' and '-' replaced with '_', so they can
match the keyword argument parsing. For example, the keyword argument
'fix_up_path' will match a key named 'fix-up path'.
In addition, several suffixes can be added to the key name to do partial
matching of values:
* '__contains' will test whether the data value contains the given
value.
* '__startswith' tests if the data value starts with the given value
* '__lower_value' compares the lower-case version of the data and given
values.
Arguments:
rows (list): A list of dictionaries representing the data to be
searched.
**kwargs (dict): keyword-value pairs corresponding to the fields that
need to be found and their required values in the data rows.
Returns:
(list): The list of rows that match the search keywords. If no
keyword arguments are given, no rows are returned.
Examples:
>>> rows = [
... {'domain': 'oracle', 'type': 'soft', 'item': 'nofile', 'value': 1024},
... {'domain': 'oracle', 'type': 'hard', 'item': 'nofile', 'value': 65536},
... {'domain': 'oracle', 'type': 'soft', 'item': 'stack', 'value': 10240},
... {'domain': 'oracle', 'type': 'hard', 'item': 'stack', 'value': 3276},
... {'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}]
...
>>> keyword_search(rows, domain='root')
[{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}]
>>> keyword_search(rows, item__contains='c')
[{'domain': 'oracle', 'type': 'soft', 'item': 'stack', 'value': 10240},
{'domain': 'oracle', 'type': 'hard', 'item': 'stack', 'value': 3276},
{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}]
>>> keyword_search(rows, domain__startswith='r')
    [{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}] | Below is the instruction that describes the task:
### Input:
Takes a list of dictionaries and finds all the dictionaries where the
keys and values match those found in the keyword arguments.
Keys in the row data have ' ' and '-' replaced with '_', so they can
match the keyword argument parsing. For example, the keyword argument
'fix_up_path' will match a key named 'fix-up path'.
In addition, several suffixes can be added to the key name to do partial
matching of values:
* '__contains' will test whether the data value contains the given
value.
* '__startswith' tests if the data value starts with the given value
* '__lower_value' compares the lower-case version of the data and given
values.
Arguments:
rows (list): A list of dictionaries representing the data to be
searched.
**kwargs (dict): keyword-value pairs corresponding to the fields that
need to be found and their required values in the data rows.
Returns:
(list): The list of rows that match the search keywords. If no
keyword arguments are given, no rows are returned.
Examples:
>>> rows = [
... {'domain': 'oracle', 'type': 'soft', 'item': 'nofile', 'value': 1024},
... {'domain': 'oracle', 'type': 'hard', 'item': 'nofile', 'value': 65536},
... {'domain': 'oracle', 'type': 'soft', 'item': 'stack', 'value': 10240},
... {'domain': 'oracle', 'type': 'hard', 'item': 'stack', 'value': 3276},
... {'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}]
...
>>> keyword_search(rows, domain='root')
[{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}]
>>> keyword_search(rows, item__contains='c')
[{'domain': 'oracle', 'type': 'soft', 'item': 'stack', 'value': 10240},
{'domain': 'oracle', 'type': 'hard', 'item': 'stack', 'value': 3276},
{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}]
>>> keyword_search(rows, domain__startswith='r')
[{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}]
### Response:
def keyword_search(rows, **kwargs):
"""
Takes a list of dictionaries and finds all the dictionaries where the
keys and values match those found in the keyword arguments.
Keys in the row data have ' ' and '-' replaced with '_', so they can
match the keyword argument parsing. For example, the keyword argument
'fix_up_path' will match a key named 'fix-up path'.
In addition, several suffixes can be added to the key name to do partial
matching of values:
* '__contains' will test whether the data value contains the given
value.
* '__startswith' tests if the data value starts with the given value
* '__lower_value' compares the lower-case version of the data and given
values.
Arguments:
rows (list): A list of dictionaries representing the data to be
searched.
**kwargs (dict): keyword-value pairs corresponding to the fields that
need to be found and their required values in the data rows.
Returns:
(list): The list of rows that match the search keywords. If no
keyword arguments are given, no rows are returned.
Examples:
>>> rows = [
... {'domain': 'oracle', 'type': 'soft', 'item': 'nofile', 'value': 1024},
... {'domain': 'oracle', 'type': 'hard', 'item': 'nofile', 'value': 65536},
... {'domain': 'oracle', 'type': 'soft', 'item': 'stack', 'value': 10240},
... {'domain': 'oracle', 'type': 'hard', 'item': 'stack', 'value': 3276},
... {'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}]
...
>>> keyword_search(rows, domain='root')
[{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}]
>>> keyword_search(rows, item__contains='c')
[{'domain': 'oracle', 'type': 'soft', 'item': 'stack', 'value': 10240},
{'domain': 'oracle', 'type': 'hard', 'item': 'stack', 'value': 3276},
{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}]
>>> keyword_search(rows, domain__startswith='r')
[{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}]
"""
results = []
if not kwargs:
return results
# Allows us to transform the key and do lookups like __contains and
# __startswith
matchers = {
'default': lambda s, v: s == v,
'contains': lambda s, v: v in s,
'startswith': lambda s, v: s.startswith(v),
'lower_value': lambda s, v: s.lower() == v.lower(),
}
def key_match(row, key, value):
# Translate ' ' and '-' of keys in dict to '_' to match keyword arguments.
my_row = {}
for my_key, val in row.items():
my_row[my_key.replace(' ', '_').replace('-', '_')] = val
matcher_fn = matchers['default']
if '__' in key:
key, matcher = key.split('__', 1)
if matcher not in matchers:
# put key back the way we found it, matcher fn unchanged
key = key + '__' + matcher
else:
matcher_fn = matchers[matcher]
return key in my_row and matcher_fn(my_row[key], value)
data = []
for row in rows:
if all(map(lambda kv: key_match(row, kv[0], kv[1]), kwargs.items())):
data.append(row)
return data |
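A small usage sketch for illustration: it exercises the '__lower_value' matcher, which the doctest examples above do not cover (the row data here is made up).
rows = [{'domain': 'Oracle', 'item': 'nofile'}, {'domain': 'root', 'item': 'nproc'}]
keyword_search(rows, domain__lower_value='oracle')
# -> [{'domain': 'Oracle', 'item': 'nofile'}]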
def require(self, name):
"""Return the value of the requested parameter or raise an error."""
value = self.get(name)
if value is None:
raise TypeError(
"{0} requires the parameter '{1}'.".format(
self.__class__, name
)
)
        return value | Return the value of the requested parameter or raise an error. | Below is the instruction that describes the task:
### Input:
Return the value of the requested parameter or raise an error.
### Response:
def require(self, name):
"""Return the value of the requested parameter or raise an error."""
value = self.get(name)
if value is None:
raise TypeError(
"{0} requires the parameter '{1}'.".format(
self.__class__, name
)
)
return value |
def get_form(self, request, obj=None, **kwargs):
"""Returns modified form for TreeItem model.
'Parent' field choices are built by sitetree itself.
"""
if obj is not None and obj.parent is not None:
self.previous_parent = obj.parent
previous_parent_id = self.previous_parent.id
else:
previous_parent_id = None
my_choice_field = TreeItemChoiceField(self.tree, initial=previous_parent_id)
form = super(TreeItemAdmin, self).get_form(request, obj, **kwargs)
my_choice_field.label = form.base_fields['parent'].label
my_choice_field.help_text = form.base_fields['parent'].help_text
my_choice_field.widget = form.base_fields['parent'].widget
# Replace 'parent' TreeItem field with new appropriate one
form.base_fields['parent'] = my_choice_field
# Try to resolve all currently registered url names including those in namespaces.
if not getattr(self, 'known_url_names', False):
self.known_url_names = []
self.known_url_rules = []
resolver = get_resolver(get_urlconf())
for ns, (url_prefix, ns_resolver) in resolver.namespace_dict.items():
if ns != 'admin':
self._stack_known_urls(ns_resolver.reverse_dict, ns)
self._stack_known_urls(resolver.reverse_dict)
self.known_url_rules = sorted(self.known_url_rules)
form.known_url_names_hint = _(
'You are seeing this warning because "URL as Pattern" option is active and pattern entered above '
'seems to be invalid. Currently registered URL pattern names and parameters: ')
form.known_url_names = self.known_url_names
form.known_url_rules = self.known_url_rules
return form | Returns modified form for TreeItem model.
        'Parent' field choices are built by sitetree itself. | Below is the instruction that describes the task:
### Input:
Returns modified form for TreeItem model.
'Parent' field choices are built by sitetree itself.
### Response:
def get_form(self, request, obj=None, **kwargs):
"""Returns modified form for TreeItem model.
'Parent' field choices are built by sitetree itself.
"""
if obj is not None and obj.parent is not None:
self.previous_parent = obj.parent
previous_parent_id = self.previous_parent.id
else:
previous_parent_id = None
my_choice_field = TreeItemChoiceField(self.tree, initial=previous_parent_id)
form = super(TreeItemAdmin, self).get_form(request, obj, **kwargs)
my_choice_field.label = form.base_fields['parent'].label
my_choice_field.help_text = form.base_fields['parent'].help_text
my_choice_field.widget = form.base_fields['parent'].widget
# Replace 'parent' TreeItem field with new appropriate one
form.base_fields['parent'] = my_choice_field
# Try to resolve all currently registered url names including those in namespaces.
if not getattr(self, 'known_url_names', False):
self.known_url_names = []
self.known_url_rules = []
resolver = get_resolver(get_urlconf())
for ns, (url_prefix, ns_resolver) in resolver.namespace_dict.items():
if ns != 'admin':
self._stack_known_urls(ns_resolver.reverse_dict, ns)
self._stack_known_urls(resolver.reverse_dict)
self.known_url_rules = sorted(self.known_url_rules)
form.known_url_names_hint = _(
'You are seeing this warning because "URL as Pattern" option is active and pattern entered above '
'seems to be invalid. Currently registered URL pattern names and parameters: ')
form.known_url_names = self.known_url_names
form.known_url_rules = self.known_url_rules
return form |
def from_shape_pixel_scale_and_sub_grid_size(cls, shape, pixel_scale, sub_grid_size=2):
"""Setup a grid-stack of grid_stack from a 2D array shape, a pixel scale and a sub-grid size.
This grid corresponds to a fully unmasked 2D array.
Parameters
-----------
shape : (int, int)
The 2D shape of the array, where all pixels are used to generate the grid-stack's grid_stack.
pixel_scale : float
The size of each pixel in arc seconds.
sub_grid_size : int
The size of a sub-pixel's sub-grid (sub_grid_size x sub_grid_size).
"""
regular_grid = RegularGrid.from_shape_and_pixel_scale(shape=shape, pixel_scale=pixel_scale)
sub_grid = SubGrid.from_shape_pixel_scale_and_sub_grid_size(shape=shape, pixel_scale=pixel_scale,
sub_grid_size=sub_grid_size)
blurring_grid = np.array([[0.0, 0.0]])
return GridStack(regular_grid, sub_grid, blurring_grid) | Setup a grid-stack of grid_stack from a 2D array shape, a pixel scale and a sub-grid size.
This grid corresponds to a fully unmasked 2D array.
Parameters
-----------
shape : (int, int)
The 2D shape of the array, where all pixels are used to generate the grid-stack's grid_stack.
pixel_scale : float
The size of each pixel in arc seconds.
sub_grid_size : int
        The size of a sub-pixel's sub-grid (sub_grid_size x sub_grid_size). | Below is the instruction that describes the task:
### Input:
Setup a grid-stack of grid_stack from a 2D array shape, a pixel scale and a sub-grid size.
This grid corresponds to a fully unmasked 2D array.
Parameters
-----------
shape : (int, int)
The 2D shape of the array, where all pixels are used to generate the grid-stack's grid_stack.
pixel_scale : float
The size of each pixel in arc seconds.
sub_grid_size : int
The size of a sub-pixel's sub-grid (sub_grid_size x sub_grid_size).
### Response:
def from_shape_pixel_scale_and_sub_grid_size(cls, shape, pixel_scale, sub_grid_size=2):
"""Setup a grid-stack of grid_stack from a 2D array shape, a pixel scale and a sub-grid size.
This grid corresponds to a fully unmasked 2D array.
Parameters
-----------
shape : (int, int)
The 2D shape of the array, where all pixels are used to generate the grid-stack's grid_stack.
pixel_scale : float
The size of each pixel in arc seconds.
sub_grid_size : int
The size of a sub-pixel's sub-grid (sub_grid_size x sub_grid_size).
"""
regular_grid = RegularGrid.from_shape_and_pixel_scale(shape=shape, pixel_scale=pixel_scale)
sub_grid = SubGrid.from_shape_pixel_scale_and_sub_grid_size(shape=shape, pixel_scale=pixel_scale,
sub_grid_size=sub_grid_size)
blurring_grid = np.array([[0.0, 0.0]])
return GridStack(regular_grid, sub_grid, blurring_grid) |
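For illustration, a hedged usage sketch (assumes the surrounding module's GridStack, RegularGrid and SubGrid classes are available; the shape and pixel scale are arbitrary example values):
# Grids for a fully unmasked 50 x 50 image sampled at 0.1 arcsec per pixel,
# with each pixel split into a 2 x 2 sub-grid:
stack = GridStack.from_shape_pixel_scale_and_sub_grid_size(shape=(50, 50), pixel_scale=0.1, sub_grid_size=2)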
def script_finished(self):
"""
        waits for the script to emit the script_finished signal
"""
script = self.current_script
script.updateProgress.disconnect(self.update_status)
self.script_thread.started.disconnect()
script.finished.disconnect()
self.current_script = None
self.plot_script(script)
self.progressBar.setValue(100)
self.btn_start_script.setEnabled(True)
        self.btn_skip_subscript.setEnabled(False) | waits for the script to emit the script_finished signal | Below is the instruction that describes the task:
### Input:
        waits for the script to emit the script_finished signal
### Response:
def script_finished(self):
"""
        waits for the script to emit the script_finished signal
"""
script = self.current_script
script.updateProgress.disconnect(self.update_status)
self.script_thread.started.disconnect()
script.finished.disconnect()
self.current_script = None
self.plot_script(script)
self.progressBar.setValue(100)
self.btn_start_script.setEnabled(True)
self.btn_skip_subscript.setEnabled(False) |
def delete_gemini_query(self, query_id):
"""Delete a gemini query
Args:
            query_id (str)
"""
query_obj = self.gemini_query(query_id)
logger.debug("Delete query: {0}".format(query_obj.name_query))
self.session.delete(query_obj)
self.save() | Delete a gemini query
Args:
            query_id (str) | Below is the instruction that describes the task:
### Input:
Delete a gemini query
Args:
            query_id (str)
### Response:
def delete_gemini_query(self, query_id):
"""Delete a gemini query
Args:
            query_id (str)
"""
query_obj = self.gemini_query(query_id)
logger.debug("Delete query: {0}".format(query_obj.name_query))
self.session.delete(query_obj)
self.save() |
def get_related_clients(self, client):
"""
Get all other clients that are connected to the same kernel as `client`
"""
related_clients = []
for cl in self.get_clients():
if cl.connection_file == client.connection_file and \
cl is not client:
related_clients.append(cl)
        return related_clients | Get all other clients that are connected to the same kernel as `client` | Below is the instruction that describes the task:
### Input:
Get all other clients that are connected to the same kernel as `client`
### Response:
def get_related_clients(self, client):
"""
Get all other clients that are connected to the same kernel as `client`
"""
related_clients = []
for cl in self.get_clients():
if cl.connection_file == client.connection_file and \
cl is not client:
related_clients.append(cl)
return related_clients |
def addSection(self, data, name =".pype32\x00", flags = 0x60000000):
"""
Adds a new section to the existing L{PE} instance.
@type data: str
@param data: The data to be added in the new section.
@type name: str
@param name: (Optional) The name for the new section.
@type flags: int
@param flags: (Optional) The attributes for the new section.
"""
fa = self.ntHeaders.optionalHeader.fileAlignment.value
sa = self.ntHeaders.optionalHeader.sectionAlignment.value
padding = "\xcc" * (fa - len(data))
sh = SectionHeader()
if len(self.sectionHeaders):
# get the va, vz, ra and rz of the last section in the array of section headers
vaLastSection = self.sectionHeaders[-1].virtualAddress.value
sizeLastSection = self.sectionHeaders[-1].misc.value
pointerToRawDataLastSection = self.sectionHeaders[-1].pointerToRawData.value
sizeOfRawDataLastSection = self.sectionHeaders[-1].sizeOfRawData.value
sh.virtualAddress.value = self._adjustSectionAlignment(vaLastSection + sizeLastSection, fa, sa)
sh.pointerToRawData.value = self._adjustFileAlignment(pointerToRawDataLastSection + sizeOfRawDataLastSection, fa)
sh.misc.value = self._adjustSectionAlignment(len(data), fa, sa) or consts.DEFAULT_PAGE_SIZE
sh.sizeOfRawData.value = self._adjustFileAlignment(len(data), fa) or consts.DEFAULT_FILE_ALIGNMENT
sh.characteristics.value = flags
sh.name.value = name
self.sectionHeaders.append(sh)
self.sections.append(data + padding)
self.ntHeaders.fileHeader.numberOfSections.value += 1 | Adds a new section to the existing L{PE} instance.
@type data: str
@param data: The data to be added in the new section.
@type name: str
@param name: (Optional) The name for the new section.
@type flags: int
@param flags: (Optional) The attributes for the new section. | Below is the instruction that describes the task:
### Input:
Adds a new section to the existing L{PE} instance.
@type data: str
@param data: The data to be added in the new section.
@type name: str
@param name: (Optional) The name for the new section.
@type flags: int
@param flags: (Optional) The attributes for the new section.
### Response:
def addSection(self, data, name =".pype32\x00", flags = 0x60000000):
"""
Adds a new section to the existing L{PE} instance.
@type data: str
@param data: The data to be added in the new section.
@type name: str
@param name: (Optional) The name for the new section.
@type flags: int
@param flags: (Optional) The attributes for the new section.
"""
fa = self.ntHeaders.optionalHeader.fileAlignment.value
sa = self.ntHeaders.optionalHeader.sectionAlignment.value
padding = "\xcc" * (fa - len(data))
sh = SectionHeader()
if len(self.sectionHeaders):
# get the va, vz, ra and rz of the last section in the array of section headers
vaLastSection = self.sectionHeaders[-1].virtualAddress.value
sizeLastSection = self.sectionHeaders[-1].misc.value
pointerToRawDataLastSection = self.sectionHeaders[-1].pointerToRawData.value
sizeOfRawDataLastSection = self.sectionHeaders[-1].sizeOfRawData.value
sh.virtualAddress.value = self._adjustSectionAlignment(vaLastSection + sizeLastSection, fa, sa)
sh.pointerToRawData.value = self._adjustFileAlignment(pointerToRawDataLastSection + sizeOfRawDataLastSection, fa)
sh.misc.value = self._adjustSectionAlignment(len(data), fa, sa) or consts.DEFAULT_PAGE_SIZE
sh.sizeOfRawData.value = self._adjustFileAlignment(len(data), fa) or consts.DEFAULT_FILE_ALIGNMENT
sh.characteristics.value = flags
sh.name.value = name
self.sectionHeaders.append(sh)
self.sections.append(data + padding)
self.ntHeaders.fileHeader.numberOfSections.value += 1 |
def _get_section(name, source):
# type: (str, str) -> Optional[str]
"""Extract the named section from the source.
Args:
name: The name of the section to extract (e.g. "Usage").
source: The usage string to parse.
Returns:
A string containing only the requested section. If the section appears
multiple times, each instance will be merged into a single section.
"""
pattern = re.compile(
'^([^\n]*{name}[^\n]*\n?(?:[ \t].*?(?:\n|$))*)'.format(name=name),
re.IGNORECASE | re.MULTILINE)
usage = None
for section in pattern.findall(source):
usage = _merge_section(usage, section.strip())
return usage | Extract the named section from the source.
Args:
name: The name of the section to extract (e.g. "Usage").
source: The usage string to parse.
Returns:
A string containing only the requested section. If the section appears
        multiple times, each instance will be merged into a single section. | Below is the instruction that describes the task:
### Input:
Extract the named section from the source.
Args:
name: The name of the section to extract (e.g. "Usage").
source: The usage string to parse.
Returns:
A string containing only the requested section. If the section appears
multiple times, each instance will be merged into a single section.
### Response:
def _get_section(name, source):
# type: (str, str) -> Optional[str]
"""Extract the named section from the source.
Args:
name: The name of the section to extract (e.g. "Usage").
source: The usage string to parse.
Returns:
A string containing only the requested section. If the section appears
multiple times, each instance will be merged into a single section.
"""
pattern = re.compile(
'^([^\n]*{name}[^\n]*\n?(?:[ \t].*?(?:\n|$))*)'.format(name=name),
re.IGNORECASE | re.MULTILINE)
usage = None
for section in pattern.findall(source):
usage = _merge_section(usage, section.strip())
return usage |
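A minimal sketch of the expected behaviour (the usage text is hypothetical, and it assumes the module's _merge_section helper simply returns the new section when no earlier one exists):
source = "Usage:\n    prog [--help]\n\nOptions:\n    --help  Show this help."
_get_section('usage', source)
# -> 'Usage:\n    prog [--help]'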
def _set_aws_environment(task: Task = None):
"""Sets up AWS environment from NCLUSTER environment variables"""
current_zone = os.environ.get('NCLUSTER_ZONE', '')
current_region = os.environ.get('AWS_DEFAULT_REGION', '')
def log(*args):
if task:
task.log(*args)
else:
util.log(*args)
if current_region and current_zone:
assert current_zone.startswith(
current_region), f'Current zone "{current_zone}" ($NCLUSTER_ZONE) is not ' \
      f'in current region "{current_region}" ($AWS_DEFAULT_REGION)'
assert u.get_session().region_name == current_region # setting from ~/.aws
# zone is set, set region from zone
if current_zone and not current_region:
current_region = current_zone[:-1]
os.environ['AWS_DEFAULT_REGION'] = current_region
  # neither zone nor region is set, use default setting for region
# if default is not set, use NCLUSTER_DEFAULT_REGION
if not current_region:
current_region = u.get_session().region_name
if not current_region:
log(f"No default region available, using {NCLUSTER_DEFAULT_REGION}")
current_region = NCLUSTER_DEFAULT_REGION
os.environ['AWS_DEFAULT_REGION'] = current_region
# zone not set, use first zone of the region
# if not current_zone:
# current_zone = current_region + 'a'
# os.environ['NCLUSTER_ZONE'] = current_zone
log(f"Using account {u.get_account_number()}, region {current_region}, "
      f"zone {current_zone}") | Sets up AWS environment from NCLUSTER environment variables | Below is the instruction that describes the task:
### Input:
Sets up AWS environment from NCLUSTER environment variables
### Response:
def _set_aws_environment(task: Task = None):
"""Sets up AWS environment from NCLUSTER environment variables"""
current_zone = os.environ.get('NCLUSTER_ZONE', '')
current_region = os.environ.get('AWS_DEFAULT_REGION', '')
def log(*args):
if task:
task.log(*args)
else:
util.log(*args)
if current_region and current_zone:
assert current_zone.startswith(
current_region), f'Current zone "{current_zone}" ($NCLUSTER_ZONE) is not ' \
      f'in current region "{current_region}" ($AWS_DEFAULT_REGION)'
assert u.get_session().region_name == current_region # setting from ~/.aws
# zone is set, set region from zone
if current_zone and not current_region:
current_region = current_zone[:-1]
os.environ['AWS_DEFAULT_REGION'] = current_region
  # neither zone nor region is set, use default setting for region
# if default is not set, use NCLUSTER_DEFAULT_REGION
if not current_region:
current_region = u.get_session().region_name
if not current_region:
log(f"No default region available, using {NCLUSTER_DEFAULT_REGION}")
current_region = NCLUSTER_DEFAULT_REGION
os.environ['AWS_DEFAULT_REGION'] = current_region
# zone not set, use first zone of the region
# if not current_zone:
# current_zone = current_region + 'a'
# os.environ['NCLUSTER_ZONE'] = current_zone
log(f"Using account {u.get_account_number()}, region {current_region}, "
f"zone {current_zone}") |
def finalize_concurrency_state(self, outcome):
""" Utility function to finalize the forward execution of the concurrency state.
:param outcome:
:return:
"""
final_outcome = outcome
self.write_output_data()
self.check_output_data_type()
self.execution_history.push_return_history_item(self, CallType.CONTAINER, self, self.output_data)
self.state_execution_status = StateExecutionStatus.WAIT_FOR_NEXT_STATE
singleton.state_machine_execution_engine._modify_run_to_states(self)
if self.preempted:
final_outcome = Outcome(-2, "preempted")
return self.finalize(final_outcome) | Utility function to finalize the forward execution of the concurrency state.
:param outcome:
        :return: | Below is the instruction that describes the task:
### Input:
Utility function to finalize the forward execution of the concurrency state.
:param outcome:
:return:
### Response:
def finalize_concurrency_state(self, outcome):
""" Utility function to finalize the forward execution of the concurrency state.
:param outcome:
:return:
"""
final_outcome = outcome
self.write_output_data()
self.check_output_data_type()
self.execution_history.push_return_history_item(self, CallType.CONTAINER, self, self.output_data)
self.state_execution_status = StateExecutionStatus.WAIT_FOR_NEXT_STATE
singleton.state_machine_execution_engine._modify_run_to_states(self)
if self.preempted:
final_outcome = Outcome(-2, "preempted")
return self.finalize(final_outcome) |
def log10(self, floor=None):
"""Take the log base 10 of the entire dataset.
Parameters
----------
floor : number (optional)
Clip values below floor after log. Default is None.
"""
def f(dataset, s, floor):
arr = dataset[s]
arr = np.log10(arr)
if floor is not None:
arr[arr < floor] = floor
dataset[s] = arr
self.chunkwise(f, floor=floor) | Take the log base 10 of the entire dataset.
Parameters
----------
floor : number (optional)
Clip values below floor after log. Default is None. | Below is the the instruction that describes the task:
### Input:
Take the log base 10 of the entire dataset.
Parameters
----------
floor : number (optional)
            Clip values below floor after log. Default is None. | Below is the instruction that describes the task:
### Response:
def log10(self, floor=None):
"""Take the log base 10 of the entire dataset.
Parameters
----------
floor : number (optional)
Clip values below floor after log. Default is None.
"""
def f(dataset, s, floor):
arr = dataset[s]
arr = np.log10(arr)
if floor is not None:
arr[arr < floor] = floor
dataset[s] = arr
self.chunkwise(f, floor=floor) |
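Illustrative calls on a hypothetical data object exposing this method:
data.log10()          # log10 of the entire dataset, processed chunk by chunk
data.log10(floor=-6)  # additionally clip post-log values below -6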
def add_args():
"""Adds commandline arguments and formatted Help"""
parser = argparse.ArgumentParser()
parser.add_argument('-host', action='store', dest='host', default='127.0.0.1', help='DEFAULT "127.0.0.1"')
parser.add_argument('-port', action='store', dest='port', default='2947', help='DEFAULT 2947', type=int)
parser.add_argument('-json', dest='gpsd_protocol', const='json', action='store_const', default='json', help='DEFAULT JSON objects */')
parser.add_argument('-device', dest='devicepath', action='store', help='alternate devicepath e.g.,"-device /dev/ttyUSB4"')
# Infrequently used options
parser.add_argument('-nmea', dest='gpsd_protocol', const='nmea', action='store_const', help='*/ output in NMEA */')
# parser.add_argument('-rare', dest='gpsd_protocol', const='rare', action='store_const', help='*/ output of packets in hex */')
# parser.add_argument('-raw', dest='gpsd_protocol', const='raw', action='store_const', help='*/ output of raw packets */')
# parser.add_argument('-scaled', dest='gpsd_protocol', const='scaled', action='store_const', help='*/ scale output to floats */')
# parser.add_argument('-timing', dest='gpsd_protocol', const='timing', action='store_const', help='*/ timing information */')
# parser.add_argument('-split24', dest='gpsd_protocol', const='split24', action='store_const', help='*/ split AIS Type 24s */')
# parser.add_argument('-pps', dest='gpsd_protocol', const='pps', action='store_const', help='*/ enable PPS JSON */')
parser.add_argument('-v', '--version', action='version', version='Version: {}'.format(__version__))
cli_args = parser.parse_args()
    return cli_args | Adds commandline arguments and formatted Help | Below is the instruction that describes the task:
### Input:
Adds commandline arguments and formatted Help
### Response:
def add_args():
"""Adds commandline arguments and formatted Help"""
parser = argparse.ArgumentParser()
parser.add_argument('-host', action='store', dest='host', default='127.0.0.1', help='DEFAULT "127.0.0.1"')
parser.add_argument('-port', action='store', dest='port', default='2947', help='DEFAULT 2947', type=int)
parser.add_argument('-json', dest='gpsd_protocol', const='json', action='store_const', default='json', help='DEFAULT JSON objects */')
parser.add_argument('-device', dest='devicepath', action='store', help='alternate devicepath e.g.,"-device /dev/ttyUSB4"')
# Infrequently used options
parser.add_argument('-nmea', dest='gpsd_protocol', const='nmea', action='store_const', help='*/ output in NMEA */')
# parser.add_argument('-rare', dest='gpsd_protocol', const='rare', action='store_const', help='*/ output of packets in hex */')
# parser.add_argument('-raw', dest='gpsd_protocol', const='raw', action='store_const', help='*/ output of raw packets */')
# parser.add_argument('-scaled', dest='gpsd_protocol', const='scaled', action='store_const', help='*/ scale output to floats */')
# parser.add_argument('-timing', dest='gpsd_protocol', const='timing', action='store_const', help='*/ timing information */')
# parser.add_argument('-split24', dest='gpsd_protocol', const='split24', action='store_const', help='*/ split AIS Type 24s */')
# parser.add_argument('-pps', dest='gpsd_protocol', const='pps', action='store_const', help='*/ enable PPS JSON */')
parser.add_argument('-v', '--version', action='version', version='Version: {}'.format(__version__))
cli_args = parser.parse_args()
return cli_args |
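A hedged usage sketch (options are read from sys.argv; the attribute names follow the dest values declared above):
args = add_args()
print(args.host, args.port, args.gpsd_protocol, args.devicepath)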
def get_vulnerability_functions_04(fname):
"""
Parse the vulnerability model in NRML 0.4 format.
:param fname:
path of the vulnerability file
:returns:
a dictionary imt, taxonomy -> vulnerability function + vset
"""
categories = dict(assetCategory=set(), lossCategory=set(),
vulnerabilitySetID=set())
imts = set()
taxonomies = set()
vf_dict = {} # imt, taxonomy -> vulnerability function
for vset in nrml.read(fname).vulnerabilityModel:
categories['assetCategory'].add(vset['assetCategory'])
categories['lossCategory'].add(vset['lossCategory'])
categories['vulnerabilitySetID'].add(vset['vulnerabilitySetID'])
IML = vset.IML
imt_str = IML['IMT']
imls = ~IML
imts.add(imt_str)
for vfun in vset.getnodes('discreteVulnerability'):
taxonomy = vfun['vulnerabilityFunctionID']
if taxonomy in taxonomies:
raise InvalidFile(
'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
(taxonomy, fname, vfun.lineno))
taxonomies.add(taxonomy)
with context(fname, vfun):
loss_ratios = ~vfun.lossRatio
coefficients = ~vfun.coefficientsVariation
if len(loss_ratios) != len(imls):
raise InvalidFile(
'There are %d loss ratios, but %d imls: %s, line %d' %
(len(loss_ratios), len(imls), fname,
vfun.lossRatio.lineno))
if len(coefficients) != len(imls):
raise InvalidFile(
'There are %d coefficients, but %d imls: %s, line %d' %
(len(coefficients), len(imls), fname,
vfun.coefficientsVariation.lineno))
with context(fname, vfun):
vf_dict[imt_str, taxonomy] = scientific.VulnerabilityFunction(
taxonomy, imt_str, imls, loss_ratios, coefficients,
vfun['probabilisticDistribution'])
categories['id'] = '_'.join(sorted(categories['vulnerabilitySetID']))
del categories['vulnerabilitySetID']
return vf_dict, categories | Parse the vulnerability model in NRML 0.4 format.
:param fname:
path of the vulnerability file
:returns:
        a dictionary imt, taxonomy -> vulnerability function + vset | Below is the instruction that describes the task:
### Input:
Parse the vulnerability model in NRML 0.4 format.
:param fname:
path of the vulnerability file
:returns:
a dictionary imt, taxonomy -> vulnerability function + vset
### Response:
def get_vulnerability_functions_04(fname):
"""
Parse the vulnerability model in NRML 0.4 format.
:param fname:
path of the vulnerability file
:returns:
a dictionary imt, taxonomy -> vulnerability function + vset
"""
categories = dict(assetCategory=set(), lossCategory=set(),
vulnerabilitySetID=set())
imts = set()
taxonomies = set()
vf_dict = {} # imt, taxonomy -> vulnerability function
for vset in nrml.read(fname).vulnerabilityModel:
categories['assetCategory'].add(vset['assetCategory'])
categories['lossCategory'].add(vset['lossCategory'])
categories['vulnerabilitySetID'].add(vset['vulnerabilitySetID'])
IML = vset.IML
imt_str = IML['IMT']
imls = ~IML
imts.add(imt_str)
for vfun in vset.getnodes('discreteVulnerability'):
taxonomy = vfun['vulnerabilityFunctionID']
if taxonomy in taxonomies:
raise InvalidFile(
'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
(taxonomy, fname, vfun.lineno))
taxonomies.add(taxonomy)
with context(fname, vfun):
loss_ratios = ~vfun.lossRatio
coefficients = ~vfun.coefficientsVariation
if len(loss_ratios) != len(imls):
raise InvalidFile(
'There are %d loss ratios, but %d imls: %s, line %d' %
(len(loss_ratios), len(imls), fname,
vfun.lossRatio.lineno))
if len(coefficients) != len(imls):
raise InvalidFile(
'There are %d coefficients, but %d imls: %s, line %d' %
(len(coefficients), len(imls), fname,
vfun.coefficientsVariation.lineno))
with context(fname, vfun):
vf_dict[imt_str, taxonomy] = scientific.VulnerabilityFunction(
taxonomy, imt_str, imls, loss_ratios, coefficients,
vfun['probabilisticDistribution'])
categories['id'] = '_'.join(sorted(categories['vulnerabilitySetID']))
del categories['vulnerabilitySetID']
return vf_dict, categories |
def handle_join(self, connection, event):
"""
Store join time for a nickname when it joins.
"""
nickname = self.get_nickname(event)
        self.joined[nickname] = datetime.now() | Store join time for a nickname when it joins. | Below is the instruction that describes the task:
### Input:
Store join time for a nickname when it joins.
### Response:
def handle_join(self, connection, event):
"""
Store join time for a nickname when it joins.
"""
nickname = self.get_nickname(event)
self.joined[nickname] = datetime.now() |
def get_multilevel_rpn_anchor_input(im, boxes, is_crowd):
"""
Args:
im: an image
        boxes: nx4, floatbox, gt. shouldn't be changed
is_crowd: n,
Returns:
[(fm_labels, fm_boxes)]: Returns a tuple for each FPN level.
Each tuple contains the anchor labels and target boxes for each pixel in the featuremap.
fm_labels: fHxfWx NUM_ANCHOR_RATIOS
fm_boxes: fHxfWx NUM_ANCHOR_RATIOS x4
"""
boxes = boxes.copy()
anchors_per_level = get_all_anchors_fpn()
flatten_anchors_per_level = [k.reshape((-1, 4)) for k in anchors_per_level]
all_anchors_flatten = np.concatenate(flatten_anchors_per_level, axis=0)
inside_ind, inside_anchors = filter_boxes_inside_shape(all_anchors_flatten, im.shape[:2])
anchor_labels, anchor_gt_boxes = get_anchor_labels(inside_anchors, boxes[is_crowd == 0], boxes[is_crowd == 1])
# map back to all_anchors, then split to each level
num_all_anchors = all_anchors_flatten.shape[0]
all_labels = -np.ones((num_all_anchors, ), dtype='int32')
all_labels[inside_ind] = anchor_labels
all_boxes = np.zeros((num_all_anchors, 4), dtype='float32')
all_boxes[inside_ind] = anchor_gt_boxes
start = 0
multilevel_inputs = []
for level_anchor in anchors_per_level:
assert level_anchor.shape[2] == len(cfg.RPN.ANCHOR_RATIOS)
anchor_shape = level_anchor.shape[:3] # fHxfWxNUM_ANCHOR_RATIOS
num_anchor_this_level = np.prod(anchor_shape)
end = start + num_anchor_this_level
multilevel_inputs.append(
(all_labels[start: end].reshape(anchor_shape),
all_boxes[start: end, :].reshape(anchor_shape + (4,))
))
start = end
assert end == num_all_anchors, "{} != {}".format(end, num_all_anchors)
return multilevel_inputs | Args:
im: an image
        boxes: nx4, floatbox, gt. shouldn't be changed
is_crowd: n,
Returns:
[(fm_labels, fm_boxes)]: Returns a tuple for each FPN level.
Each tuple contains the anchor labels and target boxes for each pixel in the featuremap.
fm_labels: fHxfWx NUM_ANCHOR_RATIOS
        fm_boxes: fHxfWx NUM_ANCHOR_RATIOS x4 | Below is the instruction that describes the task:
### Input:
Args:
im: an image
        boxes: nx4, floatbox, gt. shouldn't be changed
is_crowd: n,
Returns:
[(fm_labels, fm_boxes)]: Returns a tuple for each FPN level.
Each tuple contains the anchor labels and target boxes for each pixel in the featuremap.
fm_labels: fHxfWx NUM_ANCHOR_RATIOS
fm_boxes: fHxfWx NUM_ANCHOR_RATIOS x4
### Response:
def get_multilevel_rpn_anchor_input(im, boxes, is_crowd):
"""
Args:
im: an image
        boxes: nx4, floatbox, gt. shouldn't be changed
is_crowd: n,
Returns:
[(fm_labels, fm_boxes)]: Returns a tuple for each FPN level.
Each tuple contains the anchor labels and target boxes for each pixel in the featuremap.
fm_labels: fHxfWx NUM_ANCHOR_RATIOS
fm_boxes: fHxfWx NUM_ANCHOR_RATIOS x4
"""
boxes = boxes.copy()
anchors_per_level = get_all_anchors_fpn()
flatten_anchors_per_level = [k.reshape((-1, 4)) for k in anchors_per_level]
all_anchors_flatten = np.concatenate(flatten_anchors_per_level, axis=0)
inside_ind, inside_anchors = filter_boxes_inside_shape(all_anchors_flatten, im.shape[:2])
anchor_labels, anchor_gt_boxes = get_anchor_labels(inside_anchors, boxes[is_crowd == 0], boxes[is_crowd == 1])
# map back to all_anchors, then split to each level
num_all_anchors = all_anchors_flatten.shape[0]
all_labels = -np.ones((num_all_anchors, ), dtype='int32')
all_labels[inside_ind] = anchor_labels
all_boxes = np.zeros((num_all_anchors, 4), dtype='float32')
all_boxes[inside_ind] = anchor_gt_boxes
start = 0
multilevel_inputs = []
for level_anchor in anchors_per_level:
assert level_anchor.shape[2] == len(cfg.RPN.ANCHOR_RATIOS)
anchor_shape = level_anchor.shape[:3] # fHxfWxNUM_ANCHOR_RATIOS
num_anchor_this_level = np.prod(anchor_shape)
end = start + num_anchor_this_level
multilevel_inputs.append(
(all_labels[start: end].reshape(anchor_shape),
all_boxes[start: end, :].reshape(anchor_shape + (4,))
))
start = end
assert end == num_all_anchors, "{} != {}".format(end, num_all_anchors)
return multilevel_inputs |
def register(cache):
''' Registers a cache. '''
global caches
name = cache().name
if not caches.has_key(name):
        caches[name] = cache | Registers a cache. | Below is the instruction that describes the task:
### Input:
Registers a cache.
### Response:
def register(cache):
''' Registers a cache. '''
global caches
name = cache().name
if not caches.has_key(name):
caches[name] = cache |
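A minimal sketch with a hypothetical cache class; under the Python 2 semantics of the code above, register() instantiates the class once and files the class under its .name attribute:
class MemoryCache(object):
    def __init__(self):
        self.name = 'memory'
register(MemoryCache)   # caches['memory'] now refers to MemoryCache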
def add_group_grant(self, permission, group_id):
"""
Convenience method that provides a quick way to add a canonical group
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type group_id: string
:param group_id: The canonical group id associated with the Google
Groups account you are granting the permission to.
"""
acl = self.get_acl()
acl.add_group_grant(permission, group_id)
self.set_acl(acl) | Convenience method that provides a quick way to add a canonical group
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type group_id: string
:param group_id: The canonical group id associated with the Google
            Groups account you are granting the permission to. | Below is the instruction that describes the task:
### Input:
Convenience method that provides a quick way to add a canonical group
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type group_id: string
:param group_id: The canonical group id associated with the Google
Groups account you are granting the permission to.
### Response:
def add_group_grant(self, permission, group_id):
"""
Convenience method that provides a quick way to add a canonical group
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type group_id: string
:param group_id: The canonical group id associated with the Google
Groups account you are granting the permission to.
"""
acl = self.get_acl()
acl.add_group_grant(permission, group_id)
self.set_acl(acl) |
def on_gtk_prefer_dark_theme_toggled(self, chk):
"""Set the `gtk_prefer_dark_theme' property in dconf
"""
self.settings.general.set_boolean('gtk-prefer-dark-theme', chk.get_active())
        select_gtk_theme(self.settings) | Set the `gtk_prefer_dark_theme' property in dconf | Below is the instruction that describes the task:
### Input:
Set the `gtk_prefer_dark_theme' property in dconf
### Response:
def on_gtk_prefer_dark_theme_toggled(self, chk):
"""Set the `gtk_prefer_dark_theme' property in dconf
"""
self.settings.general.set_boolean('gtk-prefer-dark-theme', chk.get_active())
select_gtk_theme(self.settings) |
def all_coplanar(triangles):
"""
Check to see if a list of triangles are all coplanar
Parameters
----------------
triangles: (n, 3, 3) float
Vertices of triangles
Returns
---------------
all_coplanar : bool
True if all triangles are coplanar
"""
triangles = np.asanyarray(triangles, dtype=np.float64)
if not util.is_shape(triangles, (-1, 3, 3)):
raise ValueError('Triangles must be (n,3,3)!')
test_normal = normals(triangles)[0]
test_vertex = triangles[0][0]
distances = point_plane_distance(points=triangles[1:].reshape((-1, 3)),
plane_normal=test_normal,
plane_origin=test_vertex)
all_coplanar = np.all(np.abs(distances) < tol.zero)
return all_coplanar | Check to see if a list of triangles are all coplanar
Parameters
----------------
triangles: (n, 3, 3) float
Vertices of triangles
Returns
---------------
all_coplanar : bool
      True if all triangles are coplanar | Below is the instruction that describes the task:
### Input:
Check to see if a list of triangles are all coplanar
Parameters
----------------
triangles: (n, 3, 3) float
Vertices of triangles
Returns
---------------
all_coplanar : bool
True if all triangles are coplanar
### Response:
def all_coplanar(triangles):
"""
Check to see if a list of triangles are all coplanar
Parameters
----------------
triangles: (n, 3, 3) float
Vertices of triangles
Returns
---------------
all_coplanar : bool
True if all triangles are coplanar
"""
triangles = np.asanyarray(triangles, dtype=np.float64)
if not util.is_shape(triangles, (-1, 3, 3)):
raise ValueError('Triangles must be (n,3,3)!')
test_normal = normals(triangles)[0]
test_vertex = triangles[0][0]
distances = point_plane_distance(points=triangles[1:].reshape((-1, 3)),
plane_normal=test_normal,
plane_origin=test_vertex)
all_coplanar = np.all(np.abs(distances) < tol.zero)
return all_coplanar |
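A small self-check with hand-made geometry (assumes the surrounding trimesh-style module supplies normals, point_plane_distance and tol):
import numpy as np
tris = np.array([[[0, 0, 0], [1, 0, 0], [0, 1, 0]],
                 [[1, 0, 0], [1, 1, 0], [0, 1, 0]]], dtype=np.float64)
all_coplanar(tris)   # -> True, both triangles lie in the z=0 plane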
def _get_crud_params(compiler, stmt, **kw):
""" extract values from crud parameters
taken from SQLAlchemy's crud module (since 1.0.x) and
adapted for Crate dialect"""
compiler.postfetch = []
compiler.insert_prefetch = []
compiler.update_prefetch = []
compiler.returning = []
# no parameters in the statement, no parameters in the
# compiled params - return binds for all columns
if compiler.column_keys is None and stmt.parameters is None:
return [(c, crud._create_bind_param(compiler, c, None,
required=True))
for c in stmt.table.columns]
if stmt._has_multi_parameters:
stmt_parameters = stmt.parameters[0]
else:
stmt_parameters = stmt.parameters
# getters - these are normally just column.key,
# but in the case of mysql multi-table update, the rules for
# .key must conditionally take tablename into account
if SA_VERSION >= SA_1_1:
_column_as_key, _getattr_col_key, _col_bind_name = \
crud._key_getters_for_crud_column(compiler, stmt)
else:
_column_as_key, _getattr_col_key, _col_bind_name = \
crud._key_getters_for_crud_column(compiler)
# if we have statement parameters - set defaults in the
# compiled params
if compiler.column_keys is None:
parameters = {}
else:
parameters = dict((_column_as_key(key), crud.REQUIRED)
for key in compiler.column_keys
if not stmt_parameters or
key not in stmt_parameters)
# create a list of column assignment clauses as tuples
values = []
if stmt_parameters is not None:
crud._get_stmt_parameters_params(
compiler,
parameters, stmt_parameters, _column_as_key, values, kw)
check_columns = {}
crud._scan_cols(compiler, stmt, parameters,
_getattr_col_key, _column_as_key,
_col_bind_name, check_columns, values, kw)
if stmt._has_multi_parameters:
values = crud._extend_values_for_multiparams(compiler, stmt,
values, kw)
return values | extract values from crud parameters
taken from SQLAlchemy's crud module (since 1.0.x) and
    adapted for Crate dialect | Below is the instruction that describes the task:
### Input:
extract values from crud parameters
taken from SQLAlchemy's crud module (since 1.0.x) and
adapted for Crate dialect
### Response:
def _get_crud_params(compiler, stmt, **kw):
""" extract values from crud parameters
taken from SQLAlchemy's crud module (since 1.0.x) and
adapted for Crate dialect"""
compiler.postfetch = []
compiler.insert_prefetch = []
compiler.update_prefetch = []
compiler.returning = []
# no parameters in the statement, no parameters in the
# compiled params - return binds for all columns
if compiler.column_keys is None and stmt.parameters is None:
return [(c, crud._create_bind_param(compiler, c, None,
required=True))
for c in stmt.table.columns]
if stmt._has_multi_parameters:
stmt_parameters = stmt.parameters[0]
else:
stmt_parameters = stmt.parameters
# getters - these are normally just column.key,
# but in the case of mysql multi-table update, the rules for
# .key must conditionally take tablename into account
if SA_VERSION >= SA_1_1:
_column_as_key, _getattr_col_key, _col_bind_name = \
crud._key_getters_for_crud_column(compiler, stmt)
else:
_column_as_key, _getattr_col_key, _col_bind_name = \
crud._key_getters_for_crud_column(compiler)
# if we have statement parameters - set defaults in the
# compiled params
if compiler.column_keys is None:
parameters = {}
else:
parameters = dict((_column_as_key(key), crud.REQUIRED)
for key in compiler.column_keys
if not stmt_parameters or
key not in stmt_parameters)
# create a list of column assignment clauses as tuples
values = []
if stmt_parameters is not None:
crud._get_stmt_parameters_params(
compiler,
parameters, stmt_parameters, _column_as_key, values, kw)
check_columns = {}
crud._scan_cols(compiler, stmt, parameters,
_getattr_col_key, _column_as_key,
_col_bind_name, check_columns, values, kw)
if stmt._has_multi_parameters:
values = crud._extend_values_for_multiparams(compiler, stmt,
values, kw)
return values |
def helioY(self,*args,**kwargs):
"""
NAME:
helioY
PURPOSE:
return Heliocentric Galactic rectangular y-coordinate (aka "Y")
INPUT:
t - (optional) time at which to get Y
obs=[X,Y,Z] - (optional) position and velocity of observer
(in kpc and km/s) (default=Object-wide default)
OR Orbit object that corresponds to the orbit
of the observer
Y is ignored and always assumed to be zero
ro= distance in kpc corresponding to R=1. (default=Object-wide default)
OUTPUT:
helioY(t) in kpc
HISTORY:
2011-02-24 - Written - Bovy (NYU)
"""
_check_roSet(self,kwargs,'helioY')
X, Y, Z= self._helioXYZ(*args,**kwargs)
return Y | NAME:
helioY
PURPOSE:
return Heliocentric Galactic rectangular y-coordinate (aka "Y")
INPUT:
t - (optional) time at which to get Y
obs=[X,Y,Z] - (optional) position and velocity of observer
(in kpc and km/s) (default=Object-wide default)
OR Orbit object that corresponds to the orbit
of the observer
Y is ignored and always assumed to be zero
ro= distance in kpc corresponding to R=1. (default=Object-wide default)
OUTPUT:
helioY(t) in kpc
HISTORY:
           2011-02-24 - Written - Bovy (NYU) | Below is the instruction that describes the task:
### Input:
NAME:
helioY
PURPOSE:
return Heliocentric Galactic rectangular y-coordinate (aka "Y")
INPUT:
t - (optional) time at which to get Y
obs=[X,Y,Z] - (optional) position and velocity of observer
(in kpc and km/s) (default=Object-wide default)
OR Orbit object that corresponds to the orbit
of the observer
Y is ignored and always assumed to be zero
ro= distance in kpc corresponding to R=1. (default=Object-wide default)
OUTPUT:
helioY(t) in kpc
HISTORY:
2011-02-24 - Written - Bovy (NYU)
### Response:
def helioY(self,*args,**kwargs):
"""
NAME:
helioY
PURPOSE:
return Heliocentric Galactic rectangular y-coordinate (aka "Y")
INPUT:
t - (optional) time at which to get Y
obs=[X,Y,Z] - (optional) position and velocity of observer
(in kpc and km/s) (default=Object-wide default)
OR Orbit object that corresponds to the orbit
of the observer
Y is ignored and always assumed to be zero
ro= distance in kpc corresponding to R=1. (default=Object-wide default)
OUTPUT:
helioY(t) in kpc
HISTORY:
2011-02-24 - Written - Bovy (NYU)
"""
_check_roSet(self,kwargs,'helioY')
X, Y, Z= self._helioXYZ(*args,**kwargs)
return Y |
def get_bugs_summaries(self, bugids):
"""
Get multiple bug objects' summaries only (faster).
param bugids: ``list`` of ``int``, bug numbers.
returns: deferred that when fired returns a list of ``AttrDict``s
representing these bugs.
"""
payload = {'ids': bugids, 'include_fields': ['id', 'summary']}
d = self.call('Bug.get', payload)
d.addCallback(self._parse_bugs_callback)
return d | Get multiple bug objects' summaries only (faster).
param bugids: ``list`` of ``int``, bug numbers.
returns: deferred that when fired returns a list of ``AttrDict``s
        representing these bugs. | Below is the instruction that describes the task:
### Input:
Get multiple bug objects' summaries only (faster).
param bugids: ``list`` of ``int``, bug numbers.
returns: deferred that when fired returns a list of ``AttrDict``s
representing these bugs.
### Response:
def get_bugs_summaries(self, bugids):
"""
Get multiple bug objects' summaries only (faster).
param bugids: ``list`` of ``int``, bug numbers.
returns: deferred that when fired returns a list of ``AttrDict``s
representing these bugs.
"""
payload = {'ids': bugids, 'include_fields': ['id', 'summary']}
d = self.call('Bug.get', payload)
d.addCallback(self._parse_bugs_callback)
return d |
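Illustrative Twisted-style usage (bz stands in for an already-connected client instance; the bug ids are made up):
d = bz.get_bugs_summaries([12345, 67890])
d.addCallback(lambda bugs: [(bug.id, bug.summary) for bug in bugs])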
def replace_rep_after(text: str) -> str:
"Replace repetitions at the character level in `text` after the repetition"
def _replace_rep(m):
c, cc = m.groups()
return f"{c}{TK_REP}{len(cc)+1}"
re_rep = re.compile(r"(\S)(\1{2,})")
    return re_rep.sub(_replace_rep, text) | Replace repetitions at the character level in `text` after the repetition | Below is the instruction that describes the task:
### Input:
Replace repetitions at the character level in `text` after the repetition
### Response:
def replace_rep_after(text: str) -> str:
"Replace repetitions at the character level in `text` after the repetition"
def _replace_rep(m):
c, cc = m.groups()
return f"{c}{TK_REP}{len(cc)+1}"
re_rep = re.compile(r"(\S)(\1{2,})")
return re_rep.sub(_replace_rep, text) |
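A worked example, assuming the module-level TK_REP token is the literal string 'xxrep':
replace_rep_after("coool")   # -> 'coxxrep3l': the run of three 'o's becomes the character, the token, then the run length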
def new(cls) -> 'Generator':
"""
        Creates and returns a random generator point that satisfies BLS algorithm requirements.
:return: BLS generator
"""
logger = logging.getLogger(__name__)
logger.debug("Generator::new: >>>")
c_instance = c_void_p()
do_call(cls.new_handler, byref(c_instance))
res = cls(c_instance)
logger.debug("Generator::new: <<< res: %r", res)
        return res | Creates and returns a random generator point that satisfies BLS algorithm requirements.
        :return: BLS generator | Below is the instruction that describes the task:
### Input:
Creates and returns a random generator point that satisfies BLS algorithm requirements.
:return: BLS generator
### Response:
def new(cls) -> 'Generator':
"""
        Creates and returns a random generator point that satisfies BLS algorithm requirements.
:return: BLS generator
"""
logger = logging.getLogger(__name__)
logger.debug("Generator::new: >>>")
c_instance = c_void_p()
do_call(cls.new_handler, byref(c_instance))
res = cls(c_instance)
logger.debug("Generator::new: <<< res: %r", res)
return res |
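Usage is a single call (assuming new is exposed as a classmethod, as its cls parameter suggests, and the native crypto library behind do_call is loaded):
gen = Generator.new()   # a fresh generator point for subsequent BLS key generation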
def _udf_cell(args, cell_body):
"""Implements the Bigquery udf cell magic for ipython notebooks.
The supported syntax is:
%%bq udf --name <var> --language <lang>
// @param <name> <type>
// @returns <type>
// @import <gcs_path>
<js function>
Args:
args: the optional arguments following '%%bq udf'.
cell_body: the UDF declaration (inputs and outputs) and implementation in javascript.
"""
udf_name = args['name']
if not udf_name:
raise Exception('Declaration must be of the form %%bq udf --name <variable name>')
# Parse out parameters, return type, and imports
param_pattern = r'^\s*\/\/\s*@param\s+([<>\w]+)\s+([<>\w,\s]+)\s*$'
returns_pattern = r'^\s*\/\/\s*@returns\s+([<>\w,\s]+)\s*$'
import_pattern = r'^\s*\/\/\s*@import\s+(\S+)\s*$'
params = re.findall(param_pattern, cell_body, re.MULTILINE)
return_type = re.findall(returns_pattern, cell_body, re.MULTILINE)
imports = re.findall(import_pattern, cell_body, re.MULTILINE)
if len(return_type) < 1:
raise Exception('UDF return type must be defined using // @returns <type>')
if len(return_type) > 1:
raise Exception('Found more than one return type definition')
return_type = return_type[0]
# Finally build the UDF object
udf = bigquery.UDF(udf_name, cell_body, return_type, params, args['language'], imports)
google.datalab.utils.commands.notebook_environment()[udf_name] = udf | Implements the Bigquery udf cell magic for ipython notebooks.
The supported syntax is:
%%bq udf --name <var> --language <lang>
// @param <name> <type>
// @returns <type>
// @import <gcs_path>
<js function>
Args:
args: the optional arguments following '%%bq udf'.
cell_body: the UDF declaration (inputs and outputs) and implementation in javascript. | Below is the instruction that describes the task:
### Input:
Implements the Bigquery udf cell magic for ipython notebooks.
The supported syntax is:
%%bq udf --name <var> --language <lang>
// @param <name> <type>
// @returns <type>
// @import <gcs_path>
<js function>
Args:
args: the optional arguments following '%%bq udf'.
cell_body: the UDF declaration (inputs and outputs) and implementation in javascript.
### Response:
def _udf_cell(args, cell_body):
"""Implements the Bigquery udf cell magic for ipython notebooks.
The supported syntax is:
%%bq udf --name <var> --language <lang>
// @param <name> <type>
// @returns <type>
// @import <gcs_path>
<js function>
Args:
args: the optional arguments following '%%bq udf'.
cell_body: the UDF declaration (inputs and outputs) and implementation in javascript.
"""
udf_name = args['name']
if not udf_name:
raise Exception('Declaration must be of the form %%bq udf --name <variable name>')
# Parse out parameters, return type, and imports
param_pattern = r'^\s*\/\/\s*@param\s+([<>\w]+)\s+([<>\w,\s]+)\s*$'
returns_pattern = r'^\s*\/\/\s*@returns\s+([<>\w,\s]+)\s*$'
import_pattern = r'^\s*\/\/\s*@import\s+(\S+)\s*$'
params = re.findall(param_pattern, cell_body, re.MULTILINE)
return_type = re.findall(returns_pattern, cell_body, re.MULTILINE)
imports = re.findall(import_pattern, cell_body, re.MULTILINE)
if len(return_type) < 1:
raise Exception('UDF return type must be defined using // @returns <type>')
if len(return_type) > 1:
raise Exception('Found more than one return type definition')
return_type = return_type[0]
# Finally build the UDF object
udf = bigquery.UDF(udf_name, cell_body, return_type, params, args['language'], imports)
google.datalab.utils.commands.notebook_environment()[udf_name] = udf |
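A runnable sketch of just the parsing step this cell magic performs, applied to a made-up JavaScript UDF body; the regular expressions are the ones from the source, the cell contents are invented.

import re

cell_body = """// @param s STRING
// @returns INTEGER
return s.length;"""

params = re.findall(r'^\s*\/\/\s*@param\s+([<>\w]+)\s+([<>\w,\s]+)\s*$', cell_body, re.MULTILINE)
return_type = re.findall(r'^\s*\/\/\s*@returns\s+([<>\w,\s]+)\s*$', cell_body, re.MULTILINE)
print(params)       # -> [('s', 'STRING')]
print(return_type)  # -> ['INTEGER']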
def scanAllProcessesForMapping(searchPortion, isExactMatch=False, ignoreCase=False):
'''
scanAllProcessesForMapping - Scans all processes on the system for a given search pattern.
@param searchPortion <str> - A mapping for which to search, example: libc or python or libz.so.1. Give empty string to return all mappings.
@param isExactMatch <bool> Default False - If match should be exact, otherwise a partial match is performed.
@param ignoreCase <bool> Default False - If True, search will be performed case-insensitively
@return - <dict> - A dictionary of pid -> mappingResults for each pid that matched the search pattern. For format of "mappingResults", @see scanProcessForMapping
'''
pids = getAllRunningPids()
# Since processes could disappear, we run the scan as fast as possible here with a list comprehension, then assemble the return dictionary later.
mappingResults = [scanProcessForMapping(pid, searchPortion, isExactMatch, ignoreCase) for pid in pids]
ret = {}
for i in range(len(pids)):
if mappingResults[i] is not None:
ret[pids[i]] = mappingResults[i]
return ret | scanAllProcessesForMapping - Scans all processes on the system for a given search pattern.
@param searchPortion <str> - A mapping for which to search, example: libc or python or libz.so.1. Give empty string to return all mappings.
@param isExactMatch <bool> Default False - If match should be exact, otherwise a partial match is performed.
@param ignoreCase <bool> Default False - If True, search will be performed case-insensitively
@return - <dict> - A dictionary of pid -> mappingResults for each pid that matched the search pattern. For format of "mappingResults", @see scanProcessForMapping | Below is the instruction that describes the task:
### Input:
scanAllProcessesForMapping - Scans all processes on the system for a given search pattern.
@param searchPortion <str> - A mapping for which to search, example: libc or python or libz.so.1. Give empty string to return all mappings.
@param isExactMatch <bool> Default False - If match should be exact, otherwise a partial match is performed.
@param ignoreCase <bool> Default False - If True, search will be performed case-insensitively
@return - <dict> - A dictionary of pid -> mappingResults for each pid that matched the search pattern. For format of "mappingResults", @see scanProcessForMapping
### Response:
def scanAllProcessesForMapping(searchPortion, isExactMatch=False, ignoreCase=False):
'''
scanAllProcessesForMapping - Scans all processes on the system for a given search pattern.
@param searchPortion <str> - A mapping for which to search, example: libc or python or libz.so.1. Give empty string to return all mappings.
@param isExactMatch <bool> Default False - If match should be exact, otherwise a partial match is performed.
@param ignoreCase <bool> Default False - If True, search will be performed case-insensitively
@return - <dict> - A dictionary of pid -> mappingResults for each pid that matched the search pattern. For format of "mappingResults", @see scanProcessForMapping
'''
pids = getAllRunningPids()
# Since processes could disappear, we run the scan as fast as possible here with a list comprehension, then assemble the return dictionary later.
mappingResults = [scanProcessForMapping(pid, searchPortion, isExactMatch, ignoreCase) for pid in pids]
ret = {}
for i in range(len(pids)):
if mappingResults[i] is not None:
ret[pids[i]] = mappingResults[i]
return ret |
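A hedged usage sketch for the row above; it assumes the module's companion helpers (getAllRunningPids, scanProcessForMapping) are available alongside the function, and the per-process result shape is taken from the docstring rather than checked.

results = scanAllProcessesForMapping("libc", isExactMatch=False, ignoreCase=True)
for pid, mapping_info in results.items():
    # one entry per live process whose memory maps matched "libc"
    print(pid, mapping_info)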
def type_check_cmd(self, args, range=None):
"""Sets the flag to begin buffering typecheck notes & clears any
stale notes before requesting a typecheck from the server"""
self.log.debug('type_check_cmd: in')
self.start_typechecking()
self.type_check("")
self.editor.message('typechecking') | Sets the flag to begin buffering typecheck notes & clears any
stale notes before requesting a typecheck from the server | Below is the instruction that describes the task:
### Input:
Sets the flag to begin buffering typecheck notes & clears any
stale notes before requesting a typecheck from the server
### Response:
def type_check_cmd(self, args, range=None):
"""Sets the flag to begin buffering typecheck notes & clears any
stale notes before requesting a typecheck from the server"""
self.log.debug('type_check_cmd: in')
self.start_typechecking()
self.type_check("")
self.editor.message('typechecking') |
def _get_constraints(self):
"""Get neighboring edges for each edge in the edges."""
num_edges = len(self.edges)
for k in range(num_edges):
for i in range(num_edges):
# add to constraints if i shared an edge with k
if k != i and self.edges[k].is_adjacent(self.edges[i]):
self.edges[k].neighbors.append(i) | Get neighboring edges for each edge in the edges. | Below is the instruction that describes the task:
### Input:
Get neighboring edges for each edge in the edges.
### Response:
def _get_constraints(self):
"""Get neighboring edges for each edge in the edges."""
num_edges = len(self.edges)
for k in range(num_edges):
for i in range(num_edges):
# add to constraints if i shared an edge with k
if k != i and self.edges[k].is_adjacent(self.edges[i]):
self.edges[k].neighbors.append(i) |
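A minimal, self-contained sketch of the same neighbor-building pass; the Edge class below is a stand-in invented for the example (the real Edge.is_adjacent lives in the surrounding module).

class Edge:
    def __init__(self, a, b):
        self.a, self.b, self.neighbors = a, b, []
    def is_adjacent(self, other):
        # assumed meaning of adjacency: the two edges share an endpoint
        return bool({self.a, self.b} & {other.a, other.b})

edges = [Edge(0, 1), Edge(1, 2), Edge(3, 4)]
for k in range(len(edges)):
    for i in range(len(edges)):
        if k != i and edges[k].is_adjacent(edges[i]):
            edges[k].neighbors.append(i)
print([e.neighbors for e in edges])  # -> [[1], [0], []]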
def _publish_deferred_messages(self):
"""Called when pika is connected and has a channel open to publish
any requests buffered.
"""
global message_stack
if not self._rabbitmq_is_closed and message_stack:
LOGGER.info('Publishing %i deferred message(s)', len(message_stack))
while message_stack:
self._publish_message(*message_stack.pop()) | Called when pika is connected and has a channel open to publish
any requests buffered. | Below is the instruction that describes the task:
### Input:
Called when pika is connected and has a channel open to publish
any requests buffered.
### Response:
def _publish_deferred_messages(self):
"""Called when pika is connected and has a channel open to publish
any requests buffered.
"""
global message_stack
if not self._rabbitmq_is_closed and message_stack:
LOGGER.info('Publishing %i deferred message(s)', len(message_stack))
while message_stack:
self._publish_message(*message_stack.pop()) |
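A stripped-down sketch of the buffering pattern this method drains: publishes attempted while the connection is down are pushed onto the module-level message_stack and replayed later. The helper below is a stand-in written for illustration, not code from the source.

message_stack = []

def publish_or_defer(exchange, routing_key, body, channel_open):
    if not channel_open:
        # connection is down: remember the request so it can be flushed later
        message_stack.append((exchange, routing_key, body))
        return False
    # a real implementation would call basic_publish on the open channel here
    return True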
def encode(self):
'''Compress the associated encodable payload,
prepend the header then encode with base64 if requested
Returns:
the b64 encoded wire encoding of the histogram (as a string)
or the compressed payload (as a string, if b64 wrapping is disabled)
'''
# only compress the first non zero buckets
# if histogram is empty we do not encode any counter
if self.histogram.total_count:
relevant_length = \
self.histogram.get_counts_array_index(self.histogram.max_value) + 1
else:
relevant_length = 0
cpayload = self.payload.compress(relevant_length)
if self.b64_wrap:
self.header.length = len(cpayload)
header_str = ctypes.string_at(addressof(self.header), ext_header_size)
return base64.b64encode(header_str + cpayload)
return cpayload | Compress the associated encodable payload,
prepend the header then encode with base64 if requested
Returns:
the b64 encoded wire encoding of the histogram (as a string)
or the compressed payload (as a string, if b64 wrapping is disabled) | Below is the instruction that describes the task:
### Input:
Compress the associated encodable payload,
prepend the header then encode with base64 if requested
Returns:
the b64 encoded wire encoding of the histogram (as a string)
or the compressed payload (as a string, if b64 wrapping is disabled)
### Response:
def encode(self):
'''Compress the associated encodable payload,
prepend the header then encode with base64 if requested
Returns:
the b64 encoded wire encoding of the histogram (as a string)
or the compressed payload (as a string, if b64 wrapping is disabled)
'''
# only compress the first non zero buckets
# if histogram is empty we do not encode any counter
if self.histogram.total_count:
relevant_length = \
self.histogram.get_counts_array_index(self.histogram.max_value) + 1
else:
relevant_length = 0
cpayload = self.payload.compress(relevant_length)
if self.b64_wrap:
self.header.length = len(cpayload)
header_str = ctypes.string_at(addressof(self.header), ext_header_size)
return base64.b64encode(header_str + cpayload)
return cpayload |
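A hedged usage sketch: this encoder appears to back the Python hdrhistogram package's histogram.encode(), so the call below shows it from that end; the import path and constructor arguments are stated from memory and may differ by version.

from hdrh.histogram import HdrHistogram  # assumed import path

h = HdrHistogram(1, 60 * 60 * 1000, 3)   # track values 1..3,600,000 at 3 significant digits
for value in (12, 250, 3000):
    h.record_value(value)
wire = h.encode()                        # base64-wrapped wire form when b64_wrap is enabled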
def declare_queue(self, queue_name):
"""Declare a queue. Has no effect if a queue with the given
name already exists.
Parameters:
queue_name(str): The name of the new queue.
Raises:
ConnectionClosed: If the underlying channel or connection
has been closed.
"""
attempts = 1
while True:
try:
if queue_name not in self.queues:
self.emit_before("declare_queue", queue_name)
self._declare_queue(queue_name)
self.queues.add(queue_name)
self.emit_after("declare_queue", queue_name)
delayed_name = dq_name(queue_name)
self._declare_dq_queue(queue_name)
self.delay_queues.add(delayed_name)
self.emit_after("declare_delay_queue", delayed_name)
self._declare_xq_queue(queue_name)
break
except (pika.exceptions.AMQPConnectionError,
pika.exceptions.AMQPChannelError) as e: # pragma: no cover
# Delete the channel and the connection so that the next
# caller may initiate new ones of each.
del self.channel
del self.connection
attempts += 1
if attempts > MAX_DECLARE_ATTEMPTS:
raise ConnectionClosed(e) from None
self.logger.debug(
"Retrying declare due to closed connection. [%d/%d]",
attempts, MAX_DECLARE_ATTEMPTS,
) | Declare a queue. Has no effect if a queue with the given
name already exists.
Parameters:
queue_name(str): The name of the new queue.
Raises:
ConnectionClosed: If the underlying channel or connection
has been closed. | Below is the instruction that describes the task:
### Input:
Declare a queue. Has no effect if a queue with the given
name already exists.
Parameters:
queue_name(str): The name of the new queue.
Raises:
ConnectionClosed: If the underlying channel or connection
has been closed.
### Response:
def declare_queue(self, queue_name):
"""Declare a queue. Has no effect if a queue with the given
name already exists.
Parameters:
queue_name(str): The name of the new queue.
Raises:
ConnectionClosed: If the underlying channel or connection
has been closed.
"""
attempts = 1
while True:
try:
if queue_name not in self.queues:
self.emit_before("declare_queue", queue_name)
self._declare_queue(queue_name)
self.queues.add(queue_name)
self.emit_after("declare_queue", queue_name)
delayed_name = dq_name(queue_name)
self._declare_dq_queue(queue_name)
self.delay_queues.add(delayed_name)
self.emit_after("declare_delay_queue", delayed_name)
self._declare_xq_queue(queue_name)
break
except (pika.exceptions.AMQPConnectionError,
pika.exceptions.AMQPChannelError) as e: # pragma: no cover
# Delete the channel and the connection so that the next
# caller may initiate new ones of each.
del self.channel
del self.connection
attempts += 1
if attempts > MAX_DECLARE_ATTEMPTS:
raise ConnectionClosed(e) from None
self.logger.debug(
"Retrying declare due to closed connection. [%d/%d]",
attempts, MAX_DECLARE_ATTEMPTS,
) |
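A hedged usage sketch: the method reads like dramatiq's RabbitMQ broker, so the snippet uses that package's public constructor; the URL and queue name are made up, and the derived .DQ/.XQ queue names are inferred from the dq_name/xq_name helpers referenced above.

from dramatiq.brokers.rabbitmq import RabbitmqBroker

broker = RabbitmqBroker(url="amqp://guest:guest@localhost:5672")
broker.declare_queue("emails")
# besides "emails" itself this also sets up the delayed queue ("emails.DQ") and
# the dead-letter queue ("emails.XQ"); calling it again for the same name is a no-op.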
def GetAll(cls, attr, value, e=0.000001, sort_by="__name__"):
"""Get all nested Constant class that met ``klass.attr == value``.
:param attr: attribute name.
:param value: value.
:param e: used for float value comparison.
:param sort_by: nested class is ordered by <sort_by> attribute.
.. versionadded:: 0.0.5
"""
matched = list()
for _, klass in cls.Subclasses(sort_by=sort_by):
try:
if klass.__dict__[attr] == approx(value, e):
matched.append(klass)
except: # pragma: no cover
pass
return matched | Get all nested Constant classes that meet ``klass.attr == value``.
:param attr: attribute name.
:param value: value.
:param e: used for float value comparison.
:param sort_by: nested class is ordered by <sort_by> attribute.
.. versionadded:: 0.0.5 | Below is the instruction that describes the task:
### Input:
Get all nested Constant classes that meet ``klass.attr == value``.
:param attr: attribute name.
:param value: value.
:param e: used for float value comparison.
:param sort_by: nested class is ordered by <sort_by> attribute.
.. versionadded:: 0.0.5
### Response:
def GetAll(cls, attr, value, e=0.000001, sort_by="__name__"):
"""Get all nested Constant class that met ``klass.attr == value``.
:param attr: attribute name.
:param value: value.
:param e: used for float value comparison.
:param sort_by: nested class is ordered by <sort_by> attribute.
.. versionadded:: 0.0.5
"""
matched = list()
for _, klass in cls.Subclasses(sort_by=sort_by):
try:
if klass.__dict__[attr] == approx(value, e):
matched.append(klass)
except: # pragma: no cover
pass
return matched |
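A small illustrative sketch of the lookup above; Constant is assumed to be the base class this classmethod is defined on, and the nested classes and expected result are invented for the example.

class Item(Constant):
    class Potion(Constant):
        weight = 1
    class Shield(Constant):
        weight = 10
    class Sword(Constant):
        weight = 10

Item.GetAll("weight", 10)  # expected: [Item.Shield, Item.Sword], ordered by __name__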
def merge(arg, *rest, **kwargs):
"""Merge a collection, with functions as items, into a single function
that takes a collection and maps its items through corresponding functions.
:param arg: A collection of functions, such as list, tuple, or dictionary
:param default: Optional default function to use for items
within merged function's arguments that do not have
corresponding functions in ``arg``
Example with two-element tuple::
>> dict_ = {'Alice': -5, 'Bob': 4}
>> func = merge((str.upper, abs))
>> dict(map(func, dict_.items()))
{'ALICE': 5, 'BOB': 4}
Example with a dictionary::
>> func = merge({'id': int, 'name': str.split})
>> data = [
{'id': '1', 'name': "John Doe"},
{'id': '2', 'name': "Anne Arbor"},
]
>> list(map(func, data))
[{'id': 1, 'name': ['John', 'Doe']},
{'id': 2, 'name': ['Anne', 'Arbor']}]
:return: Merged function
.. versionadded:: 0.0.2
"""
ensure_keyword_args(kwargs, optional=('default',))
has_default = 'default' in kwargs
if has_default:
default = ensure_callable(kwargs['default'])
# if more than one argument was given, they must all be functions;
# result will be a function that takes multiple arguments (rather than
# a single collection) and returns a tuple
unary_result = True
if rest:
fs = (ensure_callable(arg),) + tuple(imap(ensure_callable, rest))
unary_result = False
else:
fs = arg
if is_mapping(fs):
if has_default:
return lambda arg_: fs.__class__((k, fs.get(k, default)(arg_[k]))
for k in arg_)
else:
return lambda arg_: fs.__class__((k, fs[k](arg_[k]))
for k in arg_)
else:
ensure_sequence(fs)
if has_default:
# we cannot use ``izip_longest(fs, arg_, fillvalue=default)``,
# because we want to terminate the generator
# only when ``arg_`` is exhausted (not when just ``fs`` is)
func = lambda arg_: fs.__class__(
(fs[i] if i < len(fs) else default)(x)
for i, x in enumerate(arg_))
else:
# we cannot use ``izip(fs, arg_)`` because it would short-circuit
# if ``arg_`` is longer than ``fs``, rather than raising
# the required ``IndexError``
func = lambda arg_: fs.__class__(fs[i](x)
for i, x in enumerate(arg_))
return func if unary_result else lambda *args: func(args) | Merge a collection, with functions as items, into a single function
that takes a collection and maps its items through corresponding functions.
:param arg: A collection of functions, such as list, tuple, or dictionary
:param default: Optional default function to use for items
within merged function's arguments that do not have
corresponding functions in ``arg``
Example with two-element tuple::
>> dict_ = {'Alice': -5, 'Bob': 4}
>> func = merge((str.upper, abs))
>> dict(map(func, dict_.items()))
{'ALICE': 5, 'BOB': 4}
Example with a dictionary::
>> func = merge({'id': int, 'name': str.split})
>> data = [
{'id': '1', 'name': "John Doe"},
{'id': '2', 'name': "Anne Arbor"},
]
>> list(map(func, data))
[{'id': 1, 'name': ['John', 'Doe']},
{'id': 2, 'name': ['Anne', 'Arbor']}]
:return: Merged function
.. versionadded:: 0.0.2 | Below is the instruction that describes the task:
### Input:
Merge a collection, with functions as items, into a single function
that takes a collection and maps its items through corresponding functions.
:param arg: A collection of functions, such as list, tuple, or dictionary
:param default: Optional default function to use for items
within merged function's arguments that do not have
corresponding functions in ``arg``
Example with two-element tuple::
>> dict_ = {'Alice': -5, 'Bob': 4}
>> func = merge((str.upper, abs))
>> dict(map(func, dict_.items()))
{'ALICE': 5, 'BOB': 4}
Example with a dictionary::
>> func = merge({'id': int, 'name': str.split})
>> data = [
{'id': '1', 'name': "John Doe"},
{'id': '2', 'name': "Anne Arbor"},
]
>> list(map(func, data))
[{'id': 1, 'name': ['John', 'Doe']},
{'id': 2, 'name': ['Anne', 'Arbor']}]
:return: Merged function
.. versionadded:: 0.0.2
### Response:
def merge(arg, *rest, **kwargs):
"""Merge a collection, with functions as items, into a single function
that takes a collection and maps its items through corresponding functions.
:param arg: A collection of functions, such as list, tuple, or dictionary
:param default: Optional default function to use for items
within merged function's arguments that do not have
corresponding functions in ``arg``
Example with two-element tuple::
>> dict_ = {'Alice': -5, 'Bob': 4}
>> func = merge((str.upper, abs))
>> dict(map(func, dict_.items()))
{'ALICE': 5, 'BOB': 4}
Example with a dictionary::
>> func = merge({'id': int, 'name': str.split})
>> data = [
{'id': '1', 'name': "John Doe"},
{'id': '2', 'name': "Anne Arbor"},
]
>> list(map(func, data))
[{'id': 1, 'name': ['John', 'Doe']},
{'id': 2, 'name': ['Anne', 'Arbor']}]
:return: Merged function
.. versionadded:: 0.0.2
"""
ensure_keyword_args(kwargs, optional=('default',))
has_default = 'default' in kwargs
if has_default:
default = ensure_callable(kwargs['default'])
# if more than one argument was given, they must all be functions;
# result will be a function that takes multiple arguments (rather than
# a single collection) and returns a tuple
unary_result = True
if rest:
fs = (ensure_callable(arg),) + tuple(imap(ensure_callable, rest))
unary_result = False
else:
fs = arg
if is_mapping(fs):
if has_default:
return lambda arg_: fs.__class__((k, fs.get(k, default)(arg_[k]))
for k in arg_)
else:
return lambda arg_: fs.__class__((k, fs[k](arg_[k]))
for k in arg_)
else:
ensure_sequence(fs)
if has_default:
# we cannot use ``izip_longest(fs, arg_, fillvalue=default)``,
# because we want to terminate the generator
# only when ``arg_`` is exhausted (not when just ``fs`` is)
func = lambda arg_: fs.__class__(
(fs[i] if i < len(fs) else default)(x)
for i, x in enumerate(arg_))
else:
# we cannot use ``izip(fs, arg_)`` because it would short-circuit
# if ``arg_`` is longer than ``fs``, rather than raising
# the required ``IndexError``
func = lambda arg_: fs.__class__(fs[i](x)
for i, x in enumerate(arg_))
return func if unary_result else lambda *args: func(args) |
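One extra sketch on top of the docstring examples, covering the default keyword they do not show; the expected result is read off the sequence branch of the code above.

func = merge((int,), default=str.strip)
func(["42", "  hello  ", " world "])  # -> (42, 'hello', 'world'), a tuple because fs is a tuple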