text (string, lengths 78 to 104k) | score (float64, 0 to 0.18)
---|---
def create_conversation(
self, parameters, custom_headers=None, raw=False, **operation_config):
"""CreateConversation.
Create a new Conversation.
POST to this method with a
* Bot being the bot creating the conversation
* IsGroup set to true if this is not a direct message (default is
false)
* Array containing the members to include in the conversation
The return value is a ResourceResponse which contains a conversation id
which is suitable for use
in the message payload and REST API uris.
Most channels only support the semantics of bots initiating a direct
message conversation. An example of how to do that would be:
```
var resource = await connector.conversations.CreateConversation(new
ConversationParameters(){ Bot = bot, members = new ChannelAccount[] {
new ChannelAccount("user1") } );
await connect.Conversations.SendToConversationAsync(resource.Id, new
Activity() ... ) ;
```.
:param parameters: Parameters to create the conversation from
:type parameters:
~botframework.connector.models.ConversationParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ConversationResourceResponse or ClientRawResponse if raw=true
:rtype: ~botframework.connector.models.ConversationResourceResponse or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<botframework.connector.models.ErrorResponseException>`
"""
# Construct URL
url = self.create_conversation.metadata['url']
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(parameters, 'ConversationParameters')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201, 202]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ConversationResourceResponse', response)
if response.status_code == 201:
deserialized = self._deserialize('ConversationResourceResponse', response)
if response.status_code == 202:
deserialized = self._deserialize('ConversationResourceResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized | 0.002446 |
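A minimal Python usage sketch of the method above, mirroring the C# example in the docstring. The credentials, service URL, and channel account IDs are illustrative assumptions, not values from the source.
```
# Hypothetical usage sketch: app credentials, base_url and account IDs are placeholders.
from botframework.connector import ConnectorClient
from botframework.connector.auth import MicrosoftAppCredentials
from botframework.connector.models import ConversationParameters, ChannelAccount

credentials = MicrosoftAppCredentials("APP_ID", "APP_PASSWORD")  # assumed credentials
client = ConnectorClient(credentials, base_url="https://smba.trafficmanager.net/apis")

params = ConversationParameters(
    bot=ChannelAccount(id="bot-id", name="bot"),
    members=[ChannelAccount(id="user1")],
    is_group=False,
)
resource = client.conversations.create_conversation(params)
print(resource.id)  # conversation id usable in the message payload and REST API uris
```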
def delay(fn, opts, task, *args, **kwargs):
""" delay(t=5, stddev=0., pdf="gauss")
Wraps a bound method of a task and delays its execution by *t* seconds.
"""
if opts["stddev"] <= 0:
t = opts["t"]
elif opts["pdf"] == "gauss":
t = random.gauss(opts["t"], opts["stddev"])
elif opts["pdf"] == "uniform":
t = random.uniform(opts["t"], opts["stddev"])
else:
raise ValueError("unknown delay decorator pdf '{}'".format(opts["pdf"]))
time.sleep(t)
return fn(task, *args, **kwargs) | 0.00369 |
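A self-contained sketch of the same delay logic written as an ordinary decorator, for illustration only; the `opts`/`task` plumbing of the original wrapper (applied through a decorator factory not shown here) is replaced by plain keyword arguments, and negative gaussian draws are clamped to zero.
```
import functools
import random
import time

def delayed(t=5, stddev=0., pdf="gauss"):
    """Illustrative stand-alone variant of the delay wrapper above."""
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            if stddev <= 0:
                wait = t
            elif pdf == "gauss":
                wait = random.gauss(t, stddev)
            elif pdf == "uniform":
                wait = random.uniform(t, stddev)
            else:
                raise ValueError("unknown delay decorator pdf '{}'".format(pdf))
            time.sleep(max(wait, 0))  # clamp: gauss can produce negative values
            return fn(*args, **kwargs)
        return wrapper
    return decorator

@delayed(t=0.1, stddev=0.02)
def ping():
    return "pong"
```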
def do_resource(self,args):
"""Go to the specified resource. resource -h for detailed help"""
parser = CommandArgumentParser("resource")
        parser.add_argument('-i', '--logical-id', dest='logical-id', help='logical id of the child resource')
args = vars(parser.parse_args(args))
stackName = self.wrappedStack['rawStack'].name
logicalId = args['logical-id']
self.stackResource(stackName,logicalId) | 0.020089 |
def set_public_domain(self, public_domain=None):
"""Sets the public domain flag.
:param public_domain: the public domain status
:type public_domain: ``boolean``
:raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
if public_domain is None:
raise NullArgument()
metadata = Metadata(**settings.METADATA['public_domain'])
if metadata.is_read_only():
raise NoAccess()
if self._is_valid_input(public_domain, metadata, array=False):
self._my_map['publicDomain'] = public_domain
else:
raise InvalidArgument() | 0.002805 |
def bell(self, percent = 0, onerror = None):
"""Ring the bell at the volume percent which is relative the base
volume. See XBell(3X11)."""
request.Bell(display = self.display,
onerror = onerror,
percent = percent) | 0.043011 |
def check_required(obj, required_parameters):
"""
Check if a parameter is available on an object
:param obj: Object
:param required_parameters: list of parameters
:return:
"""
for parameter in required_parameters:
if not hasattr(obj, parameter) or getattr(obj, parameter) is None:
raise DesignError("parameter '%s' must be set for '%s' object." % (parameter, obj.base_type)) | 0.004728 |
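A small usage sketch of `check_required`; the `Section` class and its attributes are hypothetical and exist only to show when `DesignError` (from the surrounding module) is raised.
```
# Hypothetical example object; only 'base_type' and the checked attributes matter.
class Section(object):
    base_type = "section"
    def __init__(self):
        self.depth = 300.0
        self.width = None  # deliberately unset

beam = Section()
check_required(beam, ["depth"])            # passes silently
check_required(beam, ["depth", "width"])   # raises DesignError because width is None
```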
def onchange_partner_id(self):
'''
        When you change partner_id, it also updates the partner_invoice_id,
        partner_shipping_id and pricelist_id of the hotel reservation.
---------------------------------------------------------------------
@param self: object pointer
'''
if not self.partner_id:
self.partner_invoice_id = False
self.partner_shipping_id = False
self.partner_order_id = False
else:
addr = self.partner_id.address_get(['delivery', 'invoice',
'contact'])
self.partner_invoice_id = addr['invoice']
self.partner_order_id = addr['contact']
self.partner_shipping_id = addr['delivery']
self.pricelist_id = self.partner_id.property_product_pricelist.id | 0.002304 |
def transform(self, X):
"""Embeds data points in the learned linear embedding space.
Transforms samples in ``X`` into ``X_embedded``, samples inside a new
embedding space such that: ``X_embedded = X.dot(L.T)``, where ``L`` is
the learned linear transformation (See :class:`MahalanobisMixin`).
Parameters
----------
X : `numpy.ndarray`, shape=(n_samples, n_features)
The data points to embed.
Returns
-------
X_embedded : `numpy.ndarray`, shape=(n_samples, num_dims)
The embedded data points.
"""
X_checked = check_input(X, type_of_inputs='classic', estimator=self,
preprocessor=self.preprocessor_,
accept_sparse=True)
return X_checked.dot(self.transformer_.T) | 0.003827 |
def prefetch(self, file_size=None):
"""
Pre-fetch the remaining contents of this file in anticipation of future
`.read` calls. If reading the entire file, pre-fetching can
dramatically improve the download speed by avoiding roundtrip latency.
The file's contents are incrementally buffered in a background thread.
The prefetched data is stored in a buffer until read via the `.read`
method. Once data has been read, it's removed from the buffer. The
data may be read in a random order (using `.seek`); chunks of the
buffer that haven't been read will continue to be buffered.
:param int file_size:
When this is ``None`` (the default), this method calls `stat` to
determine the remote file size. In some situations, doing so can
cause exceptions or hangs (see `#562
<https://github.com/paramiko/paramiko/pull/562>`_); as a
workaround, one may call `stat` explicitly and pass its value in
via this parameter.
.. versionadded:: 1.5.1
.. versionchanged:: 1.16.0
The ``file_size`` parameter was added (with no default value).
.. versionchanged:: 1.16.1
The ``file_size`` parameter was made optional for backwards
compatibility.
"""
if file_size is None:
file_size = self.stat().st_size
# queue up async reads for the rest of the file
chunks = []
n = self._realpos
while n < file_size:
chunk = min(self.MAX_REQUEST_SIZE, file_size - n)
chunks.append((n, chunk))
n += chunk
if len(chunks) > 0:
self._start_prefetch(chunks) | 0.001143 |
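A hedged paramiko usage sketch showing the workaround described in the docstring: calling `stat` once and passing its size to `prefetch` instead of letting `prefetch` call `stat` itself. Host, credentials and the remote path are placeholders.
```
import paramiko

# Placeholders: host, credentials and remote path are illustrative only.
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect("example.com", username="user", password="secret")

sftp = ssh.open_sftp()
f = sftp.open("/remote/big.bin", "rb")
size = f.stat().st_size   # explicit stat, per the issue #562 workaround
f.prefetch(size)          # background-buffer the rest of the file
data = f.read()           # served from the prefetch buffer
f.close()
ssh.close()
```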
def club(self, sort='desc', ctype='player', defId='', start=0, count=None, page_size=itemsPerPage['club'],
level=None, category=None, assetId=None, league=None, club=None,
position=None, zone=None, nationality=None, rare=False, playStyle=None):
"""Return items in your club, excluding consumables.
:param ctype: [development / ? / ?] Card type.
:param level: (optional) [?/?/gold] Card level.
:param category: (optional) [fitness/?/?] Card category.
:param assetId: (optional) Asset id.
:param defId: (optional) Definition id.
:param min_price: (optional) Minimal price.
:param max_price: (optional) Maximum price.
:param min_buy: (optional) Minimal buy now price.
:param max_buy: (optional) Maximum buy now price.
:param league: (optional) League id.
:param club: (optional) Club id.
:param position: (optional) Position.
:param nationality: (optional) Nation id.
:param rare: (optional) [boolean] True for searching special cards.
:param playStyle: (optional) Play style.
        :param start: (optional) Start page sent to the server, so it is supposed to be 12/15, 24/30, etc. (default platform page_size*n)
:param page_size: (optional) Page size (items per page)
"""
method = 'GET'
url = 'club'
if count: # backward compatibility, will be removed in future
page_size = count
params = {'sort': sort, 'type': ctype, 'defId': defId, 'start': start, 'count': page_size}
if level:
params['level'] = level
if category:
params['cat'] = category
if assetId:
params['maskedDefId'] = assetId
if league:
params['leag'] = league
if club:
params['team'] = club
if position:
params['pos'] = position
if zone:
params['zone'] = zone
if nationality:
params['nat'] = nationality
if rare:
params['rare'] = 'SP'
if playStyle:
params['playStyle'] = playStyle
rc = self.__request__(method, url, params=params)
# pinEvent
if start == 0:
if ctype == 'player':
pgid = 'Club - Players - List View'
elif ctype == 'staff':
pgid = 'Club - Staff - List View'
elif ctype in ('item', 'kit', 'ball', 'badge', 'stadium'):
pgid = 'Club - Club Items - List View'
# else: # TODO: THIS IS probably WRONG, detect all ctypes
# pgid = 'Club - Club Items - List View'
events = [self.pin.event('page_view', 'Hub - Club'), self.pin.event('page_view', pgid)]
if rc['itemData']:
events.append(self.pin.event('page_view', 'Item - Detail View'))
self.pin.send(events)
return [itemParse({'itemData': i}) for i in rc['itemData']] | 0.003342 |
def get_zooms(src_dst, ensure_global_max_zoom=False, tilesize=256):
"""
Calculate raster min/max mercator zoom level.
Parameters
----------
src_dst: rasterio.io.DatasetReader
Rasterio io.DatasetReader object
ensure_global_max_zoom: bool, optional
Apply latitude correction factor to ensure max_zoom equality for global
datasets covering different latitudes (default: False).
tilesize: int, optional
Mercator tile size (default: 256).
Returns
-------
min_zoom, max_zoom: Tuple
Min/Max Mercator zoom levels.
"""
bounds = transform_bounds(
*[src_dst.crs, "epsg:4326"] + list(src_dst.bounds), densify_pts=21
)
center = [(bounds[0] + bounds[2]) / 2, (bounds[1] + bounds[3]) / 2]
lat = center[1] if ensure_global_max_zoom else 0
dst_affine, w, h = calculate_default_transform(
src_dst.crs, "epsg:3857", src_dst.width, src_dst.height, *src_dst.bounds
)
mercator_resolution = max(abs(dst_affine[0]), abs(dst_affine[4]))
# Correction factor for web-mercator projection latitude scale change
latitude_correction_factor = math.cos(math.radians(lat))
adjusted_resolution = mercator_resolution * latitude_correction_factor
max_zoom = zoom_for_pixelsize(adjusted_resolution, tilesize=tilesize)
ovr_resolution = adjusted_resolution * max(h, w) / tilesize
min_zoom = zoom_for_pixelsize(ovr_resolution, tilesize=tilesize)
return (min_zoom, max_zoom) | 0.001336 |
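A short usage sketch, assuming `get_zooms` and its helpers are importable alongside rasterio; the file path is a placeholder.
```
import rasterio

# Placeholder path; any geo-referenced raster readable by rasterio works.
with rasterio.open("/tmp/cog.tif") as src_dst:
    min_zoom, max_zoom = get_zooms(src_dst, ensure_global_max_zoom=True)
    print("serve tiles between zoom", min_zoom, "and", max_zoom)
```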
def update_identity_pool(IdentityPoolId,
IdentityPoolName=None,
AllowUnauthenticatedIdentities=False,
SupportedLoginProviders=None,
DeveloperProviderName=None,
OpenIdConnectProviderARNs=None,
region=None, key=None, keyid=None, profile=None):
'''
    Updates the given IdentityPoolId's properties. All parameters except IdentityPoolId
    are optional. SupportedLoginProviders should be a dictionary mapping provider names to
provider app IDs. OpenIdConnectProviderARNs should be a list of OpenID Connect provider
ARNs.
To clear SupportedLoginProviders pass '{}'
To clear OpenIdConnectProviderARNs pass '[]'
    The boto3 API prevents DeveloperProviderName from being updated after it has been set for the first time.
Returns the updated identity pool if successful
CLI Example:
.. code-block:: bash
salt myminion boto_cognitoidentity.update_identity_pool my_id_pool_id my_id_pool_name \
DeveloperProviderName=custom_developer_provider
'''
conn_params = dict(region=region, key=key, keyid=keyid, profile=profile)
response = describe_identity_pools('', IdentityPoolId=IdentityPoolId, **conn_params)
error = response.get('error')
if error is None:
error = 'No matching pool' if response.get('identity_pools') is None else None
if error:
return {'updated': False, 'error': error}
id_pool = response.get('identity_pools')[0]
request_params = id_pool.copy()
# IdentityPoolName and AllowUnauthenticatedIdentities are required for the call to update_identity_pool
if IdentityPoolName is not None and IdentityPoolName != request_params.get('IdentityPoolName'):
request_params['IdentityPoolName'] = IdentityPoolName
if AllowUnauthenticatedIdentities != request_params.get('AllowUnauthenticatedIdentities'):
request_params['AllowUnauthenticatedIdentities'] = AllowUnauthenticatedIdentities
current_val = request_params.pop('SupportedLoginProviders', None)
if SupportedLoginProviders is not None and SupportedLoginProviders != current_val:
request_params['SupportedLoginProviders'] = SupportedLoginProviders
# we can only set DeveloperProviderName one time per AWS.
current_val = request_params.pop('DeveloperProviderName', None)
if current_val is None and DeveloperProviderName is not None:
request_params['DeveloperProviderName'] = DeveloperProviderName
current_val = request_params.pop('OpenIdConnectProviderARNs', None)
if OpenIdConnectProviderARNs is not None and OpenIdConnectProviderARNs != current_val:
request_params['OpenIdConnectProviderARNs'] = OpenIdConnectProviderARNs
conn = _get_conn(**conn_params)
try:
response = conn.update_identity_pool(**request_params)
response.pop('ResponseMetadata', None)
return {'updated': True, 'identity_pool': response}
except ClientError as e:
return {'updated': False, 'error': __utils__['boto3.get_error'](e)} | 0.004442 |
def set_taskfileinfo(self, tfi):
"""Set the :class:`jukeboxcore.filesys.TaskFileInfo` that the refobject represents.
:param tfi: the taskfileinfo for the refobject or None if nothing is loaded.
:type tfi: :class:`jukeboxcore.filesys.TaskFileInfo` | None
:returns: None
:rtype: None
:raises: None
"""
self._taskfileinfo = tfi
if tfi:
self.set_element(tfi.task.element) | 0.00885 |
def uninstall(cls):
"""Remove the package manager from the system."""
if os.path.exists(cls.home):
shutil.rmtree(cls.home) | 0.013333 |
def encode_simple(d):
"""Encode strings in basic python objects."""
if isinstance(d, unicode):
return d.encode()
if isinstance(d, list):
return list(map(encode_simple, d))
if isinstance(d, dict):
return dict([(encode_simple(k), encode_simple(v)) for k, v in d.items()])
return d | 0.006211 |
def convert_time_string(date_str):
""" Change a date string from the format 2018-08-15T23:55:17 into a datetime object """
dt, _, _ = date_str.partition(".")
dt = datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S")
return dt | 0.008658 |
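A quick usage example: the fractional-seconds part (if present) is split off before parsing, so both inputs below yield the same naive datetime.
```
print(convert_time_string("2018-08-15T23:55:17"))         # 2018-08-15 23:55:17
print(convert_time_string("2018-08-15T23:55:17.123456"))  # 2018-08-15 23:55:17
```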
def clean_undefined(obj):
"""
Convert Undefined array entries to None (null)
"""
if isinstance(obj, list):
return [
None if isinstance(item, Undefined) else item
for item in obj
]
if isinstance(obj, dict):
for key in obj:
obj[key] = clean_undefined(obj[key])
return obj | 0.002833 |
def _get_parent_remote_paths(self):
"""
Get list of remote folders based on the list of all file urls
:return: set([str]): set of remote folders (that contain files)
"""
parent_paths = set([item.get_remote_parent_path() for item in self.file_urls])
if '' in parent_paths:
parent_paths.remove('')
return parent_paths | 0.007833 |
def add_key_value(self, key, value):
"""Add custom field to Indicator object.
.. note:: The key must be the exact name required by the batch schema.
Example::
file_hash = tcex.batch.file('File', '1d878cdc391461e392678ba3fc9f6f32')
file_hash.add_key_value('size', '1024')
Args:
key (str): The field key to add to the JSON batch data.
value (str): The field value to add to the JSON batch data.
"""
key = self._metadata_map.get(key, key)
if key in ['dateAdded', 'lastModified']:
self._indicator_data[key] = self._utils.format_datetime(
value, date_format='%Y-%m-%dT%H:%M:%SZ'
)
elif key == 'confidence':
self._indicator_data[key] = int(value)
elif key == 'rating':
self._indicator_data[key] = float(value)
else:
self._indicator_data[key] = value | 0.003155 |
def rebuild(self, image, root_pass=None, authorized_keys=None, **kwargs):
"""
Rebuilding an Instance deletes all existing Disks and Configs and deploys
a new :any:`Image` to it. This can be used to reset an existing
Instance or to install an Image on an empty Instance.
:param image: The Image to deploy to this Instance
:type image: str or Image
:param root_pass: The root password for the newly rebuilt Instance. If
omitted, a password will be generated and returned.
:type root_pass: str
:param authorized_keys: The ssh public keys to install in the linode's
/root/.ssh/authorized_keys file. Each entry may
be a single key, or a path to a file containing
the key.
:type authorized_keys: list or str
:returns: The newly generated password, if one was not provided
(otherwise True)
:rtype: str or bool
"""
ret_pass = None
if not root_pass:
ret_pass = Instance.generate_root_password()
root_pass = ret_pass
authorized_keys = load_and_validate_keys(authorized_keys)
params = {
'image': image.id if issubclass(type(image), Base) else image,
'root_pass': root_pass,
'authorized_keys': authorized_keys,
}
params.update(kwargs)
result = self._client.post('{}/rebuild'.format(Instance.api_endpoint), model=self, data=params)
        if 'id' not in result:
raise UnexpectedResponseError('Unexpected response issuing rebuild!', json=result)
# update ourself with the newly-returned information
self._populate(result)
if not ret_pass:
return True
else:
return ret_pass | 0.00368 |
async def discover_master(self, service, timeout):
"""Perform Master discovery for specified service."""
# TODO: get lock
idle_timeout = timeout
# FIXME: single timeout used 4 times;
# meaning discovery can take up to:
# 3 * timeout * (sentinels count)
#
        # having one global timeout also can lead to
        # a problem when not all sentinels are checked.
# use a copy, cause pools can change
pools = self._pools[:]
for sentinel in pools:
try:
with async_timeout(timeout, loop=self._loop):
address = await self._get_masters_address(
sentinel, service)
pool = self._masters[service]
with async_timeout(timeout, loop=self._loop), \
contextlib.ExitStack() as stack:
conn = await pool._create_new_connection(address)
stack.callback(conn.close)
await self._verify_service_role(conn, 'master')
stack.pop_all()
return conn
except asyncio.CancelledError:
# we must correctly handle CancelledError(s):
# application may be stopped or function can be cancelled
# by outer timeout, so we must stop the look up.
raise
except asyncio.TimeoutError:
continue
except DiscoverError as err:
sentinel_logger.debug("DiscoverError(%r, %s): %r",
sentinel, service, err)
await asyncio.sleep(idle_timeout, loop=self._loop)
continue
except RedisError as err:
raise MasterReplyError("Service {} error".format(service), err)
except Exception:
# TODO: clear (drop) connections to schedule reconnect
await asyncio.sleep(idle_timeout, loop=self._loop)
continue
else:
raise MasterNotFoundError("No master found for {}".format(service)) | 0.000934 |
def make_energies_hdu(self, extname="ENERGIES"):
""" Builds and returns a FITs HDU with the energy bin boundries
extname : The HDU extension name
"""
if self._evals is None:
return None
cols = [fits.Column("ENERGY", "1E", unit='MeV',
array=self._evals)]
hdu = fits.BinTableHDU.from_columns(
cols, self.make_header(), name=extname)
return hdu | 0.006466 |
def _parse_ppm_segment(self, fptr):
"""Parse the PPM segment.
Parameters
----------
fptr : file
Open file object.
Returns
-------
PPMSegment
The current PPM segment.
"""
offset = fptr.tell() - 2
read_buffer = fptr.read(3)
length, zppm = struct.unpack('>HB', read_buffer)
numbytes = length - 3
read_buffer = fptr.read(numbytes)
return PPMsegment(zppm, read_buffer, length, offset) | 0.003846 |
def min_filter(data, size=7, res_g=None, sub_blocks=(1, 1, 1)):
"""
minimum filter of given size
Parameters
----------
data: 2 or 3 dimensional ndarray or OCLArray of type float32
input data
size: scalar, tuple
the size of the patch to consider
res_g: OCLArray
store result in buffer if given
sub_blocks:
perform over subblock tiling (only if data is ndarray)
Returns
-------
filtered image or None (if OCLArray)
"""
if data.ndim == 2:
_filt = make_filter(_generic_filter_gpu_2d(FUNC="(val<res?val:res)", DEFAULT="INFINITY"))
elif data.ndim == 3:
_filt = make_filter(_generic_filter_gpu_3d(FUNC="(val<res?val:res)", DEFAULT="INFINITY"))
else:
raise ValueError("currently only 2 or 3 dimensional data is supported")
return _filt(data=data, size=size, res_g=res_g, sub_blocks=sub_blocks) | 0.003279 |
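A usage sketch assuming the gputools-style `min_filter` above and an OpenCL-capable device; the random test volume is illustrative.
```
import numpy as np

# Illustrative input: a 3-D float32 stack, filtered with a 5x5x5 minimum patch.
data = np.random.rand(32, 64, 64).astype(np.float32)
filtered = min_filter(data, size=5)
print(filtered.shape, filtered.dtype)  # (32, 64, 64) float32
```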
def current(self, value):
"""set current cursor position"""
current = min(max(self._min, value), self._max)
self._current = current
if current > self._stop :
self._stop = current
self._start = current-self._width
elif current < self._start :
self._start = current
self._stop = current + self._width
if abs(self._start - self._min) <= self._sticky_lenght :
self._start = self._min
if abs(self._stop - self._max) <= self._sticky_lenght :
self._stop = self._max | 0.015 |
def show(self, start=0, geometry=None, output=None):
"""pretty-print data to the console
(similar to q.show, but uses python stdout by default)
>>> x = q('([k:`x`y`z]a:1 2 3;b:10 20 30)')
>>> x.show() # doctest: +NORMALIZE_WHITESPACE
k| a b
-| ----
x| 1 10
y| 2 20
z| 3 30
the first optional argument, 'start' specifies the first row to be
printed (negative means from the end)
>>> x.show(2) # doctest: +NORMALIZE_WHITESPACE
k| a b
-| ----
z| 3 30
>>> x.show(-2) # doctest: +NORMALIZE_WHITESPACE
k| a b
-| ----
y| 2 20
z| 3 30
the geometry is the height and width of the console
>>> x.show(geometry=[4, 6])
k| a..
-| -..
x| 1..
..
"""
if output is None:
output = sys.stdout
if geometry is None:
geometry = q.value(kp("\\c"))
else:
geometry = self._I(geometry)
if start < 0:
start += q.count(self)
# Make sure nil is not passed to a q function
if self._id() != nil._id():
r = self._show(geometry, start)
else:
r = '::\n'
if isinstance(output, type):
return output(r)
try:
output.write(r)
except TypeError:
output.write(str(r)) | 0.001385 |
def GetExpirationTime(self):
"""Computes the timestamp at which this breakpoint will expire."""
# TODO(emrekultursay): Move this to a common method.
if '.' not in self.definition['createTime']:
fmt = '%Y-%m-%dT%H:%M:%S%Z'
else:
fmt = '%Y-%m-%dT%H:%M:%S.%f%Z'
create_datetime = datetime.strptime(
self.definition['createTime'].replace('Z', 'UTC'), fmt)
return create_datetime + self.expiration_period | 0.006757 |
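A standalone sketch of the createTime parsing used above: the trailing 'Z' is rewritten to 'UTC' so `%Z` can consume it, and the format string is chosen by whether fractional seconds are present. The sample timestamps and the 24-hour expiration period are made up (the real code adds `self.expiration_period`).
```
from datetime import datetime, timedelta

for create_time in ("2019-03-01T12:00:00Z", "2019-03-01T12:00:00.250000Z"):
    fmt = '%Y-%m-%dT%H:%M:%S%Z' if '.' not in create_time else '%Y-%m-%dT%H:%M:%S.%f%Z'
    created = datetime.strptime(create_time.replace('Z', 'UTC'), fmt)
    print(created + timedelta(hours=24))  # assumed 24h expiration period
```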
def init_comm(self):
"""
Initializes comm and attaches streams.
"""
if self.comm:
return self.comm
comm = None
if self.dynamic or self.renderer.widget_mode == 'live':
comm = self.renderer.comm_manager.get_server_comm()
return comm | 0.006452 |
async def add(gc: GroupControl, slaves):
"""Add speakers to group."""
click.echo("Adding to existing group: %s" % slaves)
click.echo(await gc.add(slaves)) | 0.006024 |
def check_datasource_perms(self, datasource_type=None, datasource_id=None):
"""
Check if user can access a cached response from explore_json.
    This function takes `self` since it must have the same signature as
    the decorated method.
"""
form_data = get_form_data()[0]
datasource_id, datasource_type = get_datasource_info(
datasource_id, datasource_type, form_data)
viz_obj = get_viz(
datasource_type=datasource_type,
datasource_id=datasource_id,
form_data=form_data,
force=False,
)
security_manager.assert_datasource_permission(viz_obj.datasource) | 0.001582 |
def calc_DUP_parameter(self, modeln, label, fig=10, color='r', marker_type='*',
h_core_mass=False):
"""
Method to calculate the DUP parameter evolution for different
        TPs specified by their model number.
Parameters
----------
fig : integer
Figure number to plot.
modeln : list
Array containing pairs of models each corresponding to a
TP. First model where h boundary mass will be taken before
DUP, second model where DUP reaches lowest mass.
        label : string
Plot label.
color : string
Color of the plot.
marker_type : string
marker type.
h_core_mass : boolean, optional
If True: plot dependence from h free core , else star mass.
The default is False.
"""
number_DUP=(old_div(len(modeln),2) -1) #START WITH SECOND
try:
h1_bnd_m=self.get('h1_boundary_mass')
except:
try:
h1_bnd_m=self.get('he_core_mass')
except:
pass
star_mass=self.get('star_mass')
age=self.get("star_age")
firstTP=h1_bnd_m[modeln[0]]
first_m_dredge=h1_bnd_m[modeln[1]]
DUP_parameter=np.zeros(number_DUP)
DUP_xaxis=np.zeros(number_DUP)
j=0
for i in np.arange(2,len(modeln),2):
TP=h1_bnd_m[modeln[i]]
m_dredge=h1_bnd_m[modeln[i+1]]
if i ==2:
last_m_dredge=first_m_dredge
#print "testest"
#print modeln[i]
if h_core_mass==True:
DUP_xaxis[j]=h1_bnd_m[modeln[i]] #age[modeln[i]] - age[modeln[0]]
else:
DUP_xaxis[j]=star_mass[modeln[i]]
#DUP_xaxis[j]=modeln[i]
DUP_parameter[j]=old_div((TP-m_dredge),(TP-last_m_dredge))
last_m_dredge=m_dredge
j+=1
pl.figure(fig)
pl.rcParams.update({'font.size': 18})
pl.rc('xtick', labelsize=18)
pl.rc('ytick', labelsize=18)
pl.plot(DUP_xaxis,DUP_parameter,marker=marker_type,markersize=12,mfc=color,color='k',linestyle='-',label=label)
if h_core_mass==True:
pl.xlabel("$M_H$",fontsize=20)
else:
pl.xlabel("M/M$_{\odot}$",fontsize=24)
pl.ylabel("$\lambda_{DUP}$",fontsize=24)
pl.minorticks_on()
pl.legend() | 0.023172 |
def sample(self, n=None, frac=None, replace=False, weights=None,
random_state=None, axis=None):
"""
Return a random sample of items from an axis of object.
You can use `random_state` for reproducibility.
Parameters
----------
n : int, optional
Number of items from axis to return. Cannot be used with `frac`.
Default = 1 if `frac` = None.
frac : float, optional
Fraction of axis items to return. Cannot be used with `n`.
replace : bool, default False
Sample with or without replacement.
weights : str or ndarray-like, optional
Default 'None' results in equal probability weighting.
If passed a Series, will align with target object on index. Index
values in weights not found in sampled object will be ignored and
index values in sampled object not in weights will be assigned
weights of zero.
If called on a DataFrame, will accept the name of a column
when axis = 0.
Unless weights are a Series, weights must be same length as axis
being sampled.
If weights do not sum to 1, they will be normalized to sum to 1.
Missing values in the weights column will be treated as zero.
Infinite values not allowed.
random_state : int or numpy.random.RandomState, optional
Seed for the random number generator (if int), or numpy RandomState
object.
axis : int or string, optional
Axis to sample. Accepts axis number or name. Default is stat axis
for given data type (0 for Series and DataFrames, 1 for Panels).
Returns
-------
Series or DataFrame
A new object of same type as caller containing `n` items randomly
sampled from the caller object.
See Also
--------
numpy.random.choice: Generates a random sample from a given 1-D numpy
array.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],
... 'num_wings': [2, 0, 0, 0],
... 'num_specimen_seen': [10, 2, 1, 8]},
... index=['falcon', 'dog', 'spider', 'fish'])
>>> df
num_legs num_wings num_specimen_seen
falcon 2 2 10
dog 4 0 2
spider 8 0 1
fish 0 0 8
Extract 3 random elements from the ``Series`` ``df['num_legs']``:
Note that we use `random_state` to ensure the reproducibility of
the examples.
>>> df['num_legs'].sample(n=3, random_state=1)
fish 0
spider 8
falcon 2
Name: num_legs, dtype: int64
A random 50% sample of the ``DataFrame`` with replacement:
>>> df.sample(frac=0.5, replace=True, random_state=1)
num_legs num_wings num_specimen_seen
dog 4 0 2
fish 0 0 8
Using a DataFrame column as weights. Rows with larger value in the
`num_specimen_seen` column are more likely to be sampled.
>>> df.sample(n=2, weights='num_specimen_seen', random_state=1)
num_legs num_wings num_specimen_seen
falcon 2 2 10
fish 0 0 8
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
axis_length = self.shape[axis]
# Process random_state argument
rs = com.random_state(random_state)
# Check weights for compliance
if weights is not None:
# If a series, align with frame
if isinstance(weights, pd.Series):
weights = weights.reindex(self.axes[axis])
# Strings acceptable if a dataframe and axis = 0
if isinstance(weights, str):
if isinstance(self, pd.DataFrame):
if axis == 0:
try:
weights = self[weights]
except KeyError:
raise KeyError("String passed to weights not a "
"valid column")
else:
raise ValueError("Strings can only be passed to "
"weights when sampling from rows on "
"a DataFrame")
else:
raise ValueError("Strings cannot be passed as weights "
"when sampling from a Series or Panel.")
weights = pd.Series(weights, dtype='float64')
if len(weights) != axis_length:
raise ValueError("Weights and axis to be sampled must be of "
"same length")
if (weights == np.inf).any() or (weights == -np.inf).any():
raise ValueError("weight vector may not include `inf` values")
if (weights < 0).any():
raise ValueError("weight vector many not include negative "
"values")
# If has nan, set to zero.
weights = weights.fillna(0)
# Renormalize if don't sum to 1
if weights.sum() != 1:
if weights.sum() != 0:
weights = weights / weights.sum()
else:
raise ValueError("Invalid weights: weights sum to zero")
weights = weights.values
# If no frac or n, default to n=1.
if n is None and frac is None:
n = 1
elif n is not None and frac is None and n % 1 != 0:
raise ValueError("Only integers accepted as `n` values")
elif n is None and frac is not None:
n = int(round(frac * axis_length))
elif n is not None and frac is not None:
raise ValueError('Please enter a value for `frac` OR `n`, not '
'both')
# Check for negative sizes
if n < 0:
raise ValueError("A negative number of rows requested. Please "
"provide positive value.")
locs = rs.choice(axis_length, size=n, replace=replace, p=weights)
return self.take(locs, axis=axis, is_copy=False) | 0.000448 |
def ssh_sa_ssh_server_shutdown(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ssh_sa = ET.SubElement(config, "ssh-sa", xmlns="urn:brocade.com:mgmt:brocade-sec-services")
ssh = ET.SubElement(ssh_sa, "ssh")
server = ET.SubElement(ssh, "server")
shutdown = ET.SubElement(server, "shutdown")
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.006508 |
def get_bin_indices(self, values):
"""Returns index tuple in histogram of bin which contains value"""
return tuple([self.get_axis_bin_index(values[ax_i], ax_i)
for ax_i in range(self.dimensions)]) | 0.008547 |
def get_value_generator(self_,name): # pylint: disable-msg=E0213
"""
Return the value or value-generating object of the named
attribute.
For most parameters, this is simply the parameter's value
(i.e. the same as getattr()), but Dynamic parameters have
their value-generating object returned.
"""
cls_or_slf = self_.self_or_cls
param_obj = cls_or_slf.param.objects('existing').get(name)
if not param_obj:
value = getattr(cls_or_slf,name)
# CompositeParameter detected by being a Parameter and having 'attribs'
elif hasattr(param_obj,'attribs'):
value = [cls_or_slf.param.get_value_generator(a) for a in param_obj.attribs]
# not a Dynamic Parameter
elif not hasattr(param_obj,'_value_is_dynamic'):
value = getattr(cls_or_slf,name)
# Dynamic Parameter...
else:
internal_name = "_%s_param_value"%name
if hasattr(cls_or_slf,internal_name):
# dealing with object and it's been set on this object
value = getattr(cls_or_slf,internal_name)
else:
# dealing with class or isn't set on the object
value = param_obj.default
return value | 0.00921 |
def to_fp(self, file_pointer, comments=None):
"""
The method can be used to save a WCNF formula into a file pointer.
The file pointer is expected as an argument. Additionally,
supplementary comment lines can be specified in the ``comments``
parameter.
            :param file_pointer: a file pointer where to store the formula.
            :param comments: additional comments to put in the file.
            :type file_pointer: file pointer
            :type comments: list(str)
Example:
.. code-block:: python
>>> from pysat.formula import WCNF
>>> wcnf = WCNF()
...
>>> # the formula is filled with a bunch of clauses
>>> with open('some-file.wcnf', 'w') as fp:
... wcnf.to_fp(fp) # writing to the file pointer
"""
# saving formula's internal comments
for c in self.comments:
print(c, file=file_pointer)
# saving externally specified comments
if comments:
for c in comments:
print(c, file=file_pointer)
print('p wcnf', self.nv, len(self.hard) + len(self.soft), self.topw, file=file_pointer)
# soft clauses are dumped first because
# some tools (e.g. LBX) cannot count them properly
for i, cl in enumerate(self.soft):
print(self.wght[i], ' '.join(str(l) for l in cl), '0', file=file_pointer)
for cl in self.hard:
print(self.topw, ' '.join(str(l) for l in cl), '0', file=file_pointer) | 0.004391 |
def choose_parent_view(self, request):
"""
Instantiates a class-based view to provide a view that allows a parent
page to be chosen for a new object, where the assigned model extends
Wagtail's Page model, and there is more than one potential parent for
new instances. The view class used can be overridden by changing the
'choose_parent_view_class' attribute.
"""
kwargs = {'model_admin': self}
view_class = self.choose_parent_view_class
return view_class.as_view(**kwargs)(request) | 0.003559 |
def get_fields_with_environment_context(self, db_name, table_name, environment_context):
"""
Parameters:
- db_name
- table_name
- environment_context
"""
self.send_get_fields_with_environment_context(db_name, table_name, environment_context)
return self.recv_get_fields_with_environment_context() | 0.009063 |
async def selfplay(state, flagfile='selfplay'):
"""Run selfplay and write a training chunk to the fsdb golden_chunk_dir.
Args:
state: the RL loop State instance.
flagfile: the name of the flagfile to use for selfplay, either 'selfplay'
        (the default) or 'bootstrap'.
"""
output_dir = os.path.join(fsdb.selfplay_dir(), state.output_model_name)
holdout_dir = os.path.join(fsdb.holdout_dir(), state.output_model_name)
lines = await run(
'bazel-bin/cc/selfplay',
'--flagfile={}.flags'.format(os.path.join(FLAGS.flags_dir, flagfile)),
'--model={}'.format(state.best_model_path),
'--output_dir={}'.format(output_dir),
'--holdout_dir={}'.format(holdout_dir),
'--seed={}'.format(state.seed))
result = '\n'.join(lines[-6:])
logging.info(result)
stats = parse_win_stats_table(result, 1)[0]
num_games = stats.total_wins
logging.info('Black won %0.3f, white won %0.3f',
stats.black_wins.total / num_games,
stats.white_wins.total / num_games)
# Write examples to a single record.
pattern = os.path.join(output_dir, '*', '*.zz')
random.seed(state.seed)
tf.set_random_seed(state.seed)
np.random.seed(state.seed)
# TODO(tommadams): This method of generating one golden chunk per generation
# is sub-optimal because each chunk gets reused multiple times for training,
# introducing bias. Instead, a fresh dataset should be uniformly sampled out
# of *all* games in the training window before the start of each training run.
buffer = example_buffer.ExampleBuffer(sampling_frac=1.0)
# TODO(tommadams): parallel_fill is currently non-deterministic. Make it not
# so.
logging.info('Writing golden chunk from "{}"'.format(pattern))
buffer.parallel_fill(tf.gfile.Glob(pattern))
buffer.flush(os.path.join(fsdb.golden_chunk_dir(),
state.output_model_name + '.tfrecord.zz')) | 0.013591 |
def learnObjects(self, objectPlacements):
"""
Learn each provided object in egocentric space. Touch every location on each
object.
    This method doesn't try to move the sensor along a path. Instead it just leaps
the sensor to each object location, resetting the location layer with each
leap.
This method simultaneously learns 4 sets of synapses:
- location -> input
- input -> location
- input -> object
- object -> input
"""
for monitor in self.monitors.values():
monitor.afterPlaceObjects(objectPlacements)
for objectName, objectDict in self.objects.iteritems():
self.reset()
objectPlacement = objectPlacements[objectName]
for locationName, featureName in objectDict.iteritems():
egocentricLocation = (locationName[0] + objectPlacement[0],
locationName[1] + objectPlacement[1])
locationSDR = self.locations[egocentricLocation]
featureSDR = self.features[featureName]
transitionSDR = np.empty(0)
self.locationLayer.reset()
self.inputLayer.reset()
for _ in xrange(10):
self.doTimestep(locationSDR, transitionSDR, featureSDR,
egocentricLocation, learn=True)
self.inputRepresentations[(featureName, egocentricLocation)] = (
self.inputLayer.getActiveCells())
self.objectRepresentations[objectName] = self.objectLayer.getActiveCells()
self.learnedObjectPlacements[objectName] = objectPlacement | 0.006549 |
def autoconfig_url_from_preferences():
"""
Get the PAC ``AutoConfigURL`` value from the macOS System Preferences.
This setting is visible as the "URL" field in
System Preferences > Network > Advanced... > Proxies > Automatic Proxy Configuration.
:return: The value from the registry, or None if the value isn't configured or available.
Note that it may be local filesystem path instead of a URL.
:rtype: str|None
:raises NotDarwinError: If called on a non-macOS/OSX platform.
"""
if not ON_DARWIN:
raise NotDarwinError()
try:
config = SystemConfiguration.SCDynamicStoreCopyProxies(None)
except AttributeError:
return # Key or value not found.
if all(('ProxyAutoConfigEnable' in config,
'ProxyAutoConfigURLString' in config,
not config.get('ProxyAutoDiscoveryEnable', 0))):
# Only return a value if it is enabled, not empty, and auto discovery is disabled.
return str(config['ProxyAutoConfigURLString']) | 0.003813 |
def contains_remove(self, item):
# type (Any, Any) -> Any
'''Takes a collection and an item and returns a new collection
of the same type with that item removed. The notion of "contains"
is defined by the object itself; the following must be ``True``:
.. code-block:: python
item not in contains_remove(obj, item)
This function is used by some lenses (particularly ContainsLens)
to remove items from containers when necessary.
The corresponding method call for this hook is
``obj._lens_contains_remove(item)``.
There is no default implementation.
'''
try:
self._lens_contains_remove
except AttributeError:
message = 'Don\'t know how to remove an item from {}'
raise NotImplementedError(message.format(type(self)))
else:
return self._lens_contains_remove(item) | 0.001163 |
def key_usage(self):
"""The :py:class:`~django_ca.extensions.KeyUsage` extension, or ``None`` if it doesn't exist."""
try:
ext = self.x509.extensions.get_extension_for_oid(ExtensionOID.KEY_USAGE)
except x509.ExtensionNotFound:
return None
return KeyUsage(ext) | 0.012698 |
def _get_admin_change_url(self, model, context):
"""
        Returns the admin changelist url.
"""
app_label = model._meta.app_label
return reverse('%s:%s_%s_changelist' % (get_admin_site_name(context),
app_label,
model.__name__.lower())) | 0.005525 |
def _make_exception(self, response):
"""
In case of exception, construct the exception
object that holds all important values returned by
the response.
:return: The exception instance
:rtype: PocketException
"""
headers = response.headers
limit_headers = []
if 'X-Limit-User-Limit' in headers:
limit_headers = [
headers['X-Limit-User-Limit'],
headers['X-Limit-User-Remaining'],
headers['X-Limit-User-Reset'],
headers['X-Limit-Key-Limit'],
headers['X-Limit-Key-Remaining'],
headers['X-Limit-Key-Reset']
]
x_error_code = int(headers['X-Error-Code'])
exc = PocketException
if x_error_code in self.auth_error_codes:
exc = PocketAutException
return exc(
response.status_code,
x_error_code,
headers['X-Error'],
*limit_headers
) | 0.001953 |
def netconf_state_sessions_session_source_host(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
netconf_state = ET.SubElement(config, "netconf-state", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring")
sessions = ET.SubElement(netconf_state, "sessions")
session = ET.SubElement(sessions, "session")
session_id_key = ET.SubElement(session, "session-id")
session_id_key.text = kwargs.pop('session_id')
source_host = ET.SubElement(session, "source-host")
source_host.text = kwargs.pop('source_host')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.004274 |
def xmlSetup(self, logType, logList):
"""Create xml file with fields from logbook form."""
from xml.etree.ElementTree import Element, SubElement, ElementTree
from datetime import datetime
curr_time = datetime.now()
if logType == "MCC":
# Set up xml tags
log_entry = Element('log_entry')
title = SubElement(log_entry, 'title')
program = SubElement(log_entry, 'program')
timestamp = SubElement(log_entry, 'timestamp')
priority = SubElement(log_entry, 'priority')
os_user = SubElement(log_entry, 'os_user')
hostname = SubElement(log_entry, 'hostname')
text = SubElement(log_entry, 'text')
log_user = SubElement(log_entry, 'log_user')
            # Check for multiple logbooks and parse into separate tags
logbook = []
for i in range(len(logList)):
logbook.append(SubElement(log_entry, 'logbook'))
logbook[i].text = logList[i].lower()
# Take care of dummy, unchanging tags first
log_entry.attrib['type'] = "LOGENTRY"
program.text = "152"
priority.text = "NORMAL"
os_user.text = "nobody"
hostname.text = "mccelog"
text.attrib['type'] = "text/plain"
# Handle attachment if image exists
if not self.imagePixmap.isNull():
attachment = SubElement(log_entry, 'attachment')
attachment.attrib['name'] = "Figure 1"
attachment.attrib['type'] = "image/" + self.imageType
attachment.text = curr_time.strftime("%Y%m%d_%H%M%S_") + str(curr_time.microsecond) + "." + self.imageType
# Set timestamp format
timestamp.text = curr_time.strftime("%Y/%m/%d %H:%M:%S")
fileName = "/tmp/" + curr_time.strftime("%Y%m%d_%H%M%S_") + str(curr_time.microsecond) + ".xml"
else: # If using Physics logbook
timeString = curr_time.strftime("%Y-%m-%dT%H:%M:%S")
# Set up xml tags
log_entry = Element(None)
severity = SubElement(log_entry, 'severity')
location = SubElement(log_entry, 'location')
keywords = SubElement(log_entry, 'keywords')
time = SubElement(log_entry, 'time')
isodate = SubElement(log_entry, 'isodate')
log_user = SubElement(log_entry, 'author')
category = SubElement(log_entry, 'category')
title = SubElement(log_entry, 'title')
metainfo = SubElement(log_entry, 'metainfo')
# Handle attachment if image exists
if not self.imagePixmap.isNull():
imageFile = SubElement(log_entry, 'link')
imageFile.text = timeString + "-00." + self.imageType
thumbnail = SubElement(log_entry, 'file')
thumbnail.text = timeString + "-00.png"
text = SubElement(log_entry, 'text') # Logbook expects Text tag to come last (for some strange reason)
# Take care of dummy, unchanging tags first
log_entry.attrib['type'] = "LOGENTRY"
category.text = "USERLOG"
location.text = "not set"
severity.text = "NONE"
keywords.text = "none"
time.text = curr_time.strftime("%H:%M:%S")
isodate.text = curr_time.strftime("%Y-%m-%d")
metainfo.text = timeString + "-00.xml"
fileName = "/tmp/" + metainfo.text
# Fill in user inputs
log_user.text = str(self.logui.userName.text())
title.text = str(self.logui.titleEntry.text())
if title.text == "":
QMessageBox().warning(self, "No Title entered", "Please enter a title for the entry...")
return None
text.text = str(self.logui.textEntry.toPlainText())
# If text field is truly empty, ElementTree leaves off tag entirely which causes logbook parser to fail
if text.text == "":
text.text = " "
# Create xml file
xmlFile = open(fileName, "w")
if logType == "MCC":
ElementTree(log_entry).write(xmlFile)
else:
xmlString = self.prettify(log_entry)
xmlFile.write(xmlString)
xmlFile.write("\n") # Close with newline so cron job parses correctly
xmlFile.close()
return fileName.rstrip(".xml") | 0.008681 |
def do_or_fake_filter( value, formatter ):
"""
call a faker if value is None
uses:
{{ myint|or_fake:'randomInt' }}
"""
if not value:
value = Faker.getGenerator().format( formatter )
return value | 0.021186 |
def update_import_request(self, import_request_to_update, project, repository_id, import_request_id):
"""UpdateImportRequest.
[Preview API] Retry or abandon a failed import request.
:param :class:`<GitImportRequest> <azure.devops.v5_0.git.models.GitImportRequest>` import_request_to_update: The updated version of the import request. Currently, the only change allowed is setting the Status to Queued or Abandoned.
:param str project: Project ID or project name
:param str repository_id: The name or ID of the repository.
:param int import_request_id: The unique identifier for the import request to update.
:rtype: :class:`<GitImportRequest> <azure.devops.v5_0.git.models.GitImportRequest>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if repository_id is not None:
route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
if import_request_id is not None:
route_values['importRequestId'] = self._serialize.url('import_request_id', import_request_id, 'int')
content = self._serialize.body(import_request_to_update, 'GitImportRequest')
response = self._send(http_method='PATCH',
location_id='01828ddc-3600-4a41-8633-99b3a73a0eb3',
version='5.0-preview.1',
route_values=route_values,
content=content)
return self._deserialize('GitImportRequest', response) | 0.00672 |
def agents_email_show(self, email_id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/chat/agents#get-agent-by-email-id"
api_path = "/api/v2/agents/email/{email_id}"
api_path = api_path.format(email_id=email_id)
return self.call(api_path, **kwargs) | 0.010417 |
def stochastic_event_set(sources, source_site_filter=nofilter):
"""
Generates a 'Stochastic Event Set' (that is a collection of earthquake
ruptures) representing a possible *realization* of the seismicity as
described by a source model.
The calculator loops over sources. For each source, it loops over ruptures.
For each rupture, the number of occurrence is randomly sampled by
calling
:meth:`openquake.hazardlib.source.rupture.BaseProbabilisticRupture.sample_number_of_occurrences`
.. note::
This calculator is using random numbers. In order to reproduce the
same results numpy random numbers generator needs to be seeded, see
http://docs.scipy.org/doc/numpy/reference/generated/numpy.random.seed.html
:param sources:
An iterator of seismic sources objects (instances of subclasses
of :class:`~openquake.hazardlib.source.base.BaseSeismicSource`).
:param source_site_filter:
The source filter to use (default noop filter)
:returns:
Generator of :class:`~openquake.hazardlib.source.rupture.Rupture`
objects that are contained in an event set. Some ruptures can be
missing from it, others can appear one or more times in a row.
"""
for source, s_sites in source_site_filter(sources):
try:
for rupture in source.iter_ruptures():
[n_occ] = rupture.sample_number_of_occurrences()
for _ in range(n_occ):
yield rupture
except Exception as err:
etype, err, tb = sys.exc_info()
msg = 'An error occurred with source id=%s. Error: %s'
msg %= (source.source_id, str(err))
raise_(etype, msg, tb) | 0.000574 |
def is_identity_matrix(mat,
ignore_phase=False,
rtol=RTOL_DEFAULT,
atol=ATOL_DEFAULT):
"""Test if an array is an identity matrix."""
if atol is None:
atol = ATOL_DEFAULT
if rtol is None:
rtol = RTOL_DEFAULT
mat = np.array(mat)
if mat.ndim != 2:
return False
if ignore_phase:
# If the matrix is equal to an identity up to a phase, we can
# remove the phase by multiplying each entry by the complex
# conjugate of the phase of the [0, 0] entry.
theta = np.angle(mat[0, 0])
mat = np.exp(-1j * theta) * mat
# Check if square identity
iden = np.eye(len(mat))
return np.allclose(mat, iden, rtol=rtol, atol=atol) | 0.001292 |
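A small numpy example of the phase-insensitive check, assuming the function above (and its module-level tolerance constants) is importable; the global phase of 0.3 rad is arbitrary.
```
import numpy as np

mat = np.exp(1j * 0.3) * np.eye(2)                 # identity up to a global phase
print(is_identity_matrix(mat))                     # False: entries differ from eye(2)
print(is_identity_matrix(mat, ignore_phase=True))  # True: the phase is divided out
```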
def downloadFile(url,outfile=None):
''' Copied from http://stackoverflow.com/questions/16694907/how-to-download-large-file-in-python-with-requests-py '''
if not outfile:
outfile = url.split('/')[-1]
r = requests.get(url, stream=True)
with open(outfile, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
return outfile | 0.014354 |
def execute(self, operation, parameters=()):
"""
Wraps execute method to record the query, execution duration and
stackframe.
"""
        __traceback_hide__ = True  # NOQA
        # Time the execution of the query
start = time.time()
try:
return self.cursor.execute(operation, parameters)
finally:
end = time.time()
# Save the data
data = {
'name': operation,
'args': parameters,
'start': start,
'end': end,
}
self._record(data) | 0.003221 |
def set_redraw_lag(self, lag_sec):
"""Set lag time for redrawing the canvas.
Parameters
----------
lag_sec : float
Number of seconds to wait.
"""
self.defer_redraw = (lag_sec > 0.0)
if self.defer_redraw:
self.defer_lagtime = lag_sec | 0.006369 |
def makefractalCIJ(mx_lvl, E, sz_cl, seed=None):
'''
This function generates a directed network with a hierarchical modular
organization. All modules are fully connected and connection density
decays as 1/(E^n), with n = index of hierarchical level.
Parameters
----------
mx_lvl : int
number of hierarchical levels, N = 2^mx_lvl
E : int
connection density fall off per level
sz_cl : int
size of clusters (must be power of 2)
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
connection matrix
K : int
number of connections present in output CIJ
'''
rng = get_rng(seed)
# make a stupid little template
t = np.ones((2, 2)) * 2
# compute N and cluster size
n = 2**mx_lvl
sz_cl -= 1
for lvl in range(1, mx_lvl):
s = 2**(lvl + 1)
CIJ = np.ones((s, s))
grp1 = range(int(s / 2))
grp2 = range(int(s / 2), s)
ix1 = np.add.outer(np.array(grp1) * s, grp1).flatten()
ix2 = np.add.outer(np.array(grp2) * s, grp2).flatten()
CIJ.flat[ix1] = t # numpy indexing is teh sucks :(
CIJ.flat[ix2] = t
CIJ += 1
t = CIJ.copy()
CIJ -= (np.ones((s, s)) + mx_lvl * np.eye(s))
# assign connection probabilities
ee = mx_lvl - CIJ - sz_cl
ee = (ee > 0) * ee
prob = (1 / E**ee) * (np.ones((s, s)) - np.eye(s))
CIJ = (prob > rng.random_sample((n, n)))
# count connections
k = np.sum(CIJ)
return np.array(CIJ, dtype=int), k | 0.001724 |
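A brief usage sketch, assuming the function above and its `get_rng` helper are importable from the surrounding (bctpy-style) module; the level, density and cluster parameters are arbitrary example values.
```
# Example parameters: 2**6 = 64 nodes, density fall-off E=3, clusters of size 2.
CIJ, k = makefractalCIJ(mx_lvl=6, E=3, sz_cl=2, seed=42)
print(CIJ.shape, "connections:", k)   # (64, 64); k depends on the seed
```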
def _detsat_one(filename, ext, sigma=2.0, low_thresh=0.1, h_thresh=0.5,
small_edge=60, line_len=200, line_gap=75,
percentile=(4.5, 93.0), buf=200, plot=False, verbose=False):
"""Called by :func:`detsat`."""
if verbose:
t_beg = time.time()
fname = '{0}[{1}]'.format(filename, ext)
# check extension
if ext not in (1, 4, 'SCI', ('SCI', 1), ('SCI', 2)):
warnings.warn('{0} is not a valid science extension for '
'ACS/WFC'.format(ext), AstropyUserWarning)
# get the data
image = fits.getdata(filename, ext)
# image = im.astype('float64')
# rescale the image
p1, p2 = np.percentile(image, percentile)
# there should always be some counts in the image, anything lower should
    # be set to zero. Makes things nicer for finding edges.
if p1 < 0:
p1 = 0.0
if verbose:
print('Rescale intensity percentiles: {0}, {1}'.format(p1, p2))
image = exposure.rescale_intensity(image, in_range=(p1, p2))
# get the edges
immax = np.max(image)
edge = canny(image, sigma=sigma,
low_threshold=immax * low_thresh,
high_threshold=immax * h_thresh)
# clean up the small objects, will make less noise
morph.remove_small_objects(edge, min_size=small_edge, connectivity=8,
in_place=True)
# create an array of angles from 0 to 180, exactly 0 will get bad columns
# but it is unlikely that a satellite will be exactly at 0 degrees, so
# don't bother checking.
# then, convert to radians.
angle = np.radians(np.arange(2, 178, 0.5, dtype=float))
# perform Hough Transform to detect straight lines.
# only do if plotting to visualize the image in hough space.
    # otherwise just perform a Probabilistic Hough Transform.
if plot and plt is not None:
h, theta, d = transform.hough_line(edge, theta=angle)
plt.ion()
# perform Probabilistic Hough Transformation to get line segments.
# NOTE: Results are slightly different from run to run!
result = transform.probabilistic_hough_line(
edge, threshold=210, line_length=line_len,
line_gap=line_gap, theta=angle)
result = np.asarray(result)
n_result = len(result)
# initially assume there is no satellite
satellite = False
# only continue if there was more than one point (at least a line)
# returned from the PHT
if n_result > 1:
if verbose:
print('Length of PHT result: {0}'.format(n_result))
# create lists for X and Y positions of lines and build points
x0 = result[:, 0, 0]
y0 = result[:, 0, 1]
x1 = result[:, 1, 0]
y1 = result[:, 1, 1]
        # set some boundaries
ymax, xmax = image.shape
topx = xmax - buf
topy = ymax - buf
if verbose:
print('min(x0)={0:4d}, min(x1)={1:4d}, min(y0)={2:4d}, '
'min(y1)={3:4d}'.format(min(x0), min(x1), min(y0), min(y1)))
print('max(x0)={0:4d}, max(x1)={1:4d}, max(y0)={2:4d}, '
'max(y1)={3:4d}'.format(max(x0), max(x1), max(y0), max(y1)))
print('buf={0}'.format(buf))
print('topx={0}, topy={1}'.format(topx, topy))
# set up trail angle "tracking" arrays.
# find the angle of each segment and filter things out.
# TODO: this may be wrong. Try using arctan2.
trail_angle = np.degrees(np.arctan((y1 - y0) / (x1 - x0)))
# round to the nearest 5 degrees, trail should not be that curved
round_angle = (5 * np.round(trail_angle * 0.2)).astype(int)
# take out 90 degree things
mask = round_angle % 90 != 0
if not np.any(mask):
if verbose:
print('No round_angle found')
return np.empty(0)
round_angle = round_angle[mask]
trail_angle = trail_angle[mask]
result = result[mask]
ang, num = stats.mode(round_angle)
# do the filtering
truth = round_angle == ang[0]
if verbose:
print('trail_angle: {0}'.format(trail_angle))
print('round_angle: {0}'.format(round_angle))
print('mode(round_angle): {0}'.format(ang[0]))
# filter out the outliers
trail_angle = trail_angle[truth]
result = result[truth]
n_result = len(result)
if verbose:
print('Filtered trail_angle: {0}'.format(trail_angle))
if n_result < 1:
return np.empty(0)
# if there is an unreasonable amount of points, it picked up garbage
elif n_result > 300:
warnings.warn(
'Way too many segments results to be correct ({0}). '
'Rejecting detection on {1}.'.format(n_result, fname),
AstropyUserWarning)
return np.empty(0)
# remake the point lists with things taken out
x0 = result[:, 0, 0]
y0 = result[:, 0, 1]
x1 = result[:, 1, 0]
y1 = result[:, 1, 1]
min_x0 = min(x0)
min_y0 = min(y0)
min_x1 = min(x1)
min_y1 = min(y1)
max_x0 = max(x0)
max_y0 = max(y0)
max_x1 = max(x1)
max_y1 = max(y1)
mean_angle = np.mean(trail_angle)
# make decisions on where the trail went and determine if a trail
# traversed the image
# top to bottom
if (((min_y0 < buf) or (min_y1 < buf)) and
((max_y0 > topy) or (max_y1 > topy))):
satellite = True
if verbose:
print('Trail Direction: Top to Bottom')
# right to left
elif (((min_x0 < buf) or (min_x1 < buf)) and
((max_x0 > topx) or (max_x1 > topx))):
satellite = True
if verbose:
print('Trail Direction: Right to Left')
# bottom to left
elif (((min_x0 < buf) or (min_x1 < buf)) and
((min_y0 < buf) or (min_y1 < buf)) and
(-1 > mean_angle > -89)):
satellite = True
if verbose:
print('Trail Direction: Bottom to Left')
# top to left
elif (((min_x0 < buf) or (min_x1 < buf)) and
((max_y0 > topy) or (max_y1 > topy)) and
(89 > mean_angle > 1)):
satellite = True
if verbose:
print('Trail Direction: Top to Left')
# top to right
elif (((max_x0 > topx) or (max_x1 > topx)) and
((max_y0 > topy) or (max_y1 > topy)) and
(-1 > mean_angle > -89)):
satellite = True
if verbose:
print('Trail Direction: Top to Right')
# bottom to right
elif (((max_x0 > topx) or (max_x1 > topx)) and
((min_y0 < buf) or (min_y1 < buf)) and
(89 > mean_angle > 1)):
satellite = True
if verbose:
print('Trail Direction: Bottom to Right')
if satellite:
if verbose:
print('{0} trail segment(s) detected'.format(n_result))
print('Trail angle list (not returned): ')
print(trail_angle)
print('End point list:')
for i, ((px0, py0), (px1, py1)) in enumerate(result, 1):
print('{0:5d}. ({1:4d}, {2:4d}), ({3:4d}, {4:4d})'.format(
i, px0, py0, px1, py1))
if plot and plt is not None:
mean = np.median(image)
stddev = image.std()
lower = mean - stddev
upper = mean + stddev
fig1, ax1 = plt.subplots()
ax1.imshow(edge, cmap=plt.cm.gray)
ax1.set_title('Edge image for {0}'.format(fname))
for (px0, py0), (px1, py1) in result: # Draw trails
ax1.plot((px0, px1), (py0, py1), scalex=False, scaley=False)
fig2, ax2 = plt.subplots()
ax2.imshow(
np.log(1 + h),
extent=(np.rad2deg(theta[-1]), np.rad2deg(theta[0]),
d[-1], d[0]), aspect=0.02)
ax2.set_title('Hough Transform')
ax2.set_xlabel('Angles (degrees)')
ax2.set_ylabel('Distance from Origin (pixels)')
fig3, ax3 = plt.subplots()
ax3.imshow(image, vmin=lower, vmax=upper, cmap=plt.cm.gray)
ax3.set_title(fname)
for (px0, py0), (px1, py1) in result: # Draw trails
ax3.plot((px0, px1), (py0, py1), scalex=False, scaley=False)
plt.draw()
    else:  # no trail direction matched; reject the detection
result = np.empty(0)
if verbose:
print('No trail detected; found {0} segments'.format(n_result))
if plot and plt is not None:
fig1, ax1 = plt.subplots()
ax1.imshow(edge, cmap=plt.cm.gray)
ax1.set_title(fname)
# Draw trails
for (px0, py0), (px1, py1) in result:
ax1.plot((px0, px1), (py0, py1), scalex=False, scaley=False)
if verbose:
t_end = time.time()
print('Run time: {0} s'.format(t_end - t_beg))
return result | 0.000109 |
def __locate_driver_named(name):
"""Searchs __modules for a driver named @arg name.
@returns the package for driver @arg name or None if one can't be found.
"""
global __modules
if type(__modules) is not list:
__modules = list(__modules)
ms = [d for d in __modules if d.ahioDriverInfo.NAME == name]
if not ms:
return None
return ms[0] | 0.002597 |
def create_context(self, message_queue, task_id):
"""
Create values to be used by upload_folder_run function.
:param message_queue: Queue: queue background process can send messages to us on
:param task_id: int: id of this command's task so message will be routed correctly
"""
params = (self.remote_folder.name, self.parent.kind, self.parent.remote_id)
return UploadContext(self.settings, params, message_queue, task_id) | 0.010482 |
def fetch(self):
"""
Fetch a SampleInstance
:returns: Fetched SampleInstance
:rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return SampleInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
sid=self._solution['sid'],
) | 0.00346 |
def convex_comb_dis(model,a,b):
"""convex_comb_dis -- add piecewise relation with convex combination formulation
Parameters:
- model: a model where to include the piecewise linear relation
- a[k]: x-coordinate of the k-th point in the piecewise linear relation
- b[k]: y-coordinate of the k-th point in the piecewise linear relation
Returns the model with the piecewise linear relation on added variables X, Y, and z.
"""
K = len(a)-1
wL,wR,z = {},{},{}
for k in range(K):
wL[k] = model.addVar(lb=0, ub=1, vtype="C")
wR[k] = model.addVar(lb=0, ub=1, vtype="C")
z[k] = model.addVar(vtype="B")
X = model.addVar(lb=a[0], ub=a[K], vtype="C")
Y = model.addVar(lb=-model.infinity(), vtype="C")
model.addCons(X == quicksum(a[k]*wL[k] + a[k+1]*wR[k] for k in range(K)))
model.addCons(Y == quicksum(b[k]*wL[k] + b[k+1]*wR[k] for k in range(K)))
for k in range(K):
model.addCons(wL[k] + wR[k] == z[k])
model.addCons(quicksum(z[k] for k in range(K)) == 1)
return X,Y,z | 0.010261 |
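A minimal usage sketch for the disaggregated convex-combination formulation above, assuming a SCIP-style modelling interface such as PySCIPOpt (the addVar/addCons/quicksum calls suggest one); the breakpoint data is made up for illustration and is not part of the original snippet.
from pyscipopt import Model, quicksum  # assumed backend providing the API used by convex_comb_dis

a = [0.0, 1.0, 3.0, 6.0]      # x-coordinates of the breakpoints
b = [0.0, 2.0, 1.5, 5.0]      # y-coordinates of the breakpoints
model = Model("piecewise_demo")
X, Y, z = convex_comb_dis(model, a, b)
model.addCons(X >= 2.5)       # force x into the interior of the domain
model.setObjective(Y, "minimize")
model.optimize()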
def pack_tuple(self, values):
"""
Pack tuple of values
<tuple> ::= <cardinality><field>+
        :param values: tuple to be packed
:type value: tuple of scalar values (bytes, str or int)
:return: packed tuple
:rtype: bytes
"""
assert isinstance(values, (tuple, list))
cardinality = [struct_L.pack(len(values))]
packed_items = [self.pack_field(v) for v in values]
return b''.join(itertools.chain(cardinality, packed_items)) | 0.003914 |
def is_dot(ip):
"""Return true if the IP address is in dotted decimal notation."""
octets = str(ip).split('.')
if len(octets) != 4:
return False
for i in octets:
try:
val = int(i)
except ValueError:
return False
if val > 255 or val < 0:
return False
return True | 0.002865 |
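A few quick checks of the helper above (plain illustration values):
print(is_dot('192.168.0.1'))   # True  -- four in-range octets
print(is_dot('300.1.2.3'))     # False -- octet out of the 0-255 range
print(is_dot('::1'))           # False -- not dotted decimal notation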
def __parse_blacklist(self, json):
"""Parse blacklist entries using Sorting Hat format.
The Sorting Hat blacklist format is a JSON stream that
stores a list of blacklisted entries.
Next, there is an example of a valid stream:
{
"blacklist": [
"John Doe",
"John Smith",
"[email protected]"
]
}
        :param json: parsed JSON object containing the blacklist
:raises InvalidFormatError: raised when the format of the stream is
not valid.
"""
try:
for entry in json['blacklist']:
if not entry:
msg = "invalid json format. Blacklist entries cannot be null or empty"
raise InvalidFormatError(cause=msg)
excluded = self.__encode(entry)
bl = self._blacklist.get(excluded, None)
if not bl:
bl = MatchingBlacklist(excluded=excluded)
self._blacklist[excluded] = bl
except KeyError as e:
msg = "invalid json format. Attribute %s not found" % e.args
raise InvalidFormatError(cause=msg) | 0.002502 |
def fft_convolve(data, h, res_g = None,
plan = None, inplace = False,
kernel_is_fft = False,
kernel_is_fftshifted = False):
""" convolves data with kernel h via FFTs
data should be either a numpy array or a OCLArray (see doc for fft)
both data and h should be same shape
if data/h are OCLArrays, then:
- type should be complex64
- shape should be equal and power of two
- h is assumed to be already fftshifted
(otherwise set kernel_is_fftshifted to true)
"""
if isinstance(data,np.ndarray):
return _fft_convolve_numpy(data, h,
plan = plan,
kernel_is_fft = kernel_is_fft,
kernel_is_fftshifted = kernel_is_fftshifted)
elif isinstance(data,OCLArray):
return _fft_convolve_gpu(data,h, res_g = res_g,
plan = plan, inplace = inplace,
kernel_is_fft = kernel_is_fft)
else:
raise TypeError("array argument (1) has bad type: %s"%type(data)) | 0.029185 |
def search_media(self, series, query_string):
"""Search for media from a series starting with query_string, case-sensitive
@param crunchyroll.models.Series series the series to search in
@param str query_string the search query, same restrictions
as `search_anime_series`
@return list<crunchyroll.models.Media>
"""
params = {
'sort': ANDROID.FILTER_PREFIX + query_string,
}
params.update(self._get_series_query_dict(series))
result = self._android_api.list_media(**params)
return result | 0.007587 |
def _calc_waves(self, angular_freqs, profile):
"""Compute the wave numbers and amplitudes (up- and down-going).
Parameters
----------
angular_freqs: :class:`numpy.ndarray`
Angular frequency at which the waves are computed.
profile: :class:`~.base.site.Profile`
Site profile.
"""
# Compute the complex wave numbers of the system
        wave_nums = np.empty((len(profile), len(angular_freqs)), complex)
for i, l in enumerate(profile):
wave_nums[i, :] = angular_freqs / l.comp_shear_vel
# Compute the waves. In the top surface layer, the up-going and
# down-going waves have an amplitude of 1 as they are completely
# reflected at the surface.
        waves_a = np.ones_like(wave_nums, complex)
        waves_b = np.ones_like(wave_nums, complex)
for i, l in enumerate(profile[:-1]):
# Complex impedance -- wave number can be zero which causes an
# error.
with np.errstate(invalid='ignore'):
cimped = ((wave_nums[i] * l.comp_shear_mod) /
(wave_nums[i + 1] * profile[i + 1].comp_shear_mod))
# Complex term to simplify equations -- uses full layer height
cterm = 1j * wave_nums[i, :] * l.thickness
waves_a[i + 1, :] = (
0.5 * waves_a[i] *
(1 + cimped) * np.exp(cterm) + 0.5 * waves_b[i] *
(1 - cimped) * np.exp(-cterm))
waves_b[i + 1, :] = (
0.5 * waves_a[i] *
(1 - cimped) * np.exp(cterm) + 0.5 * waves_b[i] *
(1 + cimped) * np.exp(-cterm))
# Set wave amplitudes with zero frequency to 1
mask = ~np.isfinite(cimped)
waves_a[i + 1, mask] = 1.
waves_b[i + 1, mask] = 1.
# fixme: Better way to handle this?
# Set wave amplitudes to 1 at frequencies near 0
mask = np.isclose(angular_freqs, 0)
waves_a[-1, mask] = 1.
waves_b[-1, mask] = 1.
self._waves_a = waves_a
self._waves_b = waves_b
self._wave_nums = wave_nums | 0.000913 |
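Written out, the layer loop above appears to encode the standard up-/down-going wave transfer relations (a sketch under that reading; \(\alpha_m\) is the complex impedance ratio called cimped, \(h_m\) the layer thickness, and \(G^*_m\), \(v^*_{s,m}\) the complex shear modulus and shear-wave velocity):

A_{m+1} = \tfrac{1}{2} A_m (1 + \alpha_m) e^{i k_m h_m} + \tfrac{1}{2} B_m (1 - \alpha_m) e^{-i k_m h_m}
B_{m+1} = \tfrac{1}{2} A_m (1 - \alpha_m) e^{i k_m h_m} + \tfrac{1}{2} B_m (1 + \alpha_m) e^{-i k_m h_m}
\alpha_m = \frac{k_m G^*_m}{k_{m+1} G^*_{m+1}}, \qquad k_m = \frac{\omega}{v^*_{s,m}}

with \(A_1 = B_1 = 1\) at the free surface, matching the initialisation of waves_a and waves_b.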
def has_offline_historical_manager_or_raise(self):
"""Raises an exception if model uses a history manager and
historical model history_id is not a UUIDField.
Note: expected to use edc_model.HistoricalRecords instead of
simple_history.HistoricalRecords.
"""
try:
model = self.instance.__class__.history.model
except AttributeError:
model = self.instance.__class__
field = [field for field in model._meta.fields if field.name == "history_id"]
if field and not isinstance(field[0], UUIDField):
raise OfflineHistoricalManagerError(
f"Field 'history_id' of historical model "
f"'{model._meta.app_label}.{model._meta.model_name}' "
"must be an UUIDfield. "
"For history = HistoricalRecords() use edc_model.HistoricalRecords instead of "
"simple_history.HistoricalRecords(). "
f"See '{self.instance._meta.app_label}.{self.instance._meta.model_name}'."
) | 0.004704 |
def ignore( self, other ):
"""
Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
Example::
patt = OneOrMore(Word(alphas))
patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
patt.ignore(cStyleComment)
patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
"""
if isinstance(other, basestring):
other = Suppress(other)
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
self.ignoreExprs.append(other)
else:
self.ignoreExprs.append( Suppress( other.copy() ) )
return self | 0.013665 |
def element(self, inp=None, data_ptr=None, order=None):
"""Create a new element.
Parameters
----------
inp : `array-like`, optional
Input used to initialize the new element.
If ``inp`` is `None`, an empty element is created with no
guarantee of its state (memory allocation only).
The new element will use ``order`` as storage order if
provided, otherwise `default_order`.
Otherwise, a copy is avoided whenever possible. This requires
correct `shape` and `dtype`, and if ``order`` is provided,
also contiguousness in that ordering. If any of these
conditions is not met, a copy is made.
data_ptr : int, optional
Pointer to the start memory address of a contiguous Numpy array
or an equivalent raw container with the same total number of
bytes. For this option, ``order`` must be either ``'C'`` or
``'F'``.
The option is also mutually exclusive with ``inp``.
order : {None, 'C', 'F'}, optional
Storage order of the returned element. For ``'C'`` and ``'F'``,
contiguous memory in the respective ordering is enforced.
The default ``None`` enforces no contiguousness.
Returns
-------
element : `NumpyTensor`
The new element, created from ``inp`` or from scratch.
Examples
--------
Without arguments, an uninitialized element is created. With an
array-like input, the element can be initialized:
>>> space = odl.rn(3)
>>> empty = space.element()
>>> empty.shape
(3,)
>>> empty.space
rn(3)
>>> x = space.element([1, 2, 3])
>>> x
rn(3).element([ 1., 2., 3.])
If the input already is a `numpy.ndarray` of correct `dtype`, it
will merely be wrapped, i.e., both array and space element access
the same memory, such that mutations will affect both:
>>> arr = np.array([1, 2, 3], dtype=float)
>>> elem = odl.rn(3).element(arr)
>>> elem[0] = 0
>>> elem
rn(3).element([ 0., 2., 3.])
>>> arr
array([ 0., 2., 3.])
Elements can also be constructed from a data pointer, resulting
again in shared memory:
>>> int_space = odl.tensor_space((2, 3), dtype=int)
>>> arr = np.array([[1, 2, 3],
... [4, 5, 6]], dtype=int, order='F')
>>> ptr = arr.ctypes.data
>>> y = int_space.element(data_ptr=ptr, order='F')
>>> y
tensor_space((2, 3), dtype=int).element(
[[1, 2, 3],
[4, 5, 6]]
)
>>> y[0, 1] = -1
>>> arr
array([[ 1, -1, 3],
[ 4, 5, 6]])
"""
if order is not None and str(order).upper() not in ('C', 'F'):
raise ValueError("`order` {!r} not understood".format(order))
if inp is None and data_ptr is None:
if order is None:
arr = np.empty(self.shape, dtype=self.dtype,
order=self.default_order)
else:
arr = np.empty(self.shape, dtype=self.dtype, order=order)
return self.element_type(self, arr)
elif inp is None and data_ptr is not None:
if order is None:
raise ValueError('`order` cannot be None for element '
'creation from pointer')
ctype_array_def = ctypes.c_byte * self.nbytes
as_ctype_array = ctype_array_def.from_address(data_ptr)
as_numpy_array = np.ctypeslib.as_array(as_ctype_array)
arr = as_numpy_array.view(dtype=self.dtype)
arr = arr.reshape(self.shape, order=order)
return self.element_type(self, arr)
elif inp is not None and data_ptr is None:
if inp in self and order is None:
# Short-circuit for space elements and no enforced ordering
return inp
# Try to not copy but require dtype and order if given
# (`order=None` is ok as np.array argument)
arr = np.array(inp, copy=False, dtype=self.dtype, ndmin=self.ndim,
order=order)
# Make sure the result is writeable, if not make copy.
# This happens for e.g. results of `np.broadcast_to()`.
if not arr.flags.writeable:
arr = arr.copy()
if arr.shape != self.shape:
raise ValueError('shape of `inp` not equal to space shape: '
'{} != {}'.format(arr.shape, self.shape))
return self.element_type(self, arr)
else:
raise TypeError('cannot provide both `inp` and `data_ptr`') | 0.000408 |
def main(pub_port=None, sub_port=None):
'''main of forwarder
:param sub_port: port for subscribers
:param pub_port: port for publishers
'''
try:
if sub_port is None:
sub_port = get_sub_port()
if pub_port is None:
pub_port = get_pub_port()
context = zmq.Context(1)
frontend = context.socket(zmq.SUB)
backend = context.socket(zmq.PUB)
frontend.bind('tcp://*:{pub_port}'.format(pub_port=pub_port))
frontend.setsockopt(zmq.SUBSCRIBE, b'')
backend.bind('tcp://*:{sub_port}'.format(sub_port=sub_port))
zmq.device(zmq.FORWARDER, frontend, backend)
except KeyboardInterrupt:
pass
finally:
frontend.close()
backend.close()
context.term() | 0.001272 |
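A hedged sketch of how clients would talk to the forwarder above: publishers connect to the frontend (pub_port, the bound SUB socket) and subscribers to the backend (sub_port, the bound PUB socket). The port numbers are placeholders, since get_pub_port/get_sub_port are not shown.
import zmq

ctx = zmq.Context.instance()

# Publisher side: connect to the forwarder's pub_port.
pub = ctx.socket(zmq.PUB)
pub.connect('tcp://localhost:5559')            # hypothetical pub_port
pub.send_string('weather sunny')               # real code usually waits briefly after connect() (slow joiner)

# Subscriber side: connect to the forwarder's sub_port.
sub = ctx.socket(zmq.SUB)
sub.connect('tcp://localhost:5560')            # hypothetical sub_port
sub.setsockopt_string(zmq.SUBSCRIBE, 'weather')
print(sub.recv_string())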
def _get_val(val, full=False):
"""
Get the value(s) of a dataset as a single value or as 1-d list of
values. In the special case of timeseries, when a check is for time-based
criteria, you can return the entire timeseries.
"""
try:
val = val.strip()
except:
pass
logging.debug("%s, type=%s", val, type(val))
if isinstance(val, float):
return val
if isinstance(val, int):
return val
if isinstance(val, np.ndarray):
return list(val)
try:
val = float(val)
return val
except:
pass
try:
val = int(val)
return val
except:
pass
if type(val) == pd.DataFrame:
if full:
return val
newval = []
values = val.values
for v in values:
newv = _get_val(v)
if type(newv) == list:
newval.extend(newv)
else:
newval.append(newv)
val = newval
elif type(val) == dict:
if full:
return val
newval = []
for v in val.values():
newv = _get_val(v)
if type(newv) == list:
newval.extend(newv)
else:
newval.append(newv)
val = newval
elif type(val) == list or type(val) == np.ndarray:
newval = []
for arr_val in val:
v = _get_val(arr_val)
newval.append(v)
val = newval
return val | 0.003958 |
def learn(
network,
env,
seed=None,
nsteps=5,
total_timesteps=int(80e6),
vf_coef=0.5,
ent_coef=0.01,
max_grad_norm=0.5,
lr=7e-4,
lrschedule='linear',
epsilon=1e-5,
alpha=0.99,
gamma=0.99,
log_interval=100,
load_path=None,
**network_kwargs):
'''
Main entrypoint for A2C algorithm. Train a policy with given network architecture on a given environment using a2c algorithm.
Parameters:
-----------
network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)
specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns
tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward
neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.
See baselines.common/policies.py/lstm for more details on using recurrent nets in policies
env: RL environment. Should implement interface similar to VecEnv (baselines.common/vec_env) or be wrapped with DummyVecEnv (baselines.common/vec_env/dummy_vec_env.py)
    seed: seed to make random number sequence in the algorithm reproducible. By default is None which means seed from system noise generator (not reproducible)
nsteps: int, number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
nenv is number of environment copies simulated in parallel)
total_timesteps: int, total number of timesteps to train on (default: 80M)
vf_coef: float, coefficient in front of value function loss in the total loss function (default: 0.5)
    ent_coef: float, coefficient in front of the policy entropy in the total loss function (default: 0.01)
    max_grad_norm: float, gradient is clipped to have global L2 norm no more than this value (default: 0.5)
lr: float, learning rate for RMSProp (current implementation has RMSProp hardcoded in) (default: 7e-4)
lrschedule: schedule of learning rate. Can be 'linear', 'constant', or a function [0..1] -> [0..1] that takes fraction of the training progress as input and
returns fraction of the learning rate (specified as lr) as output
epsilon: float, RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update) (default: 1e-5)
alpha: float, RMSProp decay parameter (default: 0.99)
gamma: float, reward discounting parameter (default: 0.99)
log_interval: int, specifies how frequently the logs are printed out (default: 100)
**network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
'''
set_global_seeds(seed)
# Get the nb of env
nenvs = env.num_envs
policy = build_policy(env, network, **network_kwargs)
# Instantiate the model object (that creates step_model and train_model)
model = Model(policy=policy, env=env, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule)
if load_path is not None:
model.load(load_path)
# Instantiate the runner object
runner = Runner(env, model, nsteps=nsteps, gamma=gamma)
epinfobuf = deque(maxlen=100)
# Calculate the batch_size
nbatch = nenvs*nsteps
# Start total timer
tstart = time.time()
for update in range(1, total_timesteps//nbatch+1):
# Get mini batch of experiences
obs, states, rewards, masks, actions, values, epinfos = runner.run()
epinfobuf.extend(epinfos)
policy_loss, value_loss, policy_entropy = model.train(obs, states, rewards, masks, actions, values)
nseconds = time.time()-tstart
# Calculate the fps (frame per second)
fps = int((update*nbatch)/nseconds)
if update % log_interval == 0 or update == 1:
            # Calculates whether the value function is a good predictor of the returns (ev close to 1)
            # or whether it's just worse than predicting nothing (ev <= 0)
ev = explained_variance(values, rewards)
logger.record_tabular("nupdates", update)
logger.record_tabular("total_timesteps", update*nbatch)
logger.record_tabular("fps", fps)
logger.record_tabular("policy_entropy", float(policy_entropy))
logger.record_tabular("value_loss", float(value_loss))
logger.record_tabular("explained_variance", float(ev))
logger.record_tabular("eprewmean", safemean([epinfo['r'] for epinfo in epinfobuf]))
logger.record_tabular("eplenmean", safemean([epinfo['l'] for epinfo in epinfobuf]))
logger.dump_tabular()
return model | 0.005838 |
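A hedged usage sketch for the entry point above, wrapping a Gym environment in the DummyVecEnv mentioned in the docstring; the import paths follow the baselines layout referenced there but may differ between versions.
import gym
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv

# Four copies of the environment run in lock-step; batch size is nenvs * nsteps.
env = DummyVecEnv([lambda: gym.make('CartPole-v1') for _ in range(4)])
model = learn(network='mlp', env=env, seed=0, nsteps=5, total_timesteps=100000)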
def function_name(self):
"""
Returns name of the function to invoke. If no function identifier is provided, this method will return name of
the only function from the template
:return string: Name of the function
:raises InvokeContextException: If function identifier is not provided
"""
if self._function_identifier:
return self._function_identifier
# Function Identifier is *not* provided. If there is only one function in the template,
# default to it.
all_functions = [f for f in self._function_provider.get_all()]
if len(all_functions) == 1:
return all_functions[0].name
# Get all the available function names to print helpful exception message
all_function_names = [f.name for f in all_functions]
# There are more functions in the template, and function identifier is not provided, hence raise.
raise InvokeContextException("You must provide a function identifier (function's Logical ID in the template). "
"Possible options in your template: {}".format(all_function_names)) | 0.006861 |
def setCache(self, val = 1):
""" Sets cache on (or updates), or turns off """
# first clear the old cached values
self.cacheConnections = []
self.cacheLayers = []
if val:
for layer in self.layers:
if layer.active and not layer.frozen:
self.cacheLayers.append( layer )
for connection in self.connections:
if connection.active and not connection.frozen:
self.cacheConnections.append( connection ) | 0.015094 |
def lock(self):
"""Lock the Sesame. Return True on success, else False."""
endpoint = API_SESAME_CONTROL_ENDPOINT.format(self.device_id)
payload = {'type': 'lock'}
response = self.account.request('POST', endpoint, payload=payload)
if response is None:
return False
if response.status_code == 200 or response.status_code == 204:
return True
return False | 0.00463 |
def tab(self):
"""Move to the next tab space, or the end of the screen if there
aren't anymore left.
"""
for stop in sorted(self.tabstops):
if self.cursor.x < stop:
column = stop
break
else:
column = self.columns - 1
self.cursor.x = column | 0.005814 |
def HSV_to_RGB(cobj, target_rgb, *args, **kwargs):
"""
HSV to RGB conversion.
H values are in degrees and are 0 to 360.
S values are a percentage, 0.0 to 1.0.
V values are a percentage, 0.0 to 1.0.
"""
H = cobj.hsv_h
S = cobj.hsv_s
V = cobj.hsv_v
h_floored = int(math.floor(H))
h_sub_i = int(h_floored / 60) % 6
var_f = (H / 60.0) - (h_floored // 60)
var_p = V * (1.0 - S)
var_q = V * (1.0 - var_f * S)
var_t = V * (1.0 - (1.0 - var_f) * S)
if h_sub_i == 0:
rgb_r = V
rgb_g = var_t
rgb_b = var_p
elif h_sub_i == 1:
rgb_r = var_q
rgb_g = V
rgb_b = var_p
elif h_sub_i == 2:
rgb_r = var_p
rgb_g = V
rgb_b = var_t
elif h_sub_i == 3:
rgb_r = var_p
rgb_g = var_q
rgb_b = V
elif h_sub_i == 4:
rgb_r = var_t
rgb_g = var_p
rgb_b = V
elif h_sub_i == 5:
rgb_r = V
rgb_g = var_p
rgb_b = var_q
else:
raise ValueError("Unable to convert HSL->RGB due to value error.")
# TODO: Investigate intent of following code block.
# In the event that they define an HSV color and want to convert it to
# a particular RGB space, let them override it here.
# if target_rgb is not None:
# rgb_type = target_rgb
# else:
# rgb_type = cobj.rgb_type
return target_rgb(rgb_r, rgb_g, rgb_b) | 0.000694 |
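A small hand-checked example of the sector logic above. The _FakeHSV stand-in and tuple factory are hypothetical test doubles, not colormath classes; H = 120 degrees with S = V = 1 lands in sector h_sub_i = 2, giving (var_p, V, var_t) = (0, 1, 0), i.e. pure green.
class _FakeHSV(object):
    def __init__(self, h, s, v):
        self.hsv_h, self.hsv_s, self.hsv_v = h, s, v

def _rgb_tuple(r, g, b):          # stand-in for the target RGB constructor
    return (r, g, b)

print(HSV_to_RGB(_FakeHSV(120.0, 1.0, 1.0), _rgb_tuple))   # -> (0.0, 1.0, 0.0)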
def get_user_id_from_email(self, email):
""" Uses the get-all-user-accounts Portals API to retrieve the
user-id by supplying an email. """
accts = self.get_all_user_accounts()
for acct in accts:
if acct['email'] == email:
return acct['id']
return None | 0.00625 |
def _analyze_read_write(self):
""" Compute variables read/written/...
"""
write_var = [x.variables_written_as_expression for x in self.nodes]
write_var = [x for x in write_var if x]
write_var = [item for sublist in write_var for item in sublist]
write_var = list(set(write_var))
        # Remove duplicates if they share the same string representation
write_var = [next(obj) for i, obj in groupby(sorted(write_var, key=lambda x: str(x)), lambda x: str(x))]
self._expression_vars_written = write_var
write_var = [x.variables_written for x in self.nodes]
write_var = [x for x in write_var if x]
write_var = [item for sublist in write_var for item in sublist]
write_var = list(set(write_var))
        # Remove duplicates if they share the same string representation
write_var = [next(obj) for i, obj in\
groupby(sorted(write_var, key=lambda x: str(x)), lambda x: str(x))]
self._vars_written = write_var
read_var = [x.variables_read_as_expression for x in self.nodes]
read_var = [x for x in read_var if x]
read_var = [item for sublist in read_var for item in sublist]
        # Remove duplicates if they share the same string representation
read_var = [next(obj) for i, obj in\
groupby(sorted(read_var, key=lambda x: str(x)), lambda x: str(x))]
self._expression_vars_read = read_var
read_var = [x.variables_read for x in self.nodes]
read_var = [x for x in read_var if x]
read_var = [item for sublist in read_var for item in sublist]
        # Remove duplicates if they share the same string representation
read_var = [next(obj) for i, obj in\
groupby(sorted(read_var, key=lambda x: str(x)), lambda x: str(x))]
self._vars_read = read_var
self._state_vars_written = [x for x in self.variables_written if\
isinstance(x, StateVariable)]
self._state_vars_read = [x for x in self.variables_read if\
isinstance(x, (StateVariable))]
self._solidity_vars_read = [x for x in self.variables_read if\
isinstance(x, (SolidityVariable))]
self._vars_read_or_written = self._vars_written + self._vars_read
slithir_variables = [x.slithir_variables for x in self.nodes]
slithir_variables = [x for x in slithir_variables if x]
self._slithir_variables = [item for sublist in slithir_variables for item in sublist] | 0.006126 |
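The repeated sorted/groupby construction above deduplicates objects by their string representation, keeping one representative per distinct str() value; a standalone illustration of the idiom on toy data:
from itertools import groupby

items = ['b', 'a', 'b', 'c', 'a']
unique = [next(grp) for _key, grp in groupby(sorted(items, key=str), key=str)]
print(unique)   # ['a', 'b', 'c'] -- one object kept per distinct string form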
def prompt_4_yes_no(question):
"""
Ask a question and prompt for yes or no
:param question: Question to ask; answer is yes/no
:return: :boolean
"""
while True:
sys.stdout.write(question + ' (y/n)? ')
try:
choice = raw_input().lower()
except:
choice = input().lower()
if choice == 'yes' or choice == 'y':
return True
elif choice == 'no' or choice == 'n':
return False
else:
printError('\'%s\' is not a valid answer. Enter \'yes\'(y) or \'no\'(n).' % choice) | 0.004724 |
def select_server(self,
selector,
server_selection_timeout=None,
address=None):
"""Like select_servers, but choose a random server if several match."""
return random.choice(self.select_servers(selector,
server_selection_timeout,
address)) | 0.011962 |
def set_credentials(self, client_id=None, client_secret=None):
"""
set given credentials and reset the session
"""
self._client_id = client_id
self._client_secret = client_secret
# make sure to reset session due to credential change
self._session = None | 0.006452 |
def get_parent_gradebooks(self, gradebook_id):
"""Gets the parents of the given gradebook.
arg: gradebook_id (osid.id.Id): the ``Id`` of a gradebook
return: (osid.grading.GradebookList) - the parents of the
gradebook
raise: NotFound - ``gradebook_id`` is not found
raise: NullArgument - ``gradebook_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_parent_bins
if self._catalog_session is not None:
return self._catalog_session.get_parent_catalogs(catalog_id=gradebook_id)
return GradebookLookupSession(
self._proxy,
self._runtime).get_gradebooks_by_ids(
list(self.get_parent_gradebook_ids(gradebook_id))) | 0.003033 |
def Barati_high(Re):
r'''Calculates drag coefficient of a smooth sphere using the method in
[1]_.
.. math::
C_D = 8\times 10^{-6}\left[(Re/6530)^2 + \tanh(Re) - 8\ln(Re)/\ln(10)\right]
- 0.4119\exp(-2.08\times10^{43}/[Re + Re^2]^4)
-2.1344\exp(-\{[\ln(Re^2 + 10.7563)/\ln(10)]^2 + 9.9867\}/Re)
+0.1357\exp(-[(Re/1620)^2 + 10370]/Re)
- 8.5\times 10^{-3}\{2\ln[\tanh(\tanh(Re))]/\ln(10) - 2825.7162\}/Re
+ 2.4795
Parameters
----------
Re : float
Particle Reynolds number of the sphere using the surrounding fluid
density and viscosity, [-]
Returns
-------
Cd : float
Drag coefficient [-]
Notes
-----
Range is Re <= 1E6
This model is the wider-range model the authors developed.
    At sufficiently low diameters or Re values, drag is no longer a phenomenon.
Examples
--------
Matching example in [1]_, in a table of calculated values.
>>> Barati_high(200.)
0.7730544082789523
References
----------
.. [1] Barati, Reza, Seyed Ali Akbar Salehi Neyshabouri, and Goodarz
Ahmadi. "Development of Empirical Models with High Accuracy for
Estimation of Drag Coefficient of Flow around a Smooth Sphere: An
Evolutionary Approach." Powder Technology 257 (May 2014): 11-19.
doi:10.1016/j.powtec.2014.02.045.
'''
Re2 = Re*Re
t0 = 1.0/Re
t1 = (Re/6530.)
t2 = (Re/1620.)
t3 = log10(Re2 + 10.7563)
tanhRe = tanh(Re)
Cd = (8E-6*(t1*t1 + tanhRe - 8.0*log10(Re))
- 0.4119*exp(-2.08E43/(Re+Re2)**4)
- 2.1344*exp(-t0*(t3*t3 + 9.9867))
+ 0.1357*exp(-t0*(t2*t2 + 10370.))
- 8.5E-3*t0*(2.0*log10(tanh(tanhRe)) - 2825.7162) + 2.4795)
return Cd | 0.003413 |
def unregister_signal_handlers():
""" set signal handlers to default """
signal.signal(SIGNAL_STACKTRACE, signal.SIG_IGN)
signal.signal(SIGNAL_PDB, signal.SIG_IGN) | 0.005714 |
def save_plain_image_as_file(self, filepath, format='png', quality=90):
"""Used for generating thumbnails. Does not include overlaid
graphics.
"""
img_w = self.get_plain_image_as_widget()
# assumes that the image widget has some method for saving to
# a file
img_w.save(filepath, format=format, quality=quality) | 0.005435 |
def get_failed_requests(self, results):
"""Return the requests that failed.
:param results: the results of a membership request check
:type results: :class:`list`
:return: the failed requests
:rtype: generator
"""
data = {member['guid']: member for member in results}
for request in self.requests:
if request['guid'] not in data:
yield request | 0.004587 |
def create(self):
"""
Create the link on the nodes
"""
node1 = self._nodes[0]["node"]
adapter_number1 = self._nodes[0]["adapter_number"]
port_number1 = self._nodes[0]["port_number"]
node2 = self._nodes[1]["node"]
adapter_number2 = self._nodes[1]["adapter_number"]
port_number2 = self._nodes[1]["port_number"]
# Get an IP allowing communication between both host
try:
(node1_host, node2_host) = yield from node1.compute.get_ip_on_same_subnet(node2.compute)
except ValueError as e:
raise aiohttp.web.HTTPConflict(text=str(e))
        # Reserve a UDP port on both sides
response = yield from node1.compute.post("/projects/{}/ports/udp".format(self._project.id))
self._node1_port = response.json["udp_port"]
response = yield from node2.compute.post("/projects/{}/ports/udp".format(self._project.id))
self._node2_port = response.json["udp_port"]
node1_filters = {}
node2_filters = {}
filter_node = self._get_filter_node()
if filter_node == node1:
node1_filters = self.get_active_filters()
elif filter_node == node2:
node2_filters = self.get_active_filters()
        # Create the tunnel on both sides
self._link_data.append({
"lport": self._node1_port,
"rhost": node2_host,
"rport": self._node2_port,
"type": "nio_udp",
"filters": node1_filters
})
yield from node1.post("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1), data=self._link_data[0], timeout=120)
self._link_data.append({
"lport": self._node2_port,
"rhost": node1_host,
"rport": self._node1_port,
"type": "nio_udp",
"filters": node2_filters
})
try:
yield from node2.post("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number2, port_number=port_number2), data=self._link_data[1], timeout=120)
except Exception as e:
# We clean the first NIO
yield from node1.delete("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1), timeout=120)
raise e
self._created = True | 0.003284 |
def load(*files):
"""
Loads configuration from one or more files by merging right to left.
:Parameters:
*files : `file-like`
A YAML file to read.
:Returns:
`dict` : the configuration document
"""
if len(files) == 0:
raise errors.ConfigError("No config files provided.")
doc = merge(*(yaml.safe_load(f) for f in files))
return propagate_defaults(doc) | 0.002387 |
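A short usage sketch of the loader above; the file names are illustrative, and merge() and propagate_defaults() are module internals not shown here, so which side wins on conflicts is defined by merge().
with open('defaults.yaml') as defaults, open('local.yaml') as local:
    config = load(defaults, local)
print(config.get('logging'))   # hypothetical key, just to show the result is a plain dict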
def process_response(self, request, response):
""" Sets the cache and deals with caching headers if needed
"""
if not self.should_cache(request, response):
# We don't need to update the cache, just return
return response
response = self.patch_headers(response)
self.set_cache(request, response)
return response | 0.007792 |
def pos(self, x=None, y=None):
u'''Move or query the window cursor.'''
if x is not None:
System.Console.CursorLeft=x
else:
x = System.Console.CursorLeft
if y is not None:
System.Console.CursorTop=y
else:
y = System.Console.CursorTop
return x, y | 0.011429 |
def all_state_variables_read(self):
""" recursive version of variables_read
"""
if self._all_state_variables_read is None:
self._all_state_variables_read = self._explore_functions(
lambda x: x.state_variables_read)
return self._all_state_variables_read | 0.00641 |
def rewrite(self, source, token=None, client=None):
"""Rewrite source blob into this one.
If :attr:`user_project` is set on the bucket, bills the API request
to that project.
:type source: :class:`Blob`
:param source: blob whose contents will be rewritten into this blob.
:type token: str
:param token: Optional. Token returned from an earlier, not-completed
call to rewrite the same source blob. If passed,
result will include updated status, total bytes written.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
:rtype: tuple
:returns: ``(token, bytes_rewritten, total_bytes)``, where ``token``
is a rewrite token (``None`` if the rewrite is complete),
``bytes_rewritten`` is the number of bytes rewritten so far,
and ``total_bytes`` is the total number of bytes to be
rewritten.
"""
client = self._require_client(client)
headers = _get_encryption_headers(self._encryption_key)
headers.update(_get_encryption_headers(source._encryption_key, source=True))
query_params = self._query_params
if "generation" in query_params:
del query_params["generation"]
if token:
query_params["rewriteToken"] = token
if source.generation:
query_params["sourceGeneration"] = source.generation
if self.kms_key_name is not None:
query_params["destinationKmsKeyName"] = self.kms_key_name
api_response = client._connection.api_request(
method="POST",
path=source.path + "/rewriteTo" + self.path,
query_params=query_params,
data=self._properties,
headers=headers,
_target_object=self,
)
rewritten = int(api_response["totalBytesRewritten"])
size = int(api_response["objectSize"])
# The resource key is set if and only if the API response is
# completely done. Additionally, there is no rewrite token to return
# in this case.
if api_response["done"]:
self._set_properties(api_response["resource"])
return None, rewritten, size
return api_response["rewriteToken"], rewritten, size | 0.001175 |
def get_re_experiment(case, minor=1):
""" Returns an experiment that uses the Roth-Erev learning method.
"""
locAdj = "ac"
experimentation = 0.55
recency = 0.3
tau = 100.0
decay = 0.999
nStates = 3 # stateless RE?
Pd0 = get_pd_max(case, profile)
Pd_min = get_pd_min(case, profile)
market = pyreto.SmartMarket(case, priceCap=cap, decommit=decommit,
auctionType=auctionType,
locationalAdjustment=locAdj)
experiment = pyreto.continuous.MarketExperiment([], [], market)
portfolios, sync_cond = get_portfolios3()
for gidx in portfolios:
g = [case.generators[i] for i in gidx]
learner = VariantRothErev(experimentation, recency)
learner.explorer = BoltzmannExplorer(tau, decay)
task, agent = get_discrete_task_agent(g, market, nStates, nOffer,
markups, withholds, maxSteps, learner, Pd0, Pd_min)
print "ALL ACTIONS:", len(task.env._allActions) * nStates
experiment.tasks.append(task)
experiment.agents.append(agent)
passive = [case.generators[i] for i in sync_cond]
passive[0].p_min = 0.001 # Avoid invalid offer withholding.
passive[0].p_max = 0.002
task, agent = get_zero_task_agent(passive, market, 1, maxSteps)
experiment.tasks.append(task)
experiment.agents.append(agent)
return experiment | 0.003531 |
def make_bindings_type(filenames,color_input,colorkey,file_dictionary,sidebar,bounds):
	# instantiate the main string block for the javascript section of the html code
string = ''
'''
# logic for instantiating variable colorkey input
if not colorkeyfields == False:
colorkey = 'selectedText'
'''
# iterating through each geojson filename
count = 0
for row in filenames:
color_input = ''
colorkeyfields = False
count += 1
filename = row
zoomrange = ['','']
# reading in geojson file into memory
with open(filename) as data_file:
data = json.load(data_file)
#pprint(data)
# getting the featuretype which will later dictate what javascript splices are needed
data = data['features']
data = data[0]
featuretype = data['geometry']
featuretype = featuretype['type']
data = data['properties']
# logic for overwriting colorkey fields if it exists for the filename
# in the file dictionary
try:
colorkeyfields = file_dictionary[filename][str('colorkeyfields')]
except KeyError:
colorkeyfields = False
except TypeError:
colorkeyfields = False
if not colorkeyfields == False:
if len(colorkeyfields) == 1:
colorkey = colorkeyfields[0]
colorkeyfields = False
try:
zoomrange = file_dictionary[filename][str('zooms')]
except KeyError:
zoomrange = ['','']
except TypeError:
zoomrange = ['','']
# code for if the file_dictionary input isn't false
#(i.e. getting the color inputs out of dictionary variable)
if file_dictionary==False and colorkey == False:
# logic for getting the colorline for different feature types
# the point feature requires a different line of code
if featuretype == 'Point':
colorline = get_colorline_marker(color_input)
else:
colorline = get_colorline_marker2(color_input)
# setting minzoom and maxzoom to be sent into js parsing
minzoom,maxzoom = zoomrange
# getting filter file dictionary if filter_dictonary exists
if not file_dictionary == False:
filter_file_dictionary = file_dictionary[filename]
else:
filter_file_dictionary = False
# checking to see if a chart_dictionary exists
try:
chart_dictionary = filter_file_dictionary['chart_dictionary']
except KeyError:
chart_dictionary = False
except TypeError:
chart_dictionary = False
# sending min and max zoom into the function that makes the zoom block
zoomblock = make_zoom_block(minzoom,maxzoom,count,colorkeyfields,bounds,filter_file_dictionary)
# logic for if a color key is given
# HINT look here for rgb raw color integration in a color line
if not colorkey == '':
if row == filenames[0]:
if colorkey == 'selectedText':
colorkey = """feature.properties[%s]""" % colorkey
else:
colorkey = """feature.properties['%s']""" % colorkey
if featuretype == 'Point':
colorline = get_colorline_marker(str(colorkey))
else:
colorline = get_colorline_marker2(str(colorkey))
		# this branch may be redundant with the check below; its purpose is unclear
		# verify before removing
if file_dictionary == False and colorkey == '':
if featuretype == 'Point':
colorline = get_colorline_marker(color_input)
else:
colorline = get_colorline_marker2(color_input)
if colorkey == '' and colorkeyfields == False:
if featuretype == 'Point':
colorline = get_colorline_marker(color_input)
else:
colorline = get_colorline_marker2(color_input)
# iterating through each header
headers = []
for row in data:
headers.append(str(row))
# logic for getting sidebar string that will be added in make_blockstr()
if sidebar == True:
sidebarstring = make_sidebar_string(headers,chart_dictionary)
else:
sidebarstring = ''
# section of javascript code dedicated to the adding the data layer
if count == 1:
blocky = """
function add%s() {
\n\tfunction addDataToMap%s(data, map) {
\t\tvar dataLayer = L.geoJson(data);
\t\tvar map = L.mapbox.map('map', 'mapbox.streets',{
\t\t\tzoom: 5
\t\t\t}).fitBounds(dataLayer.getBounds());
\t\tdataLayer.addTo(map)
\t}\n""" % (count,count)
else:
blocky = """
function add%s() {
\n\tfunction addDataToMap%s(data, map) {
\t\tvar dataLayer = L.geoJson(data);
\t\tdataLayer.addTo(map)
\t}\n""" % (count,count)
# making the string section that locally links the geojson file to the html document
'''
if not time == '':
preloc='\tfunction add%s() {\n' % (str(count))
loc = """\t$.getJSON('http://localhost:8000/%s',function(data) { addDataToMap%s(data,map); });""" % (filename,count)
loc = preloc + loc
else:
'''
loc = """\t$.getJSON('http://localhost:8000/%s',function(data) { addDataToMap%s(data,map); });""" % (filename,count)
# creating block to be added to the total or constituent string block
if featuretype == 'Point':
bindings = make_bindings(headers,count,colorline,featuretype,zoomblock,filename,sidebarstring,colorkeyfields)+'\n'
stringblock = blocky + loc + bindings
else:
bindings = make_bindings(headers,count,colorline,featuretype,zoomblock,filename,sidebarstring,colorkeyfields)+'\n'
stringblock = blocky + loc + bindings
# adding the stringblock (one geojson file javascript block) to the total string block
string += stringblock
# adding async function to end of string block
string = string + async_function_call(count)
return string | 0.049836 |
def objects_patch(self, bucket, key, info):
"""Updates the metadata associated with an object.
Args:
bucket: the name of the bucket containing the object.
key: the key of the object being updated.
info: the metadata to update.
Returns:
A parsed object information dictionary.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._OBJECT_PATH % (bucket, Api._escape_key(key)))
return google.datalab.utils.Http.request(url, method='PATCH', data=info,
credentials=self._credentials) | 0.001597 |
def set(self):
"""Set event flag to true and resolve future(s) returned by until_set()
Notes
-----
A call to set() may result in control being transferred to
done_callbacks attached to the future returned by until_set().
"""
self._flag = True
old_future = self._waiting_future
        # Replace _waiting_future with a fresh one in case someone woken up by set_result()
        # sets this AsyncEvent to False before waiting on it to be set again.
self._waiting_future = tornado_Future()
old_future.set_result(True) | 0.005042 |
def _make_minimal(dictionary):
"""
This function removes all the keys whose value is either None or an empty
dictionary.
"""
new_dict = {}
for key, value in dictionary.items():
if value is not None:
if isinstance(value, dict):
new_value = _make_minimal(value)
if new_value:
new_dict[key] = new_value
else:
new_dict[key] = value
return new_dict | 0.002123 |
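A quick illustration of the pruning behaviour above on plain dict data:
raw = {'a': 1, 'b': None, 'c': {'d': None}, 'e': {'f': 2, 'g': {}}}
print(_make_minimal(raw))   # {'a': 1, 'e': {'f': 2}}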
def _get_cgroup_from_proc(self, cgroup, pid, filename):
"""Find a specific cgroup file, containing metrics to extract."""
params = {
"file": filename,
}
return DockerUtil.find_cgroup_from_proc(self._mountpoints, pid, cgroup, self.docker_util._docker_root) % (params) | 0.009677 |