# file: the-stack_106_28477
import code
import os
import sys
from importlib import import_module
from pathlib import Path
from typing import Any, Callable, Iterable, List, Optional, TYPE_CHECKING
import click
try:
from dotenv import load_dotenv
except ImportError:
pass
from .__about__ import __version__
from .helpers import get_debug_flag
if TYPE_CHECKING:
from .app import Quart # noqa: F401
class NoAppException(click.UsageError):
def __init__(self) -> None:
super().__init__(
'Could not locate a Quart application as the QUART_APP environment '
'variable has either not been set or does not point to '
'a valid application.\n'
'Please set it to module_name:app_name or module_name:factory_function()\n'
'Note `quart` is not a valid module_name.'
)
class ScriptInfo:
def __init__(
self,
app_import_path: Optional[str]=None,
create_app: Optional[Callable]=None,
) -> None:
self.load_dotenv_if_exists()
self.app_import_path = app_import_path or os.environ.get('QUART_APP')
self.create_app = create_app
self.data: dict = {}
self._app: Optional['Quart'] = None
def load_app(self) -> 'Quart':
if self._app is None:
if self.create_app is not None:
self._app = self.create_app()
else:
try:
module_name, app_name = self.app_import_path.split(':', 1)
except ValueError:
module_name, app_name = self.app_import_path, 'app'
except AttributeError:
raise NoAppException()
module_path = Path(module_name).resolve()
sys.path.insert(0, str(module_path.parent))
if module_path.is_file():
import_name = module_path.with_suffix('').name
else:
import_name = module_path.name
try:
module = import_module(import_name)
except ModuleNotFoundError as error:
if error.name == import_name:
raise NoAppException()
else:
raise
try:
self._app = eval(app_name, vars(module))
except NameError:
raise NoAppException()
from .app import Quart
if not isinstance(self._app, Quart):
self._app = None
raise NoAppException()
if self._app is None:
raise NoAppException()
self._app.debug = get_debug_flag()
return self._app
def load_dotenv_if_exists(self) -> None:
if os.environ.get('QUART_SKIP_DOTENV') == '1':
return
if not Path(".env").is_file() and not Path(".quartenv").is_file():
return
try:
if Path(".env").is_file():
load_dotenv()
if Path(".quartenv").is_file():
load_dotenv(dotenv_path=Path(".") / ".quartenv")
except NameError:
print( # noqa: T001
"* Tip: There are .env files present. "
"Do \"pip install python-dotenv\" to use them."
)
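# For illustration of the loader above: QUART_APP=hello:app imports the module
# ``hello`` and uses its ``app`` attribute, QUART_APP=hello falls back to the
# attribute name 'app', and QUART_APP=hello:create_app() evaluates a factory
# call. A hypothetical hello.py is assumed:
#
#     # hello.py
#     from quart import Quart
#     app = Quart(__name__)
#
#     $ export QUART_APP=hello:app
#     $ quart run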
pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
class AppGroup(click.Group):
def group(self, *args: Any, **kwargs: Any) -> Any:
kwargs.setdefault('cls', AppGroup)
return super().group(*args, **kwargs)
def get_version(ctx: Any, param: Any, value: Any) -> None:
if not value or ctx.resilient_parsing:
return
message = f"Quart {__version__}"
click.echo(message, color=ctx.color)
ctx.exit()
version_option = click.Option(
['--version'], help='Show the Quart version', expose_value=False,
callback=get_version, is_flag=True, is_eager=True,
)
class QuartGroup(AppGroup):
def __init__(
self,
add_default_commands: bool=True,
create_app: Optional[Callable]=None,
add_version_option: bool=True,
*,
params: Optional[List]=None,
**kwargs: Any,
) -> None:
params = params or []
if add_version_option:
params.append(version_option)
super().__init__(params=params, **kwargs)
self.create_app = create_app
if add_default_commands:
self.add_command(run_command)
self.add_command(shell_command)
def get_command(self, ctx: click.Context, name: str) -> click.Command:
"""Return the relevant command given the context and name.
.. warning::
This differs substantially from Flask in that it allows
for the inbuilt commands to be overridden.
"""
info = ctx.ensure_object(ScriptInfo)
command = None
try:
command = info.load_app().cli.get_command(ctx, name)
except NoAppException:
pass
if command is None:
command = super().get_command(ctx, name)
return command
def list_commands(self, ctx: click.Context) -> Iterable[str]:
commands = set(click.Group.list_commands(self, ctx))
info = ctx.ensure_object(ScriptInfo)
commands.update(info.load_app().cli.list_commands(ctx))
return commands
def main(self, *args: Any, **kwargs: Any) -> Any:
kwargs.setdefault('obj', ScriptInfo(create_app=self.create_app))
return super().main(*args, **kwargs)
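# Because get_command above consults the loaded app's ``cli`` group before the
# built-in commands, an application can shadow the defaults. A hypothetical
# override (assuming ``app`` is the loaded Quart instance and ``app.cli`` is a
# click group, as used above):
#
#     @app.cli.command('run')
#     def custom_run():
#         ...  # takes precedence over the built-in run command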
@click.command('run', short_help='Start and run a development server.')
@click.option('--host', '-h', default='127.0.0.1', help='The interface to serve on.')
@click.option('--port', '-p', default=5000, help='The port to serve on.')
@pass_script_info
def run_command(info: ScriptInfo, host: str, port: int) -> None:
app = info.load_app()
app.run(debug=True, host=host, port=port, use_reloader=True)
@click.command('shell', short_help='Open a shell within the app context.')
@pass_script_info
def shell_command(info: ScriptInfo) -> None:
app = info.load_app()
context = {}
context.update(app.make_shell_context())
banner = f"Python {sys.version} on {sys.platform} running {app.import_name}"
code.interact(banner=banner, local=context)
cli = QuartGroup(
help="""\
Utility functions for Quart applications.
This will load the app defined in the QUART_APP environment
variable. The QUART_APP variable follows the Gunicorn standard of
`module_name:application_name` e.g. `hello:app`.
\b
{prefix}{cmd} QUART_APP=hello:app
{prefix}{cmd} QUART_DEBUG=1
{prefix}quart run
""".format(
cmd='export' if os.name == 'posix' else 'set',
prefix='$ ' if os.name == 'posix' else '> ',
),
)
def main(as_module: bool=False) -> None:
args = sys.argv[1:]
if as_module:
name = 'python -m quart'
sys.argv = ['-m', 'quart'] + args
else:
name = None
cli.main(args=args, prog_name=name)
if __name__ == '__main__':
main(as_module=True)
# file: the-stack_106_28478
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import datetime
import six
from bson.objectid import ObjectId
from girder import events, logger
from girder.api import rest
from girder.constants import SettingKey
from .model_base import Model
from girder.exceptions import GirderException, ValidationException
from girder.utility import RequestBodyStream
from girder.utility.progress import noProgress
class Upload(Model):
"""
This model stores temporary records for uploads that have been approved
but are not yet complete, so that they can be uploaded in chunks of
arbitrary size. The chunks must be uploaded in order.
"""
def initialize(self):
self.name = 'upload'
self.ensureIndex('sha512')
def _getChunkSize(self, minSize=32 * 1024**2):
"""
Return a chunk size to use in file uploads. This is the maximum of
the setting for minimum upload chunk size and the specified size.
:param minSize: the minimum size to return.
:return: chunk size to use for file uploads.
"""
from .setting import Setting
minChunkSize = Setting().get(SettingKey.UPLOAD_MINIMUM_CHUNK_SIZE)
return max(minChunkSize, minSize)
def uploadFromFile(self, obj, size, name, parentType=None, parent=None,
user=None, mimeType=None, reference=None,
assetstore=None, attachParent=False):
"""
This method wraps the entire upload process into a single function to
facilitate "internal" uploads from a file-like object. Example:
.. code-block:: python
size = os.path.getsize(filename)
with open(filename, 'rb') as f:
Upload().uploadFromFile(f, size, filename, 'item', parentItem, user)
:param obj: The object representing the content to upload.
:type obj: file-like
:param size: The total size of the file.
:type size: int
:param name: The name of the file to create.
:type name: str
:param parentType: The type of the parent: "folder" or "item".
:type parentType: str
:param parent: The parent (item or folder) to upload into.
:type parent: dict
:param user: The user who is creating the file.
:type user: dict
:param mimeType: MIME type of the file.
:type mimeType: str
:param reference: An optional reference string that will be sent to the
data.process event.
:type reference: str
:param assetstore: An optional assetstore to use to store the file. If
unspecified, the current assetstore is used.
:param attachParent: if True, instead of creating an item within the
parent or giving the file an itemId, set itemId to None and set
attachedToType and attachedToId instead (using the values passed in
parentType and parent). This is intended for files that shouldn't
appear as direct children of the parent, but are still associated
with it.
:type attachParent: boolean
"""
upload = self.createUpload(
user=user, name=name, parentType=parentType, parent=parent,
size=size, mimeType=mimeType, reference=reference,
assetstore=assetstore, attachParent=attachParent)
# The greater of 32 MB or the upload minimum chunk size.
chunkSize = self._getChunkSize()
while True:
data = obj.read(chunkSize)
if not data:
break
upload = self.handleChunk(upload, RequestBodyStream(six.BytesIO(data), len(data)))
return upload
def validate(self, doc):
if doc['size'] < 0:
raise ValidationException('File size must not be negative.')
if doc['received'] > doc['size']:
raise ValidationException('Received too many bytes.')
doc['updated'] = datetime.datetime.utcnow()
return doc
def handleChunk(self, upload, chunk, filter=False, user=None):
"""
When a chunk is uploaded, this should be called to process the chunk.
If this is the final chunk of the upload, this method will finalize
the upload automatically.
This method will return EITHER an upload or a file document. If this
is the final chunk of the upload, the upload is finalized and the created
file document is returned. Otherwise, it returns the upload document
with the relevant fields modified.
:param upload: The upload document to update.
:type upload: dict
:param chunk: The file object representing the chunk that was uploaded.
:type chunk: file
:param filter: Whether the model should be filtered. Only affects
behavior when returning a file model, not the upload model.
:type filter: bool
:param user: The current user. Only affects behavior if filter=True.
:type user: dict or None
"""
from .assetstore import Assetstore
from .file import File
from girder.utility import assetstore_utilities
assetstore = Assetstore().load(upload['assetstoreId'])
adapter = assetstore_utilities.getAssetstoreAdapter(assetstore)
upload = adapter.uploadChunk(upload, chunk)
if '_id' in upload or upload['received'] != upload['size']:
upload = self.save(upload)
# If upload is finished, we finalize it
if upload['received'] == upload['size']:
file = self.finalizeUpload(upload, assetstore)
if filter:
return File().filter(file, user=user)
else:
return file
else:
return upload
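# A sketch of driving handleChunk directly, mirroring uploadFromFile above;
# ``fileobj``, ``size``, ``item`` and ``user`` are assumed to exist:
#
#     upload = Upload().createUpload(
#         user=user, name='data.bin', parentType='item', parent=item, size=size)
#     while True:
#         data = fileobj.read(Upload()._getChunkSize())
#         if not data:
#             break
#         upload = Upload().handleChunk(
#             upload, RequestBodyStream(six.BytesIO(data), len(data)))
#     # once received == size, handleChunk returns the finalized file document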
def requestOffset(self, upload):
"""
Requests the offset that should be used to resume uploading. This
makes the request from the assetstore adapter.
"""
from .assetstore import Assetstore
from girder.utility import assetstore_utilities
assetstore = Assetstore().load(upload['assetstoreId'])
adapter = assetstore_utilities.getAssetstoreAdapter(assetstore)
return adapter.requestOffset(upload)
def finalizeUpload(self, upload, assetstore=None):
"""
This should only be called manually in the case of creating an
empty file, i.e. one that has no chunks.
:param upload: The upload document.
:type upload: dict
:param assetstore: If known, the containing assetstore for the upload.
:type assetstore: dict
:returns: The file object that was created.
"""
from .assetstore import Assetstore
from .file import File
from .item import Item
from girder.utility import assetstore_utilities
events.trigger('model.upload.finalize', upload)
if assetstore is None:
assetstore = Assetstore().load(upload['assetstoreId'])
if 'fileId' in upload: # Updating an existing file's contents
file = File().load(upload['fileId'], force=True)
# Delete the previous file contents from the containing assetstore
assetstore_utilities.getAssetstoreAdapter(
Assetstore().load(file['assetstoreId'])).deleteFile(file)
item = Item().load(file['itemId'], force=True)
File().propagateSizeChange(item, upload['size'] - file['size'])
# Update file info
file['creatorId'] = upload['userId']
file['created'] = datetime.datetime.utcnow()
file['assetstoreId'] = assetstore['_id']
file['size'] = upload['size']
# If the file was previously imported, it is no longer.
if file.get('imported'):
file['imported'] = False
else: # Creating a new file record
if upload.get('attachParent'):
item = None
elif upload['parentType'] == 'folder':
# Create a new item with the name of the file.
item = Item().createItem(
name=upload['name'], creator={'_id': upload['userId']},
folder={'_id': upload['parentId']})
elif upload['parentType'] == 'item':
item = Item().load(id=upload['parentId'], force=True)
else:
item = None
file = File().createFile(
item=item, name=upload['name'], size=upload['size'],
creator={'_id': upload['userId']}, assetstore=assetstore,
mimeType=upload['mimeType'], saveFile=False)
if upload.get('attachParent'):
if upload['parentType'] and upload['parentId']:
file['attachedToType'] = upload['parentType']
file['attachedToId'] = upload['parentId']
adapter = assetstore_utilities.getAssetstoreAdapter(assetstore)
file = adapter.finalizeUpload(upload, file)
event_document = {'file': file, 'upload': upload}
events.trigger('model.file.finalizeUpload.before', event_document)
file = File().save(file)
events.trigger('model.file.finalizeUpload.after', event_document)
if '_id' in upload:
self.remove(upload)
logger.info('Upload complete. Upload=%s File=%s User=%s' % (
upload['_id'], file['_id'], upload['userId']))
# Add an async event for handlers that wish to process this file.
eventParams = {
'file': file,
'assetstore': assetstore,
'currentToken': rest.getCurrentToken(),
'currentUser': rest.getCurrentUser()
}
if 'reference' in upload:
eventParams['reference'] = upload['reference']
events.daemon.trigger('data.process', eventParams)
return file
def getTargetAssetstore(self, modelType, resource, assetstore=None):
"""
Get the assetstore for a particular target resource, i.e. where new
data within the resource should be stored. In Girder core, this is
always just the current assetstore, but plugins may override this
behavior to allow for more granular assetstore selection.
:param modelType: the type of the resource that will be stored.
:param resource: the resource to be stored.
:param assetstore: if specified, the preferred assetstore where the
resource should be located. This may be overridden.
:returns: the selected assetstore.
"""
from .assetstore import Assetstore
eventParams = {'model': modelType, 'resource': resource}
event = events.trigger('model.upload.assetstore', eventParams)
if event.responses:
assetstore = event.responses[-1]
elif not assetstore:
assetstore = Assetstore().getCurrent()
return assetstore
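# A plugin can steer assetstore selection by answering the event triggered
# above; a rough sketch, assuming the usual girder event-binding API and a
# hypothetical ``my_assetstore``:
#
#     def _choose_assetstore(event):
#         if event.info['model'] == 'item':
#             event.addResponse(my_assetstore)
#
#     events.bind('model.upload.assetstore', 'my_plugin', _choose_assetstore)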
def createUploadToFile(self, file, user, size, reference=None,
assetstore=None):
"""
Creates a new upload record into a file that already exists. This
should be used when updating the contents of a file. Deletes any
previous file content from the assetstore it was in. This will upload
into the current assetstore rather than the assetstore the file was
previously contained in.
:param file: The file record to update.
:param user: The user performing this upload.
:param size: The size of the new file contents.
:param reference: An optional reference string that will be sent to the
data.process event.
:type reference: str
:param assetstore: An optional assetstore to use to store the file. If
unspecified, the current assetstore is used.
"""
from girder.utility import assetstore_utilities
assetstore = self.getTargetAssetstore('file', file, assetstore)
adapter = assetstore_utilities.getAssetstoreAdapter(assetstore)
now = datetime.datetime.utcnow()
upload = {
'created': now,
'updated': now,
'userId': user['_id'],
'fileId': file['_id'],
'assetstoreId': assetstore['_id'],
'size': size,
'name': file['name'],
'mimeType': file['mimeType'],
'received': 0
}
if reference is not None:
upload['reference'] = reference
upload = adapter.initUpload(upload)
return self.save(upload)
def createUpload(self, user, name, parentType, parent, size, mimeType=None,
reference=None, assetstore=None, attachParent=False,
save=True):
"""
Creates a new upload record, and creates its temporary file
that the chunks will be written into. Chunks should then be sent
in order using the _id of the upload document generated by this method.
:param user: The user performing the upload.
:type user: dict
:param name: The name of the file being uploaded.
:type name: str
:param parentType: The type of the parent being uploaded into.
:type parentType: str ('folder' or 'item')
:param parent: The document representing the parent.
:type parent: dict.
:param size: Total size in bytes of the whole file.
:type size: int
:param mimeType: The mimeType of the file.
:type mimeType: str
:param reference: An optional reference string that will be sent to the
data.process event.
:type reference: str
:param assetstore: An optional assetstore to use to store the file. If
unspecified, the current assetstore is used.
:param attachParent: if True, instead of creating an item within the
parent or giving the file an itemId, set itemId to None and set
attachedToType and attachedToId instead (using the values passed in
parentType and parent). This is intended for files that shouldn't
appear as direct children of the parent, but are still associated
with it.
:type attachParent: boolean
:param save: if True, save the document after it is created.
:type save: boolean
:returns: The upload document that was created.
"""
from girder.utility import assetstore_utilities
assetstore = self.getTargetAssetstore(parentType, parent, assetstore)
adapter = assetstore_utilities.getAssetstoreAdapter(assetstore)
now = datetime.datetime.utcnow()
if not mimeType:
mimeType = 'application/octet-stream'
upload = {
'created': now,
'updated': now,
'assetstoreId': assetstore['_id'],
'size': size,
'name': name,
'mimeType': mimeType,
'received': 0
}
if reference is not None:
upload['reference'] = reference
if parentType and parent:
upload['parentType'] = parentType.lower()
upload['parentId'] = parent['_id']
else:
upload['parentType'] = None
upload['parentId'] = None
if attachParent:
upload['attachParent'] = attachParent
if user:
upload['userId'] = user['_id']
else:
upload['userId'] = None
upload = adapter.initUpload(upload)
if save:
upload = self.save(upload)
return upload
def moveFileToAssetstore(self, file, user, assetstore, progress=noProgress):
"""
Move a file from whatever assetstore it is located in to a different
assetstore. This is done by downloading and re-uploading the file.
:param file: the file to move.
:param user: the user that is authorizing the move.
:param assetstore: the destination assetstore.
:param progress: optional progress context.
:returns: the original file if it is not moved, or the newly 'uploaded'
file if it is.
"""
from .file import File
if file['assetstoreId'] == assetstore['_id']:
return file
# Allow an event to cancel the move. This could be done, for instance,
# on files that could change dynamically.
event = events.trigger('model.upload.movefile', {
'file': file, 'assetstore': assetstore})
if event.defaultPrevented:
raise GirderException(
'The file %s could not be moved to assetstore %s' % (
file['_id'], assetstore['_id']))
# Create a new upload record into the existing file
upload = self.createUploadToFile(
file=file, user=user, size=int(file['size']), assetstore=assetstore)
if file['size'] == 0:
return File().filter(self.finalizeUpload(upload), user)
# Uploads need to be chunked for some assetstores
chunkSize = self._getChunkSize()
chunk = None
for data in File().download(file, headers=False)():
if chunk is not None:
chunk += data
else:
chunk = data
if len(chunk) >= chunkSize:
upload = self.handleChunk(upload, RequestBodyStream(six.BytesIO(chunk), len(chunk)))
progress.update(increment=len(chunk))
chunk = None
if chunk is not None:
upload = self.handleChunk(upload, RequestBodyStream(six.BytesIO(chunk), len(chunk)))
progress.update(increment=len(chunk))
return upload
def list(self, limit=0, offset=0, sort=None, filters=None):
"""
Search for uploads or simply list all visible uploads.
:param limit: Result set size limit.
:param offset: Offset into the results.
:param sort: The sort direction.
:param filters: if not None, a dictionary that can contain ids that
must match the uploads, plus a minimumAge value.
"""
query = {}
if filters:
for key in ('uploadId', 'userId', 'parentId', 'assetstoreId'):
if key in filters:
id = filters[key]
if id and not isinstance(id, ObjectId):
id = ObjectId(id)
if id:
if key == 'uploadId':
query['_id'] = id
else:
query[key] = id
if 'minimumAge' in filters:
query['updated'] = {
'$lte': datetime.datetime.utcnow() -
datetime.timedelta(days=float(filters['minimumAge']))
}
# Perform the find; we'll do access-based filtering of the result
# set afterward.
return self.find(query, limit=limit, sort=sort, offset=offset)
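# For example, to find uploads to one assetstore that have been idle for at
# least two days (values are placeholders):
#
#     stale = Upload().list(filters={
#         'assetstoreId': assetstore['_id'],
#         'minimumAge': 2,
#     })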
def cancelUpload(self, upload):
"""
Discard an upload that is in progress. This asks the assetstore to
discard the data, then removes the item from the upload database.
:param upload: The upload document to remove.
:type upload: dict
"""
from .assetstore import Assetstore
from girder.utility import assetstore_utilities
assetstore = Assetstore().load(upload['assetstoreId'])
# If the assetstore was deleted, the upload may still be in our
# database
if assetstore:
adapter = assetstore_utilities.getAssetstoreAdapter(assetstore)
try:
adapter.cancelUpload(upload)
except ValidationException:
# this assetstore is currently unreachable, so skip it
pass
if '_id' in upload:
self.remove(upload)
def untrackedUploads(self, action='list', assetstoreId=None):
"""
List or discard any uploads that an assetstore knows about but that our
database doesn't have in it.
:param action: 'delete' to discard the untracked uploads, anything else
to just return with a list of them.
:type action: str
:param assetstoreId: if present, only include untracked items from the
specified assetstore.
:type assetstoreId: str
:returns: a list of items that were removed or could be removed.
"""
from .assetstore import Assetstore
from girder.utility import assetstore_utilities
results = []
knownUploads = list(self.list())
# Iterate through all assetstores
for assetstore in Assetstore().list():
if assetstoreId and assetstoreId != assetstore['_id']:
continue
adapter = assetstore_utilities.getAssetstoreAdapter(assetstore)
try:
results.extend(adapter.untrackedUploads(
knownUploads, delete=(action == 'delete')))
except ValidationException:
# this assetstore is currently unreachable, so skip it
pass
return results
# file: the-stack_106_28482
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for ml vision product search surface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import copy
from googlecloudsdk.api_lib.ml.vision import api_utils
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope.concepts import concepts
from googlecloudsdk.command_lib.util.concepts import concept_parsers
from googlecloudsdk.core import exceptions as core_exceptions
class Error(core_exceptions.Error):
"""Base Error for this Module."""
class LabelsFormatError(Error):
"""Raises when labels are not formatted correctly."""
def ProductLabelsArgumentsForCreate():
return [
base.Argument(
'--product-labels',
metavar='KEY=VALUE',
type=arg_parsers.ArgList(min_length=1, element_type=str),
action='append',
help="""\
Labels that can be attached to the product. Labels are specified as
key-value pairs. Multiple values can be assigned to the same key and
one product may have up to 100 product labels.""")
]
def GetClearLabelsFlag(labels_name='product-labels'):
return base.Argument(
'--clear-{labels}'.format(labels=labels_name),
action='store_true',
help="""\
Remove all product labels. If `--add-{labels}` is also specified, then
`--clear-{labels}` is applied first.
For example, to remove all product labels:
$ {{command}} --clear-{labels}
To set the product labels to exactly "foo" and "baz":
$ {{command}} --clear-{labels} --add-{labels}='foo=bar,baz=qux'
""".format(labels=labels_name))
def GetRemoveLabelsFlag(labels_name='product-labels'):
return base.Argument(
'--remove-{labels}'.format(labels=labels_name),
metavar='KEY=VALUE',
type=arg_parsers.ArgList(),
action='append',
help="""\
List of product labels to remove. If `--add-{labels}` is also
specified, then `--remove-{labels}` is applied first. If a label does
not exist it is silently ignored. Because each key can be associated
with multiple values, both key and value need to be specified to
remove the product label.
For example, to remove the product labels 'foo=baz' and 'baz=qux':
$ {{command}} --remove-{labels}='foo=baz,baz=qux'
""".format(labels=labels_name))
def GetAddLabelsFlag(labels_name='product-labels'):
return base.Argument(
'--add-{}'.format(labels_name),
metavar='KEY=VALUE',
type=arg_parsers.ArgList(),
action='append',
help="""\
List of product labels to add. If a label already exists, it is
silently ignored.
For example, to add the product labels 'foo=baz' and 'baz=qux':
$ {{command}} --add-{labels}='foo=baz,baz=qux'
To change the product label 'foo=baz' to 'foo=qux':
$ {{command}} --remove-{labels}='foo=baz' --add-{labels}='foo=qux'
""".format(labels=labels_name))
def ProductLabelsArgumentsForUpdate():
remove_group = base.ArgumentGroup(mutex=True)
remove_group.AddArgument(GetClearLabelsFlag())
remove_group.AddArgument(GetRemoveLabelsFlag())
return [GetAddLabelsFlag(), remove_group]
def _FormatLabelsArgsToKeyValuePairs(labels):
"""Flattens the labels specified in cli to a list of (k, v) pairs."""
labels = [] if labels is None else labels
labels_flattened = []
for labels_sublist in labels:
labels_flattened.extend([label.strip() for label in labels_sublist])
labels_flattened_unique = list(set(labels_flattened))
return [_ExtractKeyValueFromLabel(label) for label in labels_flattened_unique]
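# For example, repeated --product-labels flags arrive as a list of lists and
# are flattened to unique (key, value) pairs; ordering is not preserved:
#
#     _FormatLabelsArgsToKeyValuePairs([['k1=v1'], ['k2=v2', 'k1=v1']])
#     # -> [('k1', 'v1'), ('k2', 'v2')] in some order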
def _FormatKeyValuePairsToLabelsMessage(labels):
"""Converts the list of (k, v) pairs into labels API message."""
sorted_labels = sorted(labels, key=lambda x: x[0] + x[1])
return [
api_utils.GetMessage().KeyValue(key=k, value=v) for k, v in sorted_labels
]
def _ExtractKeyValuePairsFromLabelsMessage(labels):
"""Extracts labels as a list of (k, v) pairs from the labels API message."""
labels = [] if labels is None else labels
return [(label.key, label.value) for label in labels]
def _ExtractKeyValueFromLabel(label):
"""Extracts key and value from label like 'key=value'.
Args:
label: str, the label to extract key and values, i.e. 'foo=buz'.
Returns:
(k, v): k is the substring before '=', v is the substring after '='.
Raises:
LabelsFormatError, raises when label is not formatted as 'key=value', or
key or value is empty.
"""
try:
k, v = label.split('=')
if k and v:
return k, v
raise ValueError('Key or value cannot be empty string.')
except ValueError:
raise LabelsFormatError('Each label must be formatted as "key=value".'
' key and value cannot be empty.')
def PrepareProductLabelsForProductCreationRequest(ref, args, request):
"""Sets labels if user specifies the --product-labels in product creation."""
del ref # Unused
if not args.IsSpecified('product_labels'):
return request
else:
labels = _FormatLabelsArgsToKeyValuePairs(args.product_labels)
request.product.productLabels = _FormatKeyValuePairsToLabelsMessage(labels)
return request
def _ClearLabels(existing_labels):
del existing_labels # Unused
return []
def _RemoveLabels(existing_labels, labels_to_remove):
"""Removes labels in labels_to_remove from existing_labels.
Args:
existing_labels: list of (k,v) pairs, existing labels.
labels_to_remove: list of (k, v) pairs, labels to remove.
Returns:
List of remaining labels after removal.
"""
return [label for label in existing_labels if label not in labels_to_remove]
def _AddLabels(existing_labels, labels_to_add):
"""Adds labels in labels_to_add to existing_labels."""
updated_labels = existing_labels + labels_to_add
return list(set(updated_labels))
def _LabelsUpdated(existing_labels, updated_labels):
return set(existing_labels) != set(updated_labels)
def _AddFieldToUpdateMask(field, patch_request):
update_mask = patch_request.updateMask
if update_mask:
if update_mask.count(field) == 0:
patch_request.updateMask = update_mask + ',' + field
else:
patch_request.updateMask = field
return patch_request
def _GetExistingProductLabels(product_ref):
"""Fetches the existing product labels to update."""
get_request_message = api_utils.GetMessage(
).VisionProjectsLocationsProductsGetRequest(name=product_ref.RelativeName())
product = api_utils.GetClient().projects_locations_products.Get(
get_request_message)
return product.productLabels
def UpdateLabelsAndUpdateMaskForProductUpdateRequest(product_ref, args,
patch_request):
"""Updates product labels field."""
if not args.IsSpecified('add_product_labels') and not args.IsSpecified(
'remove_product_labels') and not args.IsSpecified('clear_product_labels'):
return patch_request
existing_labels = _GetExistingProductLabels(product_ref)
existing_labels = _ExtractKeyValuePairsFromLabelsMessage(existing_labels)
existing_labels_copy = copy.deepcopy(existing_labels)
if args.clear_product_labels:
existing_labels = _ClearLabels(existing_labels)
if args.remove_product_labels:
labels_to_remove = _FormatLabelsArgsToKeyValuePairs(
args.remove_product_labels)
existing_labels = _RemoveLabels(existing_labels, labels_to_remove)
if args.add_product_labels:
labels_to_add = _FormatLabelsArgsToKeyValuePairs(args.add_product_labels)
existing_labels = _AddLabels(existing_labels, labels_to_add)
if _LabelsUpdated(existing_labels, existing_labels_copy):
patch_request = _AddFieldToUpdateMask('productLabels', patch_request)
updated_labels_message = _FormatKeyValuePairsToLabelsMessage(
existing_labels)
if patch_request.product is None:
patch_request.product = api_utils.GetMessage().Product()
patch_request.product.productLabels = updated_labels_message
return patch_request
def AddBoundingPolygonsArg():
return [
base.Argument(
'--bounding-polygon',
type=arg_parsers.ArgDict(
spec={
'vertices': list,
'normalized-vertices': list
},
min_length=1),
action='append',
help="""\
Bounding polygon around the areas of interest in the reference image.
If this field is empty, the system will try to detect regions of interest.
This flag is repeatable to specify multiple bounding polygons. At most 10
bounding polygons will be used.
A bounding polygon can be specified by a list of vertices or normalized
vertices or both. A vertex (x, y) represents a 2D point in the image. x, y
are integers and are in the same scale as the original image.
The normalized vertex coordinates are relative to original image and
range from 0 to 1.
Because of the complexity of this flag, it should be specified
with the `--flags-file`. See $ gcloud topic flags-file for details.
See the examples section for how to use `--bounding-polygon` in
`--flags-file`.""")
]
def AddBoundingPolygonsToReferenceImageCreationRequest(ref, args, request):
"""Populate the boundingPolygon message."""
del ref # Unused
if not args.IsSpecified('bounding_polygon'):
return request
bounding_polygon_message = []
for bounding_polygon in args.bounding_polygon:
bounding_polygon_message.append(
_PrepareBoundingPolygonMessage(bounding_polygon))
request.referenceImage.boundingPolys = bounding_polygon_message
return request
def _PrepareBoundingPolygonMessage(bounding_polygon):
"""Prepares the bounding polygons message given user's input."""
bounding_polygon_message = api_utils.GetMessage().BoundingPoly()
vertices_message = []
normalized_vertices_message = []
if 'vertices' in bounding_polygon:
for vertex in bounding_polygon['vertices']:
vertex_int = Vertex(vertex['x'], vertex['y'])
vertices_message.append(api_utils.GetMessage().Vertex(
x=vertex_int.x, y=vertex_int.y))
if 'normalized-vertices' in bounding_polygon:
for normalized_vertex in bounding_polygon['normalized-vertices']:
normalized_vertex_float = NormalizedVertex(normalized_vertex['x'],
normalized_vertex['y'])
normalized_vertices_message.append(
api_utils.GetMessage().NormalizedVertex(
x=normalized_vertex_float.x, y=normalized_vertex_float.y))
bounding_polygon_message.vertices = vertices_message
bounding_polygon_message.normalizedVertices = normalized_vertices_message
return bounding_polygon_message
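# A sketch of the --bounding-polygon value this expects when supplied through
# a --flags-file (the YAML below is illustrative only; keys follow the ArgDict
# spec above):
#
#     --bounding-polygon:
#     - vertices:
#       - {x: 100, y: 100}
#       - {x: 100, y: 200}
#       - {x: 200, y: 200}
#     - normalized-vertices:
#       - {x: 0.1, y: 0.1}
#       - {x: 0.9, y: 0.9}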
class BoundingPolygonFormatError(Error):
"""Raises when the specified polygon is incorrect."""
class VertexFormatError(BoundingPolygonFormatError):
"""Raises when the vertex is not specified correctly."""
class NormalizedVertexFormatError(BoundingPolygonFormatError):
"""Raises when the normalized vertex is not specified correctly."""
class Vertex(object):
"""Vertex to define the polygon.
Attributes:
x: int, x coordinate of a point on a image.
y: int, y coordinate of a point on a image.
"""
def __init__(self, x, y):
self.x = _ValidateAndConvertCoordinateToInteger(x)
self.y = _ValidateAndConvertCoordinateToInteger(y)
def _ValidateAndConvertCoordinateToInteger(coordinate):
try:
coordinate_int = int(coordinate)
if coordinate_int < 0:
raise ValueError
except ValueError:
raise VertexFormatError('Coordinates must be non-negative integers.')
return coordinate_int
class NormalizedVertex(object):
"""Normalized Vertex to define the polygon.
Attributes:
x: float, a float from 0 to 1, inclusive. x coordinate of a point on a
image.
y: float, a float from 0 to 1, inclusive. y coordinate of a point on a
image.
"""
def __init__(self, x, y):
self.x = _ValidateAndConvertCoordinateToFloat(x)
self.y = _ValidateAndConvertCoordinateToFloat(y)
def _ValidateAndConvertCoordinateToFloat(coordinate):
try:
coordinate_float = float(coordinate)
if coordinate_float < 0 or coordinate_float > 1:
raise ValueError
except ValueError:
raise NormalizedVertexFormatError(
'Coordinates must be floats from 0 to 1, inclusive')
return coordinate_float
def FixOperationNameInGetOperationRequest(ref, args, request):
del ref, args # Unused
name = request.name
if name.count('operations') == 2 and name.startswith('operations/'):
name = name[len('operations/'):]
request.name = name
return request
def _GetProductFullName(ref, args):
return 'projects/{}/locations/{}/products/{}'.format(
ref.projectsId, ref.locationsId, args.product)
def FixProductInAddProductToProductSetRequest(ref, args, request):
"""Sets product field to the full name of the product."""
product_name = _GetProductFullName(ref, args)
request.addProductToProductSetRequest.product = product_name
return request
def FixProductInRemoveProductFromProductSetRequest(ref, args, request):
"""Sets product field to the full name of the product."""
product_name = _GetProductFullName(ref, args)
request.removeProductFromProductSetRequest.product = product_name
return request
def FixNameInListProductsInProductSetRequest(ref, args, request):
"""Removes the redundant suffix."""
del ref, args # Unused
name = request.name
if name[-9:] == '/products':
name = name[:-9]
request.name = name
return request
def _LocationAttributeConfig(name='location'):
return concepts.ResourceParameterAttributeConfig(
name=name, help_text='The location of the {resource}.')
def _ProductSetAttributeConfig(name='product-set'):
return concepts.ResourceParameterAttributeConfig(
name=name, help_text='The product set for the {resource}.')
def _GetProductSetResourceSpec(resource_name='product set'):
return concepts.ResourceSpec(
'vision.projects.locations.productSets',
resource_name=resource_name,
productSetsId=_ProductSetAttributeConfig(),
locationsId=_LocationAttributeConfig(),
projectsId=concepts.DEFAULT_PROJECT_ATTRIBUTE_CONFIG,
)
def _GetProductSetConcept():
return concept_parsers.ConceptParser.ForResource(
'--product-set',
_GetProductSetResourceSpec(),
'The product set to be searched for similar images.',
required=True,
prefixes=True)
def ProductSetArgsForDetectProduct():
return [_GetProductSetConcept()]
def AddProductSetToDetectProductRequest(ref, args, request):
"""Adds productSet field to the detect product request."""
del ref # Unused
try:
single_request = request.requests[0]
except IndexError:
return request
product_set_ref = args.CONCEPTS.product_set.Parse()
product_set_name = product_set_ref.RelativeName()
single_request = _InstantiateProductSearchParams(single_request)
single_request.imageContext.productSearchParams.productSet = product_set_name
return request
def AddBoundingPolygonToDetectProductRequest(ref, args, request):
"""Adds the boundingPoly field to detect product request."""
del ref # Unused
try:
single_request = request.requests[0]
except IndexError:
return request
if not args.IsSpecified('bounding_polygon'):
return request
polygon = _ValidateAndExtractFromBoundingPolygonArgs(args.bounding_polygon)
if not polygon:
return request
single_request = _InstantiateProductSearchParams(single_request)
product_search_params = single_request.imageContext.productSearchParams
if not product_search_params.boundingPoly:
product_search_params.boundingPoly = api_utils.GetMessage().BoundingPoly()
bounding_poly = product_search_params.boundingPoly
if isinstance(polygon[0], Vertex):
vertices = [api_utils.GetMessage().Vertex(x=v.x, y=v.y) for v in polygon]
bounding_poly.vertices = vertices
else:
normalized_vertices = [
api_utils.GetMessage().NormalizedVertex(x=v.x, y=v.y) for v in polygon
]
bounding_poly.normalizedVertices = normalized_vertices
return request
def _InstantiateProductSearchParams(request):
if not request.imageContext:
request.imageContext = api_utils.GetMessage().ImageContext()
if not request.imageContext.productSearchParams:
request.imageContext.productSearchParams = api_utils.GetMessage(
).ProductSearchParams()
return request
def _ValidateAndExtractFromBoundingPolygonArgs(bounding_polygon_arg):
"""Extracts coordinates from users' input."""
if not bounding_polygon_arg:
return []
coordinates = bounding_polygon_arg.split(',')
grouped_coordinates = GroupCoordinates(coordinates)
if _IsPolygonSpecifiedAsVertex(coordinates):
return [Vertex(x, y) for x, y in grouped_coordinates]
if _IsPolygonSpecifiedAsNormalizedVertex(coordinates):
return [NormalizedVertex(x, y) for x, y in grouped_coordinates]
raise BoundingPolygonFormatError(
'Coordinates of normalized vertex should have decimal points, '
'Coordinates of vertex should be integers and cannot have decimal points.'
)
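# For example (illustrative values): '150,150,250,150,250,250' yields three
# Vertex objects, while '0.1,0.1,0.9,0.1,0.9,0.9' (every value has a decimal
# point) yields NormalizedVertex objects; mixing the two styles raises
# BoundingPolygonFormatError.
#
#     _ValidateAndExtractFromBoundingPolygonArgs('150,150,250,150,250,250')
#     # -> [Vertex(150, 150), Vertex(250, 150), Vertex(250, 250)]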
def GroupCoordinates(coordinates):
if len(coordinates) % 2 != 0:
raise BoundingPolygonFormatError(
'There must be an even number of values in the list.')
grouped_coordinates = []
for i in range(0, len(coordinates), 2):
grouped_coordinates.append((coordinates[i], coordinates[i + 1]))
return grouped_coordinates
def _IsPolygonSpecifiedAsVertex(bounding_polygon_coordinates):
coordinate_with_decimal_point = [
c for c in bounding_polygon_coordinates if '.' in c
]
return not coordinate_with_decimal_point
def _IsPolygonSpecifiedAsNormalizedVertex(bounding_polygon_coordinates):
coordinate_with_decimal_point = [
c for c in bounding_polygon_coordinates if '.' in c
]
return len(coordinate_with_decimal_point) == len(bounding_polygon_coordinates)
# file: the-stack_106_28483
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from airflow.hooks.base_hook import BaseHook
from airflow.exceptions import AirflowException
from azure.common.client_factory import get_client_from_auth_file
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.containerinstance import ContainerInstanceManagementClient
from zope.deprecation import deprecation
class AzureContainerInstanceHook(BaseHook):
"""
A hook to communicate with Azure Container Instances.
This hook requires a service principal in order to work.
After creating this service principal
(Azure Active Directory/App Registrations), you need to fill in the
client_id (Application ID) as login, the generated password as password,
and tenantId and subscriptionId in the extras field as JSON.
:param conn_id: connection id of a service principal which will be used
to start the container instance
:type conn_id: str
"""
def __init__(self, conn_id='azure_default'):
self.conn_id = conn_id
self.connection = self.get_conn()
def get_conn(self):
conn = self.get_connection(self.conn_id)
key_path = conn.extra_dejson.get('key_path', False)
if key_path:
if key_path.endswith('.json'):
self.log.info('Getting connection using a JSON key file.')
return get_client_from_auth_file(ContainerInstanceManagementClient,
key_path)
else:
raise AirflowException('Unrecognised extension for key file.')
if os.environ.get('AZURE_AUTH_LOCATION'):
key_path = os.environ.get('AZURE_AUTH_LOCATION')
if key_path.endswith('.json'):
self.log.info('Getting connection using a JSON key file.')
return get_client_from_auth_file(ContainerInstanceManagementClient,
key_path)
else:
raise AirflowException('Unrecognised extension for key file.')
credentials = ServicePrincipalCredentials(
client_id=conn.login,
secret=conn.password,
tenant=conn.extra_dejson['tenantId']
)
subscription_id = conn.extra_dejson['subscriptionId']
return ContainerInstanceManagementClient(credentials, str(subscription_id))
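# A sketch of the Airflow connection this hook reads (all values below are
# placeholders): login holds the service principal's client id, password its
# secret, and the extras JSON carries tenantId and subscriptionId:
#
#     Conn Id:   azure_default
#     Login:     <application/client id>
#     Password:  <client secret>
#     Extra:     {"tenantId": "<tenant id>", "subscriptionId": "<subscription id>"}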
def create_or_update(self, resource_group, name, container_group):
"""
Create a new container group
:param resource_group: the name of the resource group
:type resource_group: str
:param name: the name of the container group
:type name: str
:param container_group: the properties of the container group
:type container_group: azure.mgmt.containerinstance.models.ContainerGroup
"""
self.connection.container_groups.create_or_update(resource_group,
name,
container_group)
@deprecation.deprecate("get_state_exitcode_details() is deprecated. Related method is get_state()")
def get_state_exitcode_details(self, resource_group, name):
"""
Get the state and exitcode of a container group
:param resource_group: the name of the resource group
:type resource_group: str
:param name: the name of the container group
:type name: str
:return: A tuple with the state, exitcode, and details.
If the exitcode is unknown 0 is returned.
:rtype: tuple(state,exitcode,details)
"""
cg_state = self.get_state(resource_group, name)
c_state = cg_state.containers[0].instance_view.current_state
return (c_state.state, c_state.exit_code, c_state.detail_status)
@deprecation.deprecate("get_messages() is deprecated. Related method is get_state()")
def get_messages(self, resource_group, name):
"""
Get the messages of a container group
:param resource_group: the name of the resource group
:type resource_group: str
:param name: the name of the container group
:type name: str
:return: A list of the event messages
:rtype: list[str]
"""
cg_state = self.get_state(resource_group, name)
instance_view = cg_state.containers[0].instance_view
return [event.message for event in instance_view.events]
def get_state(self, resource_group, name):
"""
Get the state of a container group
:param resource_group: the name of the resource group
:type resource_group: str
:param name: the name of the container group
:type name: str
:return: ContainerGroup
:rtype: ~azure.mgmt.containerinstance.models.ContainerGroup
"""
return self.connection.container_groups.get(resource_group,
name,
raw=False)
def get_logs(self, resource_group, name, tail=1000):
"""
Get the tail from logs of a container group
:param resource_group: the name of the resource group
:type resource_group: str
:param name: the name of the container group
:type name: str
:param tail: the size of the tail
:type tail: int
:return: A list of log messages
:rtype: list[str]
"""
logs = self.connection.container.list_logs(resource_group, name, name, tail=tail)
return logs.content.splitlines(True)
def delete(self, resource_group, name):
"""
Delete a container group
:param resource_group: the name of the resource group
:type resource_group: str
:param name: the name of the container group
:type name: str
"""
self.connection.container_groups.delete(resource_group, name)
def exists(self, resource_group, name):
"""
Test if a container group exists
:param resource_group: the name of the resource group
:type resource_group: str
:param name: the name of the container group
:type name: str
"""
for container in self.connection.container_groups.list_by_resource_group(resource_group):
if container.name == name:
return True
return False
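# Illustrative use of the hook (resource group and container group names are
# placeholders):
#
#     hook = AzureContainerInstanceHook(conn_id='azure_default')
#     if hook.exists('my-rg', 'my-container-group'):
#         state = hook.get_state('my-rg', 'my-container-group')
#         for line in hook.get_logs('my-rg', 'my-container-group', tail=100):
#             print(line, end='')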
# file: the-stack_106_28486
import datetime
from abc import abstractmethod
import six
import sqlalchemy as db
from dagster import check, seven
from dagster.core.errors import DagsterEventLogInvalidForRun
from dagster.core.events import DagsterEventType
from dagster.core.events.log import EventRecord
from dagster.core.serdes import deserialize_json_to_dagster_namedtuple, serialize_dagster_namedtuple
from dagster.utils import datetime_as_float
from ..pipeline_run import PipelineRunStatsSnapshot
from .base import EventLogStorage
from .schema import SqlEventLogStorageTable
class SqlEventLogStorage(EventLogStorage):
'''Base class for SQL backed event log storages.
'''
@abstractmethod
def connect(self, run_id=None):
'''Context manager yielding a connection.
Args:
run_id (Optional[str]): Enables those storages which shard based on run_id, e.g.,
SqliteEventLogStorage, to connect appropriately.
'''
@abstractmethod
def upgrade(self):
'''This method should perform any schema or data migrations necessary to bring an
out-of-date instance of the storage up to date.
'''
def store_event(self, event):
'''Store an event corresponding to a pipeline run.
Args:
event (EventRecord): The event to store.
'''
check.inst_param(event, 'event', EventRecord)
dagster_event_type = None
if event.is_dagster_event:
dagster_event_type = event.dagster_event.event_type_value
run_id = event.run_id
# https://stackoverflow.com/a/54386260/324449
event_insert = SqlEventLogStorageTable.insert().values( # pylint: disable=no-value-for-parameter
run_id=run_id,
event=serialize_dagster_namedtuple(event),
dagster_event_type=dagster_event_type,
timestamp=datetime.datetime.fromtimestamp(event.timestamp),
)
with self.connect(run_id) as conn:
conn.execute(event_insert)
def get_logs_for_run(self, run_id, cursor=-1):
'''Get all of the logs corresponding to a run.
Args:
run_id (str): The id of the run for which to fetch logs.
cursor (Optional[int]): Zero-indexed logs will be returned starting from cursor + 1,
i.e., if cursor is -1, all logs will be returned. (default: -1)
'''
check.str_param(run_id, 'run_id')
check.int_param(cursor, 'cursor')
check.invariant(
cursor >= -1,
'Don\'t know what to do with negative cursor {cursor}'.format(cursor=cursor),
)
# cursor starts at 0 & auto-increment column starts at 1 so adjust
cursor = cursor + 1
query = (
db.select([SqlEventLogStorageTable.c.event])
.where(SqlEventLogStorageTable.c.run_id == run_id)
.where(SqlEventLogStorageTable.c.id > cursor)
.order_by(SqlEventLogStorageTable.c.id.asc())
)
with self.connect(run_id) as conn:
results = conn.execute(query).fetchall()
events = []
try:
for (json_str,) in results:
events.append(
check.inst_param(
deserialize_json_to_dagster_namedtuple(json_str), 'event', EventRecord
)
)
except (seven.JSONDecodeError, check.CheckError) as err:
six.raise_from(DagsterEventLogInvalidForRun(run_id=run_id), err)
return events
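# Cursor semantics, for illustration (``storage`` is assumed to be a concrete
# SqlEventLogStorage): cursor=-1 returns every event for the run; after
# consuming N events, passing cursor=N - 1 returns only newer events.
#
#     events = storage.get_logs_for_run(run_id)                      # all events
#     newer = storage.get_logs_for_run(run_id, cursor=len(events) - 1)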
def get_stats_for_run(self, run_id):
check.str_param(run_id, 'run_id')
query = (
db.select(
[
SqlEventLogStorageTable.c.dagster_event_type,
db.func.count().label('n_events_of_type'),
db.func.max(SqlEventLogStorageTable.c.timestamp).label('last_event_timestamp'),
]
)
.where(SqlEventLogStorageTable.c.run_id == run_id)
.group_by('dagster_event_type')
)
with self.connect(run_id) as conn:
results = conn.execute(query).fetchall()
try:
counts = {}
times = {}
for result in results:
(dagster_event_type, n_events_of_type, last_event_timestamp) = result
if dagster_event_type:
counts[dagster_event_type] = n_events_of_type
times[dagster_event_type] = last_event_timestamp
start_time = times.get(DagsterEventType.PIPELINE_START.value, None)
end_time = times.get(
DagsterEventType.PIPELINE_SUCCESS.value,
times.get(DagsterEventType.PIPELINE_FAILURE.value, None),
)
return PipelineRunStatsSnapshot(
run_id=run_id,
steps_succeeded=counts.get(DagsterEventType.STEP_SUCCESS.value, 0),
steps_failed=counts.get(DagsterEventType.STEP_FAILURE.value, 0),
materializations=counts.get(DagsterEventType.STEP_MATERIALIZATION.value, 0),
expectations=counts.get(DagsterEventType.STEP_EXPECTATION_RESULT.value, 0),
start_time=datetime_as_float(start_time) if start_time else None,
end_time=datetime_as_float(end_time) if end_time else None,
)
except (seven.JSONDecodeError, check.CheckError) as err:
six.raise_from(DagsterEventLogInvalidForRun(run_id=run_id), err)
def wipe(self):
'''Clears the event log storage.'''
# Should be overridden by SqliteEventLogStorage and other storages that shard based on
# run_id
# https://stackoverflow.com/a/54386260/324449
with self.connect() as conn:
conn.execute(SqlEventLogStorageTable.delete()) # pylint: disable=no-value-for-parameter
def delete_events(self, run_id):
check.str_param(run_id, 'run_id')
statement = SqlEventLogStorageTable.delete().where( # pylint: disable=no-value-for-parameter
SqlEventLogStorageTable.c.run_id == run_id
)
with self.connect(run_id) as conn:
conn.execute(statement)
@property
def is_persistent(self):
return True
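# A minimal concrete subclass sketch (class name and connection string are
# assumptions) showing the two abstract methods this base class requires:
#
#     from contextlib import contextmanager
#
#     class MyEventLogStorage(SqlEventLogStorage):
#         def __init__(self, conn_string):
#             self._engine = db.create_engine(conn_string)
#
#         @contextmanager
#         def connect(self, run_id=None):
#             conn = self._engine.connect()
#             try:
#                 yield conn
#             finally:
#                 conn.close()
#
#         def upgrade(self):
#             pass  # apply any pending schema migrations here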
# file: the-stack_106_28489
from __future__ import absolute_import, division, print_function, unicode_literals
import warnings
from canvasapi.account import Account
from canvasapi.course import Course
from canvasapi.course_epub_export import CourseEpubExport
from canvasapi.current_user import CurrentUser
from canvasapi.exceptions import RequiredFieldMissing
from canvasapi.file import File
from canvasapi.folder import Folder
from canvasapi.group import Group, GroupCategory
from canvasapi.paginated_list import PaginatedList
from canvasapi.requester import Requester
from canvasapi.section import Section
from canvasapi.user import User
from canvasapi.util import combine_kwargs, get_institution_url, obj_or_id
class Canvas(object):
"""
The main class to be instantiated to provide access to Canvas's API.
"""
def __init__(self, base_url, access_token):
"""
:param base_url: The base URL of the Canvas instance's API.
:type base_url: str
:param access_token: The API key to authenticate requests with.
:type access_token: str
"""
new_url = get_institution_url(base_url)
if "api/v1" in base_url:
warnings.warn(
"`base_url` no longer requires an API version be specified. "
"Rewriting `base_url` to {}".format(new_url),
DeprecationWarning,
)
if "http://" in base_url:
warnings.warn(
"Canvas may respond unexpectedly when making requests to HTTP "
"URLs. If possible, please use HTTPS.",
UserWarning,
)
if not base_url.strip():
warnings.warn(
"Canvas needs a valid URL, please provide a non-blank `base_url`.",
UserWarning,
)
if "://" not in base_url:
warnings.warn(
"An invalid `base_url` for the Canvas API Instance was used. "
"Please provide a valid HTTP or HTTPS URL if possible.",
UserWarning,
)
# Ensure that the user-supplied access token contains no leading or
# trailing spaces that may cause issues when communicating with
# the API.
access_token = access_token.strip()
base_url = new_url + "/api/v1/"
self.__requester = Requester(base_url, access_token)
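# Typical construction (URL and token are placeholders):
#
#     canvas = Canvas("https://<institution>.instructure.com", "<access token>")
#     print(canvas.conversations_unread_count())   # e.g. {'unread_count': '7'}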
def clear_course_nicknames(self):
"""
Remove all stored course nicknames.
:calls: `DELETE /api/v1/users/self/course_nicknames \
<https://canvas.instructure.com/doc/api/users.html#method.course_nicknames.clear>`_
:returns: True if the nicknames were cleared, False otherwise.
:rtype: bool
"""
response = self.__requester.request("DELETE", "users/self/course_nicknames")
return response.json().get("message") == "OK"
def conversations_batch_update(self, conversation_ids, event):
"""
:calls: `PUT /api/v1/conversations \
<https://canvas.instructure.com/doc/api/conversations.html#method.conversations.batch_update>`_
:param conversation_ids: List of conversations to update. Limited to 500 conversations.
:type conversation_ids: `list` of `str`
:param event: The action to take on each conversation.
:type event: `str`
:rtype: :class:`canvasapi.progress.Progress`
"""
from canvasapi.progress import Progress
ALLOWED_EVENTS = [
"mark_as_read",
"mark_as_unread",
"star",
"unstar",
"archive",
"destroy",
]
try:
if event not in ALLOWED_EVENTS:
raise ValueError(
"{} is not a valid action. Please use one of the following: {}".format(
event, ",".join(ALLOWED_EVENTS)
)
)
if len(conversation_ids) > 500:
raise ValueError(
"You have requested {} updates, which exceeds the limit of 500".format(
len(conversation_ids)
)
)
response = self.__requester.request(
"PUT",
"conversations",
event=event,
**{"conversation_ids[]": conversation_ids}
)
return_progress = Progress(self.__requester, response.json())
return return_progress
except ValueError as e:
return e
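# For illustration (conversation ids are placeholders); note that this method
# currently returns, rather than raises, a ValueError for invalid input:
#
#     progress = canvas.conversations_batch_update(
#         conversation_ids=['101', '102'], event='mark_as_read')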
def conversations_get_running_batches(self):
"""
Returns any currently running conversation batches for the current user.
Conversation batches are created when a bulk private message is sent
asynchronously.
:calls: `GET /api/v1/conversations/batches \
<https://canvas.instructure.com/doc/api/conversations.html#method.conversations.batches>`_
:returns: dict with list of batch objects - not currently a Class
:rtype: `dict`
"""
response = self.__requester.request("GET", "conversations/batches")
return response.json()
def conversations_mark_all_as_read(self):
"""
Mark all conversations as read.
:calls: `POST /api/v1/conversations/mark_all_as_read \
<https://canvas.instructure.com/doc/api/conversations.html#method.conversations.mark_all_as_read>`_
:rtype: `bool`
"""
response = self.__requester.request("POST", "conversations/mark_all_as_read")
return response.json() == {}
def conversations_unread_count(self):
"""
Get the number of unread conversations for the current user
:calls: `GET /api/v1/conversations/unread_count \
<https://canvas.instructure.com/doc/api/conversations.html#method.conversations.unread_count>`_
:returns: simple object with unread_count, example: {'unread_count': '7'}
:rtype: `dict`
"""
response = self.__requester.request("GET", "conversations/unread_count")
return response.json()
def create_account(self, **kwargs):
"""
Create a new root account.
:calls: `POST /api/v1/accounts \
<https://canvas.instructure.com/doc/api/accounts.html#method.accounts.create>`_
:rtype: :class:`canvasapi.account.Account`
"""
response = self.__requester.request(
"POST", "accounts", _kwargs=combine_kwargs(**kwargs)
)
return Account(self.__requester, response.json())
def create_appointment_group(self, appointment_group, **kwargs):
"""
Create a new Appointment Group.
:calls: `POST /api/v1/appointment_groups \
<https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.create>`_
:param appointment_group: The attributes of the appointment group.
:type appointment_group: `dict`
:param title: The title of the appointment group.
:type title: `str`
:rtype: :class:`canvasapi.appointment_group.AppointmentGroup`
"""
from canvasapi.appointment_group import AppointmentGroup
if (
isinstance(appointment_group, dict)
and "context_codes" in appointment_group
and "title" in appointment_group
):
kwargs["appointment_group"] = appointment_group
elif (
isinstance(appointment_group, dict)
and "context_codes" not in appointment_group
):
raise RequiredFieldMissing(
"Dictionary with key 'context_codes' is missing."
)
elif isinstance(appointment_group, dict) and "title" not in appointment_group:
raise RequiredFieldMissing("Dictionary with key 'title' is missing.")
response = self.__requester.request(
"POST", "appointment_groups", _kwargs=combine_kwargs(**kwargs)
)
return AppointmentGroup(self.__requester, response.json())
def create_calendar_event(self, calendar_event, **kwargs):
"""
Create a new Calendar Event.
:calls: `POST /api/v1/calendar_events \
<https://canvas.instructure.com/doc/api/calendar_events.html#method.calendar_events_api.create>`_
:param calendar_event: The attributes of the calendar event.
:type calendar_event: `dict`
:rtype: :class:`canvasapi.calendar_event.CalendarEvent`
"""
from canvasapi.calendar_event import CalendarEvent
if isinstance(calendar_event, dict) and "context_code" in calendar_event:
kwargs["calendar_event"] = calendar_event
else:
raise RequiredFieldMissing(
"Dictionary with key 'context_codes' is required."
)
response = self.__requester.request(
"POST", "calendar_events", _kwargs=combine_kwargs(**kwargs)
)
return CalendarEvent(self.__requester, response.json())
def create_conversation(self, recipients, body, **kwargs):
"""
Create a new Conversation.
:calls: `POST /api/v1/conversations \
<https://canvas.instructure.com/doc/api/conversations.html#method.conversations.create>`_
:param recipients: An array of recipient ids.
These may be user ids or course/group ids prefixed
with 'course\\_' or 'group\\_' respectively,
e.g. recipients=['1', '2', 'course_3']
:type recipients: `list` of `str`
:param body: The body of the message being added.
:type body: `str`
:rtype: list of :class:`canvasapi.conversation.Conversation`
"""
from canvasapi.conversation import Conversation
kwargs["recipients"] = recipients
kwargs["body"] = body
response = self.__requester.request(
"POST", "conversations", _kwargs=combine_kwargs(**kwargs)
)
return [Conversation(self.__requester, convo) for convo in response.json()]
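    # A usage sketch, assuming `canvas` is an authenticated canvasapi.Canvas instance;
    # the recipient IDs mirror the docstring example above:
    #   conversations = canvas.create_conversation(['1', '2', 'course_3'], 'Hello everyone!')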
def create_group(self, **kwargs):
"""
Create a group
:calls: `POST /api/v1/groups/ \
<https://canvas.instructure.com/doc/api/groups.html#method.groups.create>`_
:rtype: :class:`canvasapi.group.Group`
"""
response = self.__requester.request(
"POST", "groups", _kwargs=combine_kwargs(**kwargs)
)
return Group(self.__requester, response.json())
def create_planner_note(self, **kwargs):
"""
Create a planner note for the current user
:calls: `POST /api/v1/planner_notes \
<https://canvas.instructure.com/doc/api/planner.html#method.planner_notes.create>`_
:rtype: :class:`canvasapi.planner.PlannerNote`
"""
from canvasapi.planner import PlannerNote
response = self.__requester.request(
"POST", "planner_notes", _kwargs=combine_kwargs(**kwargs)
)
return PlannerNote(self.__requester, response.json())
def create_planner_override(self, plannable_type, plannable_id, **kwargs):
"""
Create a planner override for the current user
:calls: `POST /api/v1/planner/overrides \
<https://canvas.instructure.com/doc/api/planner.html#method.planner_overrides.create>`_
:param plannable_type: Type of the item that you are overriding in the planner
:type plannable_type: str
:param plannable_id: ID of the item that you are overriding in the planner
:type plannable_id: int or :class:`canvasapi.planner.PlannerOverride`
:rtype: :class:`canvasapi.planner.PlannerOverride`
"""
from canvasapi.planner import PlannerOverride
from six import text_type, integer_types
if isinstance(plannable_type, text_type):
kwargs["plannable_type"] = plannable_type
else:
raise RequiredFieldMissing("plannable_type is required as a str.")
if isinstance(plannable_id, integer_types):
kwargs["plannable_id"] = plannable_id
else:
raise RequiredFieldMissing("plannable_id is required as an int.")
response = self.__requester.request(
"POST", "planner/overrides", _kwargs=combine_kwargs(**kwargs)
)
return PlannerOverride(self.__requester, response.json())
def create_poll(self, poll, **kwargs):
"""
Create a new poll for the current user.
:calls: `POST /api/v1/polls \
<https://canvas.instructure.com/doc/api/polls.html#method.polling/polls.create>`_
:param poll: List of polls to create. `'question'` key is required.
:type poll: list of dict
:rtype: :class:`canvasapi.poll.Poll`
"""
from canvasapi.poll import Poll
if (
isinstance(poll, list)
and isinstance(poll[0], dict)
and "question" in poll[0]
):
kwargs["poll"] = poll
else:
raise RequiredFieldMissing(
"Dictionary with key 'question' and is required."
)
response = self.__requester.request(
"POST", "polls", _kwargs=combine_kwargs(**kwargs)
)
return Poll(self.__requester, response.json()["polls"][0])
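    # A usage sketch, assuming `canvas` is an authenticated canvasapi.Canvas instance;
    # the poll payload is a hypothetical example of the expected list-of-dicts shape:
    #   new_poll = canvas.create_poll([{'question': 'Is today Monday?'}])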
def get_account(self, account, use_sis_id=False, **kwargs):
"""
Retrieve information on an individual account.
:calls: `GET /api/v1/accounts/:id \
<https://canvas.instructure.com/doc/api/accounts.html#method.accounts.show>`_
:param account: The object or ID of the account to retrieve.
:type account: int, str or :class:`canvasapi.account.Account`
:param use_sis_id: Whether or not account_id is an sis ID.
Defaults to `False`.
:type use_sis_id: bool
:rtype: :class:`canvasapi.account.Account`
"""
if use_sis_id:
account_id = account
uri_str = "accounts/sis_account_id:{}"
else:
account_id = obj_or_id(account, "account", (Account,))
uri_str = "accounts/{}"
response = self.__requester.request(
"GET", uri_str.format(account_id), _kwargs=combine_kwargs(**kwargs)
)
return Account(self.__requester, response.json())
def get_accounts(self, **kwargs):
"""
List accounts that the current user can view or manage.
Typically, students and teachers will get an empty list in
response. Only account admins can view the accounts that they
are in.
:calls: `GET /api/v1/accounts \
<https://canvas.instructure.com/doc/api/accounts.html#method.accounts.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.account.Account`
"""
return PaginatedList(
Account,
self.__requester,
"GET",
"accounts",
_kwargs=combine_kwargs(**kwargs),
)
def get_activity_stream_summary(self):
"""
Return a summary of the current user's global activity stream.
:calls: `GET /api/v1/users/self/activity_stream/summary \
<https://canvas.instructure.com/doc/api/users.html#method.users.activity_stream_summary>`_
:rtype: dict
"""
response = self.__requester.request("GET", "users/self/activity_stream/summary")
return response.json()
def get_announcements(self, **kwargs):
"""
List announcements.
:calls: `GET /api/v1/announcements \
<https://canvas.instructure.com/doc/api/announcements.html#method.announcements_api.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.discussion_topic.DiscussionTopic`
"""
from canvasapi.discussion_topic import DiscussionTopic
return PaginatedList(
DiscussionTopic,
self.__requester,
"GET",
"announcements",
_kwargs=combine_kwargs(**kwargs),
)
def get_appointment_group(self, appointment_group):
"""
Return single Appointment Group by id
:calls: `GET /api/v1/appointment_groups/:id \
<https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.show>`_
:param appointment_group: The ID of the appointment group.
:type appointment_group: :class:`canvasapi.appointment_group.AppointmentGroup` or int
:rtype: :class:`canvasapi.appointment_group.AppointmentGroup`
"""
from canvasapi.appointment_group import AppointmentGroup
appointment_group_id = obj_or_id(
appointment_group, "appointment_group", (AppointmentGroup,)
)
response = self.__requester.request(
"GET", "appointment_groups/{}".format(appointment_group_id)
)
return AppointmentGroup(self.__requester, response.json())
def get_appointment_groups(self, **kwargs):
"""
List appointment groups.
:calls: `GET /api/v1/appointment_groups \
<https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.appointment_group.AppointmentGroup`
"""
from canvasapi.appointment_group import AppointmentGroup
return PaginatedList(
AppointmentGroup,
self.__requester,
"GET",
"appointment_groups",
_kwargs=combine_kwargs(**kwargs),
)
def get_brand_variables(self):
"""
Get account brand variables
:calls: `GET /api/v1/brand_variables \
<https://canvas.instructure.com/doc/api/brand_configs.html>`_
:returns: JSON with brand variables for the account.
:rtype: dict
"""
response = self.__requester.request("GET", "brand_variables")
return response.json()
def get_calendar_event(self, calendar_event):
"""
Return single Calendar Event by id
:calls: `GET /api/v1/calendar_events/:id \
<https://canvas.instructure.com/doc/api/calendar_events.html#method.calendar_events_api.show>`_
:param calendar_event: The object or ID of the calendar event.
:type calendar_event: :class:`canvasapi.calendar_event.CalendarEvent` or int
:rtype: :class:`canvasapi.calendar_event.CalendarEvent`
"""
from canvasapi.calendar_event import CalendarEvent
calendar_event_id = obj_or_id(
calendar_event, "calendar_event", (CalendarEvent,)
)
response = self.__requester.request(
"GET", "calendar_events/{}".format(calendar_event_id)
)
return CalendarEvent(self.__requester, response.json())
def get_calendar_events(self, **kwargs):
"""
List calendar events.
:calls: `GET /api/v1/calendar_events \
<https://canvas.instructure.com/doc/api/calendar_events.html#method.calendar_events_api.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.calendar_event.CalendarEvent`
"""
from canvasapi.calendar_event import CalendarEvent
return PaginatedList(
CalendarEvent,
self.__requester,
"GET",
"calendar_events",
_kwargs=combine_kwargs(**kwargs),
)
def get_conversation(self, conversation, **kwargs):
"""
Return single Conversation
:calls: `GET /api/v1/conversations/:id \
<https://canvas.instructure.com/doc/api/conversations.html#method.conversations.show>`_
:param conversation: The object or ID of the conversation.
:type conversation: :class:`canvasapi.conversation.Conversation` or int
:rtype: :class:`canvasapi.conversation.Conversation`
"""
from canvasapi.conversation import Conversation
conversation_id = obj_or_id(conversation, "conversation", (Conversation,))
response = self.__requester.request(
"GET",
"conversations/{}".format(conversation_id),
_kwargs=combine_kwargs(**kwargs),
)
return Conversation(self.__requester, response.json())
def get_conversations(self, **kwargs):
"""
        Return a list of conversations for the current user, most recent ones first.
:calls: `GET /api/v1/conversations \
<https://canvas.instructure.com/doc/api/conversations.html#method.conversations.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of \
:class:`canvasapi.conversation.Conversation`
"""
from canvasapi.conversation import Conversation
return PaginatedList(
Conversation,
self.__requester,
"GET",
"conversations",
_kwargs=combine_kwargs(**kwargs),
)
def get_course(self, course, use_sis_id=False, **kwargs):
"""
Retrieve a course by its ID.
:calls: `GET /api/v1/courses/:id \
<https://canvas.instructure.com/doc/api/courses.html#method.courses.show>`_
:param course: The object or ID of the course to retrieve.
:type course: int, str or :class:`canvasapi.course.Course`
:param use_sis_id: Whether or not course_id is an sis ID.
Defaults to `False`.
:type use_sis_id: bool
:rtype: :class:`canvasapi.course.Course`
"""
if use_sis_id:
course_id = course
uri_str = "courses/sis_course_id:{}"
else:
course_id = obj_or_id(course, "course", (Course,))
uri_str = "courses/{}"
response = self.__requester.request(
"GET", uri_str.format(course_id), _kwargs=combine_kwargs(**kwargs)
)
return Course(self.__requester, response.json())
def get_course_accounts(self):
"""
List accounts that the current user can view through their
admin course enrollments (Teacher, TA or designer enrollments).
Only returns `id`, `name`, `workflow_state`, `root_account_id`
and `parent_account_id`.
:calls: `GET /api/v1/course_accounts \
<https://canvas.instructure.com/doc/api/accounts.html#method.accounts.course_accounts>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.account.Account`
"""
return PaginatedList(Account, self.__requester, "GET", "course_accounts")
def get_course_nickname(self, course):
"""
Return the nickname for the given course.
:calls: `GET /api/v1/users/self/course_nicknames/:course_id \
<https://canvas.instructure.com/doc/api/users.html#method.course_nicknames.show>`_
:param course: The object or ID of the course.
:type course: :class:`canvasapi.course.Course` or int
:rtype: :class:`canvasapi.course.CourseNickname`
"""
from canvasapi.course import CourseNickname
course_id = obj_or_id(course, "course", (Course,))
response = self.__requester.request(
"GET", "users/self/course_nicknames/{}".format(course_id)
)
return CourseNickname(self.__requester, response.json())
def get_course_nicknames(self):
"""
        Return all course nicknames set by the current user.
:calls: `GET /api/v1/users/self/course_nicknames \
<https://canvas.instructure.com/doc/api/users.html#method.course_nicknames.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.course.CourseNickname`
"""
from canvasapi.course import CourseNickname
return PaginatedList(
CourseNickname, self.__requester, "GET", "users/self/course_nicknames"
)
def get_courses(self, **kwargs):
"""
Return a list of active courses for the current user.
:calls: `GET /api/v1/courses \
<https://canvas.instructure.com/doc/api/courses.html#method.courses.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.course.Course`
"""
return PaginatedList(
Course, self.__requester, "GET", "courses", _kwargs=combine_kwargs(**kwargs)
)
def get_current_user(self):
return CurrentUser(self.__requester)
def get_epub_exports(self, **kwargs):
"""
Return a list of epub exports for the associated course.
:calls: `GET /api/v1/epub_exports\
<https://canvas.instructure.com/doc/api/e_pub_exports.html#method.epub_exports.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.course_epub_export.CourseEpubExport`
"""
return PaginatedList(
CourseEpubExport,
self.__requester,
"GET",
"epub_exports",
_root="courses",
            _kwargs=combine_kwargs(**kwargs),
)
def get_file(self, file, **kwargs):
"""
Return the standard attachment json object for a file.
:calls: `GET /api/v1/files/:id \
<https://canvas.instructure.com/doc/api/files.html#method.files.api_show>`_
:param file: The object or ID of the file to retrieve.
:type file: :class:`canvasapi.file.File` or int
:rtype: :class:`canvasapi.file.File`
"""
file_id = obj_or_id(file, "file", (File,))
response = self.__requester.request(
"GET", "files/{}".format(file_id), _kwargs=combine_kwargs(**kwargs)
)
return File(self.__requester, response.json())
def get_folder(self, folder):
"""
Return the details for a folder
:calls: `GET /api/v1/folders/:id \
<https://canvas.instructure.com/doc/api/files.html#method.folders.show>`_
:param folder: The object or ID of the folder to retrieve.
:type folder: :class:`canvasapi.folder.Folder` or int
:rtype: :class:`canvasapi.folder.Folder`
"""
folder_id = obj_or_id(folder, "folder", (Folder,))
response = self.__requester.request("GET", "folders/{}".format(folder_id))
return Folder(self.__requester, response.json())
def get_group(self, group, use_sis_id=False, **kwargs):
"""
Return the data for a single group. If the caller does not
have permission to view the group a 401 will be returned.
:calls: `GET /api/v1/groups/:group_id \
<https://canvas.instructure.com/doc/api/groups.html#method.groups.show>`_
:param group: The object or ID of the group to get.
:type group: :class:`canvasapi.group.Group` or int
:param use_sis_id: Whether or not group_id is an sis ID.
Defaults to `False`.
:type use_sis_id: bool
:rtype: :class:`canvasapi.group.Group`
"""
if use_sis_id:
group_id = group
uri_str = "groups/sis_group_id:{}"
else:
group_id = obj_or_id(group, "group", (Group,))
uri_str = "groups/{}"
response = self.__requester.request(
"GET", uri_str.format(group_id), _kwargs=combine_kwargs(**kwargs)
)
return Group(self.__requester, response.json())
def get_group_category(self, category):
"""
Get a single group category.
:calls: `GET /api/v1/group_categories/:group_category_id \
<https://canvas.instructure.com/doc/api/group_categories.html#method.group_categories.show>`_
:param category: The object or ID of the category.
:type category: :class:`canvasapi.group.GroupCategory` or int
:rtype: :class:`canvasapi.group.GroupCategory`
"""
category_id = obj_or_id(category, "category", (GroupCategory,))
response = self.__requester.request(
"GET", "group_categories/{}".format(category_id)
)
return GroupCategory(self.__requester, response.json())
def get_group_participants(self, appointment_group, **kwargs):
"""
List student group participants in this appointment group.
:calls: `GET /api/v1/appointment_groups/:id/groups \
<https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.groups>`_
:param appointment_group: The object or ID of the appointment group.
:type appointment_group: :class:`canvasapi.appointment_group.AppointmentGroup` or int
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.group.Group`
"""
from canvasapi.appointment_group import AppointmentGroup
from canvasapi.group import Group
appointment_group_id = obj_or_id(
appointment_group, "appointment_group", (AppointmentGroup,)
)
return PaginatedList(
Group,
self.__requester,
"GET",
"appointment_groups/{}/groups".format(appointment_group_id),
_kwargs=combine_kwargs(**kwargs),
)
def get_outcome(self, outcome):
"""
Returns the details of the outcome with the given id.
:calls: `GET /api/v1/outcomes/:id \
<https://canvas.instructure.com/doc/api/outcomes.html#method.outcomes_api.show>`_
:param outcome: The outcome object or ID to return.
:type outcome: :class:`canvasapi.outcome.Outcome` or int
:returns: An Outcome object.
:rtype: :class:`canvasapi.outcome.Outcome`
"""
from canvasapi.outcome import Outcome
outcome_id = obj_or_id(outcome, "outcome", (Outcome,))
response = self.__requester.request("GET", "outcomes/{}".format(outcome_id))
return Outcome(self.__requester, response.json())
def get_outcome_group(self, group):
"""
Returns the details of the Outcome Group with the given id.
:calls: `GET /api/v1/global/outcome_groups/:id \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.show>`_
:param group: The outcome group object or ID to return.
:type group: :class:`canvasapi.outcome.OutcomeGroup` or int
:returns: An outcome group object.
:rtype: :class:`canvasapi.outcome.OutcomeGroup`
"""
from canvasapi.outcome import OutcomeGroup
outcome_group_id = obj_or_id(group, "group", (OutcomeGroup,))
response = self.__requester.request(
"GET", "global/outcome_groups/{}".format(outcome_group_id)
)
return OutcomeGroup(self.__requester, response.json())
def get_planner_note(self, planner_note, **kwargs):
"""
Retrieve a planner note for the current user
:calls: `GET /api/v1/planner_notes/:id \
<https://canvas.instructure.com/doc/api/planner.html#method.planner_notes.show>`_
:param planner_note: The ID of the planner note to retrieve.
:type planner_note: int or :class:`canvasapi.planner.PlannerNote`
:rtype: :class:`canvasapi.planner.PlannerNote`
"""
from canvasapi.planner import PlannerNote
if isinstance(planner_note, int) or isinstance(planner_note, PlannerNote):
planner_note_id = obj_or_id(planner_note, "planner_note", (PlannerNote,))
else:
raise RequiredFieldMissing(
"planner_note is required as an object or as an int."
)
response = self.__requester.request(
"GET",
"planner_notes/{}".format(planner_note_id),
_kwargs=combine_kwargs(**kwargs),
)
return PlannerNote(self.__requester, response.json())
def get_planner_notes(self, **kwargs):
"""
Retrieve the paginated list of planner notes
:calls: `GET /api/v1/planner_notes \
<https://canvas.instructure.com/doc/api/planner.html#method.planner_notes.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.planner.PlannerNote`
"""
from canvasapi.planner import PlannerNote
return PaginatedList(
PlannerNote,
self.__requester,
"GET",
"planner_notes",
_kwargs=combine_kwargs(**kwargs),
)
def get_planner_override(self, planner_override, **kwargs):
"""
Retrieve a planner override for the current user
:calls: `GET /api/v1/planner/overrides/:id \
<https://canvas.instructure.com/doc/api/planner.html#method.planner_overrides.show>`_
:param planner_override: The override or the ID of the planner override to retrieve.
:type planner_override: int or :class:`canvasapi.planner.PlannerOverride`
:rtype: :class:`canvasapi.planner.PlannerOverride`
"""
from canvasapi.planner import PlannerOverride
if isinstance(planner_override, int) or isinstance(
planner_override, PlannerOverride
):
planner_override_id = obj_or_id(
planner_override, "planner_override", (PlannerOverride,)
)
else:
raise RequiredFieldMissing(
"planner_override is required as an object or as an int."
)
response = self.__requester.request(
"GET",
"planner/overrides/{}".format(planner_override_id),
_kwargs=combine_kwargs(**kwargs),
)
return PlannerOverride(self.__requester, response.json())
def get_planner_overrides(self, **kwargs):
"""
        Retrieve the paginated list of planner overrides for the current user
:calls: `GET /api/v1/planner/overrides \
<https://canvas.instructure.com/doc/api/planner.html#method.planner_overrides.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.planner.PlannerOverride`
"""
from canvasapi.planner import PlannerOverride
return PaginatedList(
PlannerOverride,
self.__requester,
"GET",
"planner/overrides",
_kwargs=combine_kwargs(**kwargs),
)
def get_poll(self, poll, **kwargs):
"""
Get a single poll, based on the poll id.
:calls: `GET /api/v1/polls/:id \
<https://canvas.instructure.com/doc/api/polls.html#method.polling/polls.show>`_
        :param poll: The poll object or ID of the poll to retrieve.
        :type poll: :class:`canvasapi.poll.Poll` or int
:rtype: :class:`canvasapi.poll.Poll`
"""
from canvasapi.poll import Poll
poll_id = obj_or_id(poll, "poll", (Poll,))
response = self.__requester.request(
"GET", "polls/{}".format(poll_id), _kwargs=combine_kwargs(**kwargs)
)
return Poll(self.__requester, response.json()["polls"][0])
def get_polls(self, **kwargs):
"""
Returns a paginated list of polls for the current user
        :calls: `GET /api/v1/polls \
<https://canvas.instructure.com/doc/api/polls.html#method.polling/polls.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.poll.Poll`
"""
from canvasapi.poll import Poll
return PaginatedList(
Poll,
self.__requester,
"GET",
"polls",
_root="polls",
_kwargs=combine_kwargs(**kwargs),
)
def get_progress(self, progress, **kwargs):
"""
Get a specific progress.
        :calls: `GET /api/v1/progress/:id \
<https://canvas.instructure.com/doc/api/progress.html#method.progress.show>`_
:param progress: The object or ID of the progress to retrieve.
:type progress: int, str or :class:`canvasapi.progress.Progress`
:rtype: :class:`canvasapi.progress.Progress`
"""
from canvasapi.progress import Progress
progress_id = obj_or_id(progress, "progress", (Progress,))
response = self.__requester.request(
"GET", "progress/{}".format(progress_id), _kwargs=combine_kwargs(**kwargs)
)
return Progress(self.__requester, response.json())
def get_root_outcome_group(self):
"""
Redirect to root outcome group for context
:calls: `GET /api/v1/global/root_outcome_group \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.redirect>`_
:returns: The OutcomeGroup of the context.
:rtype: :class:`canvasapi.outcome.OutcomeGroup`
"""
from canvasapi.outcome import OutcomeGroup
response = self.__requester.request("GET", "global/root_outcome_group")
return OutcomeGroup(self.__requester, response.json())
def get_section(self, section, use_sis_id=False, **kwargs):
"""
Get details about a specific section.
:calls: `GET /api/v1/sections/:id \
<https://canvas.instructure.com/doc/api/sections.html#method.sections.show>`_
:param section: The object or ID of the section to get.
:type section: :class:`canvasapi.section.Section` or int
:param use_sis_id: Whether or not section_id is an sis ID.
Defaults to `False`.
:type use_sis_id: bool
:rtype: :class:`canvasapi.section.Section`
"""
if use_sis_id:
section_id = section
uri_str = "sections/sis_section_id:{}"
else:
section_id = obj_or_id(section, "section", (Section,))
uri_str = "sections/{}"
response = self.__requester.request(
"GET", uri_str.format(section_id), _kwargs=combine_kwargs(**kwargs)
)
return Section(self.__requester, response.json())
def get_todo_items(self):
"""
Return the current user's list of todo items, as seen on the user dashboard.
:calls: `GET /api/v1/users/self/todo \
<https://canvas.instructure.com/doc/api/users.html#method.users.todo_items>`_
:rtype: dict
"""
response = self.__requester.request("GET", "users/self/todo")
return response.json()
def get_upcoming_events(self):
"""
Return the current user's upcoming events, i.e. the same things shown
in the dashboard 'Coming Up' sidebar.
:calls: `GET /api/v1/users/self/upcoming_events \
<https://canvas.instructure.com/doc/api/users.html#method.users.upcoming_events>`_
:rtype: dict
"""
response = self.__requester.request("GET", "users/self/upcoming_events")
return response.json()
def get_user(self, user, id_type=None):
"""
Retrieve a user by their ID. `id_type` denotes which endpoint to try as there are
several different IDs that can pull the same user record from Canvas.
Refer to API documentation's
`User <https://canvas.instructure.com/doc/api/users.html#User>`_
example to see the ID types a user can be retrieved with.
:calls: `GET /api/v1/users/:id \
<https://canvas.instructure.com/doc/api/users.html#method.users.api_show>`_
:param user: The user's object or ID.
:type user: :class:`canvasapi.user.User` or int
:param id_type: The ID type.
:type id_type: str
:rtype: :class:`canvasapi.user.User`
"""
if id_type:
uri = "users/{}:{}".format(id_type, user)
elif user == "self":
uri = "users/self"
else:
user_id = obj_or_id(user, "user", (User,))
uri = "users/{}".format(user_id)
response = self.__requester.request("GET", uri)
return User(self.__requester, response.json())
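    # A usage sketch, assuming `canvas` is an authenticated canvasapi.Canvas instance;
    # the IDs are placeholders and `sis_login_id` is one of the ID types Canvas accepts:
    #   user = canvas.get_user(1234)
    #   user = canvas.get_user('jdoe', id_type='sis_login_id')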
def get_user_participants(self, appointment_group, **kwargs):
"""
List user participants in this appointment group.
:calls: `GET /api/v1/appointment_groups/:id/users \
<https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.users>`_
:param appointment_group: The object or ID of the appointment group.
:type appointment_group: :class:`canvasapi.appointment_group.AppointmentGroup` or int
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.user.User`
"""
from canvasapi.appointment_group import AppointmentGroup
from canvasapi.user import User
appointment_group_id = obj_or_id(
appointment_group, "appointment_group", (AppointmentGroup,)
)
return PaginatedList(
User,
self.__requester,
"GET",
"appointment_groups/{}/users".format(appointment_group_id),
_kwargs=combine_kwargs(**kwargs),
)
def list_appointment_groups(self, **kwargs):
"""
List appointment groups.
.. warning::
.. deprecated:: 0.10.0
Use :func:`canvasapi.canvas.Canvas.get_appointment_groups` instead.
:calls: `GET /api/v1/appointment_groups \
<https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.appointment_group.AppointmentGroup`
"""
warnings.warn(
"`list_appointment_groups` is being deprecated and will be removed"
" in a future version. Use `get_appointment_groups` instead.",
DeprecationWarning,
)
return self.get_appointment_groups(**kwargs)
def list_calendar_events(self, **kwargs):
"""
List calendar events.
.. warning::
.. deprecated:: 0.10.0
Use :func:`canvasapi.canvas.Canvas.get_calendar_events` instead.
:calls: `GET /api/v1/calendar_events \
<https://canvas.instructure.com/doc/api/calendar_events.html#method.calendar_events_api.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.calendar_event.CalendarEvent`
"""
warnings.warn(
"`list_calendar_events` is being deprecated and will be removed "
"in a future version. Use `get_calendar_events` instead",
DeprecationWarning,
)
return self.get_calendar_events(**kwargs)
def list_group_participants(self, appointment_group, **kwargs):
"""
List student group participants in this appointment group.
.. warning::
.. deprecated:: 0.10.0
                Use :func:`canvasapi.canvas.Canvas.get_group_participants` instead.
:calls: `GET /api/v1/appointment_groups/:id/groups \
<https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.groups>`_
:param appointment_group: The object or ID of the appointment group.
:type appointment_group: :class:`canvasapi.appointment_group.AppointmentGroup` or int
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.group.Group`
"""
warnings.warn(
"`list_group_participants` is being deprecated and will be removed "
"in a future version. Use `get_group_participants` instead",
DeprecationWarning,
)
return self.get_group_participants(appointment_group, **kwargs)
def list_user_participants(self, appointment_group, **kwargs):
"""
List user participants in this appointment group.
.. warning::
.. deprecated:: 0.10.0
                Use :func:`canvasapi.canvas.Canvas.get_user_participants` instead.
:calls: `GET /api/v1/appointment_groups/:id/users \
<https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.users>`_
:param appointment_group: The object or ID of the appointment group.
:type appointment_group: :class:`canvasapi.appointment_group.AppointmentGroup` or int
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.user.User`
"""
warnings.warn(
"`list_user_participants` is being deprecated and will be removed in a future version."
" Use `get_user_participants` instead",
DeprecationWarning,
)
return self.get_user_participants(appointment_group, **kwargs)
def reserve_time_slot(self, calendar_event, participant_id=None, **kwargs):
"""
Return single Calendar Event by id
:calls: `POST /api/v1/calendar_events/:id/reservations \
<https://canvas.instructure.com/doc/api/calendar_events.html#method.calendar_events_api.reserve>`_
:param calendar_event: The object or ID of the calendar event.
:type calendar_event: :class:`canvasapi.calendar_event.CalendarEvent` or int
:param participant_id: The ID of the participant, if given.
:type participant_id: str
:rtype: :class:`canvasapi.calendar_event.CalendarEvent`
"""
from canvasapi.calendar_event import CalendarEvent
calendar_event_id = obj_or_id(
calendar_event, "calendar_event", (CalendarEvent,)
)
if participant_id:
uri = "calendar_events/{}/reservations/{}".format(
calendar_event_id, participant_id
)
else:
uri = "calendar_events/{}/reservations".format(calendar_event_id)
response = self.__requester.request(
"POST", uri, _kwargs=combine_kwargs(**kwargs)
)
return CalendarEvent(self.__requester, response.json())
def search_accounts(self, **kwargs):
"""
Return a list of up to 5 matching account domains. Partial matches on
name and domain are supported.
:calls: `GET /api/v1/accounts/search \
<https://canvas.instructure.com/doc/api/account_domain_lookups.html#method.account_domain_lookups.search>`_
:rtype: dict
"""
response = self.__requester.request(
"GET", "accounts/search", _kwargs=combine_kwargs(**kwargs)
)
return response.json()
def search_all_courses(self, **kwargs):
"""
List all the courses visible in the public index.
Returns a list of dicts, each containing a single course.
:calls: `GET /api/v1/search/all_courses \
<https://canvas.instructure.com/doc/api/search.html#method.search.all_courses>`_
:rtype: `list`
"""
response = self.__requester.request(
"GET", "search/all_courses", _kwargs=combine_kwargs(**kwargs)
)
return response.json()
def search_recipients(self, **kwargs):
"""
Find valid recipients (users, courses and groups) that the current user
can send messages to.
Returns a list of mixed data types.
:calls: `GET /api/v1/search/recipients \
<https://canvas.instructure.com/doc/api/search.html#method.search.recipients>`_
:rtype: `list`
"""
if "search" not in kwargs:
kwargs["search"] = " "
response = self.__requester.request(
"GET", "search/recipients", _kwargs=combine_kwargs(**kwargs)
)
return response.json()
def set_course_nickname(self, course, nickname):
"""
Set a nickname for the given course. This will replace the
course's name in the output of subsequent API calls, as
well as in selected places in the Canvas web user interface.
:calls: `PUT /api/v1/users/self/course_nicknames/:course_id \
<https://canvas.instructure.com/doc/api/users.html#method.course_nicknames.update>`_
:param course: The ID of the course.
:type course: :class:`canvasapi.course.Course` or int
:param nickname: The nickname for the course.
:type nickname: str
:rtype: :class:`canvasapi.course.CourseNickname`
"""
from canvasapi.course import CourseNickname
course_id = obj_or_id(course, "course", (Course,))
response = self.__requester.request(
"PUT", "users/self/course_nicknames/{}".format(course_id), nickname=nickname
)
return CourseNickname(self.__requester, response.json())
the-stack_106_28492
"""create user and tweet tables
Revision ID: 77b63ea5b036
Revises:
Create Date: 2020-05-20 16:09:45.241980
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '77b63ea5b036'
down_revision = None
branch_labels = None
depends_on = None
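# A usage sketch, assuming a standard Alembic setup (or Flask-Migrate, where the
# equivalent commands are `flask db upgrade` / `flask db downgrade`):
#   alembic upgrade head    # apply this migration
#   alembic downgrade -1    # revert it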
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.BigInteger(), nullable=False),
sa.Column('screen_name', sa.String(length=128), nullable=False),
sa.Column('name', sa.String(), nullable=True),
sa.Column('location', sa.String(), nullable=True),
sa.Column('followers_count', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('tweet',
sa.Column('id', sa.BigInteger(), nullable=False),
sa.Column('user_id', sa.BigInteger(), nullable=True),
sa.Column('full_text', sa.String(length=500), nullable=True),
sa.Column('embedding', sa.PickleType(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('tweet')
op.drop_table('user')
# ### end Alembic commands ###
the-stack_106_28493
import coreschema
import six
from django.utils.translation import gettext_lazy as _
from django_filters import rest_framework as filters
from django_filters.fields import ModelMultipleChoiceField, MultipleChoiceField
from django_filters.filters import QuerySetRequestMixin
from django_filters.rest_framework import DjangoFilterBackend, MultipleChoiceFilter
class TimeStampedFilter(filters.FilterSet):
created = filters.DateFilter(help_text=_('Filter results by results created at the given time'))
created__date = filters.DateFilter(
help_text=_('Filter results by results created on the given date'),
lookup_expr='date',
field_name='created',
)
created__gte = filters.DateFilter(
        help_text=_('Filter results by results created after or at the given time'),
lookup_expr='gte',
field_name='created'
)
created__lte = filters.DateFilter(
help_text=_('Filter results by results created before or at the given time'),
lookup_expr='lte',
field_name='created'
)
created__gt = filters.DateFilter(
help_text=_('Filter results by results created after the given time'),
lookup_expr='gt',
field_name='created'
)
created__lt = filters.DateFilter(
help_text=_('Filter results by results created before the given time'),
lookup_expr='lt',
field_name='created'
)
modified = filters.DateFilter(help_text=_('Filter results by results modified at the given time'))
modified__date = filters.DateFilter(
help_text=_('Filter results by results modified on the given date'),
lookup_expr='date',
field_name='modified'
)
modified__gte = filters.DateFilter(
        help_text=_('Filter results by results modified after or at the given time'),
lookup_expr='gte',
field_name='modified'
)
modified__lte = filters.DateFilter(
help_text=_('Filter results by results modified before or at the given time'),
lookup_expr='lte',
field_name='modified'
)
modified__gt = filters.DateFilter(
help_text=_('Filter results by results modified after the given time'),
lookup_expr='gt',
field_name='modified'
)
modified__lt = filters.DateFilter(
help_text=_('Filter results by results modified before the given time'),
lookup_expr='lt',
field_name='modified'
)
class Backend(DjangoFilterBackend):
def get_coreschema_field(self, field):
description = six.text_type(field.extra.get('help_text', ''))
if isinstance(field, filters.NumberFilter):
return coreschema.Number(description=description)
elif isinstance(field, filters.MultipleChoiceFilter):
return coreschema.Array(
items=coreschema.Enum(enum=[c[0] for c in field.field.choices]),
description=description,
unique_items=True,
)
elif isinstance(field, filters.ChoiceFilter):
return coreschema.Enum(
enum=[c[0] for c in field.field.choices],
description=description
)
else:
return coreschema.String(description=description)
class CsvMultipleChoiceMixin(object):
def to_python(self, value):
if value and len(value) == 1 and ',' in value[0]:
return super(CsvMultipleChoiceMixin, self).to_python(value[0].split(','))
else:
return super(CsvMultipleChoiceMixin, self).to_python(value)
class CsvMultipleChoiceField(CsvMultipleChoiceMixin, MultipleChoiceField):
pass
class CsvModelMultipleChoiceField(CsvMultipleChoiceMixin, ModelMultipleChoiceField):
pass
class CsvMultipleChoiceFilter(MultipleChoiceFilter):
field_class = CsvMultipleChoiceField
class CsvModelMultipleChoiceFilter(QuerySetRequestMixin, CsvMultipleChoiceFilter):
field_class = CsvModelMultipleChoiceField
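# A minimal wiring sketch; `Event`, `EventSerializer` and `STATUS_CHOICES` are hypothetical
# names used only to show how the filter set and backend above fit together:
#
#   class EventFilter(TimeStampedFilter):
#       status = CsvMultipleChoiceFilter(choices=STATUS_CHOICES)
#
#       class Meta:
#           model = Event
#           fields = []  # the declared filters above and on TimeStampedFilter apply as-is
#
#   class EventViewSet(viewsets.ModelViewSet):
#       queryset = Event.objects.all()
#       serializer_class = EventSerializer
#       filter_backends = [Backend]
#       filterset_class = EventFilter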
the-stack_106_28494
########################################################################################################################
# The following class is for different kinds of annealing optimizer
########################################################################################################################
import boto3
import json
import pickle #nosec
import os
import datetime
import logging
import re
from .MolGeoCalc import update_pts_distance
from .MoleculeParser import MoleculeData
s3_client = boto3.client("s3")
log = logging.getLogger()
log.setLevel('INFO')
class ResultParser():
def __init__(self, method, **param):
self.agg_response = None
self.method = method
self.set = set()
# raw_result, load from pickle file, maintain by dwave
self.raw_result = None
if self.method == "dwave-qa":
self.bucket = param["bucket"]
self.prefix = param["prefix"]
self.task_id = param["task_id"]
self._load_raw_result()
# result: get by task_id, maintain by braket api
self.result = None
# initial mol file
self.atom_pos_data = {}
self.atom_pos_data_raw = {}
self.atom_pos_data_temp = {}
self.mol_file_name = param["raw_path"]
logging.info("MoleculeData.load()")
self.mol_data = MoleculeData.load(param["data_path"])
logging.info("init mol data for final position")
self._init_mol_file(self.atom_pos_data)
logging.info("init mol data for raw position")
self._init_mol_file(self.atom_pos_data_raw)
# parse model_info
self.rb_var_map = None
self.var_rb_map = None
self.M = None
self.D = None
self.theta_option = None
self.valid_var_name = []
self._parse_model_info()
# initial parameter file
self.parameters = {}
self._init_parameters()
if self.method == "dwave-sa":
logging.info("parse simulated annealer result")
self.result = None
elif self.method == "dwave-qa":
logging.info("parse quantum annealer result")
obj = self._read_result_obj(
self.bucket, self.prefix, self.task_id, "results.json")
self.result = json.loads(obj["Body"].read())
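    # A usage sketch (bucket, prefix, task ID and file paths are hypothetical):
    #   parser = ResultParser("dwave-qa", bucket="my-bucket", prefix="annealer-results",
    #                         task_id="task-1234", raw_path="mol.mol2", data_path="mol_data.json")
    #   parser.generate_optimize_pts()
    #   parser.save_mol_file("experiment-1")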
def _init_parameters(self):
logging.info("_init_parameters")
# TODO: leave for future post process
# van_der_waals_check = 'initial'
# self.parameters["volume"] = {}
# self.parameters["volume"]["initial"], _, self.set = mol_distance_func(
# self.atom_pos_data, van_der_waals_check, self.set)
self.parameters["volume"] = {}
self.parameters["volume"]["optimize"] = 0
self.parameters["volume"]["initial"] = 0
def _init_mol_file(self, pos_data):
for pt, info in self.mol_data.atom_data.items():
pos_data[pt] = {}
pos_data[pt]['pts'] = [info['x'], info['y'], info['z']]
pos_data[pt]['idx'] = ([0, 0, 0], [0, 0, 0])
pos_data[pt]['vdw-radius'] = info['vdw-radius']
def _init_temp_mol_file(self):
logging.info("_init_mol_file")
for pt, info in self.mol_data.atom_data.items():
self.atom_pos_data_temp[pt] = {}
self.atom_pos_data_temp[pt]['pts'] = [
info['x'], info['y'], info['z']]
self.atom_pos_data_temp[pt]['idx'] = ([0, 0, 0], [0, 0, 0])
self.atom_pos_data_temp[pt]['vdw-radius'] = info['vdw-radius']
def _read_result_obj(self, bucket, prefix, task_id, file_name):
logging.info("_read_result_obj")
key = f"{prefix}/{task_id}/{file_name}"
logging.info(f"_read_result_obj: {key}")
obj = s3_client.get_object(Bucket=bucket, Key=key)
return obj
def _load_raw_result(self):
logging.info("_load_raw_result")
if self.method == "dwave-sa":
logging.info("load simulated annealer raw result")
full_path = "./sa_result.pickle"
with open(full_path, "rb") as f:
self.raw_result = pickle.load(f) #nosec
elif self.method == "dwave-qa":
logging.info("load quantum annealer raw result")
obj = self._read_result_obj(
self.bucket, self.prefix, self.task_id, "qa_result.pickle") #nosec
self.raw_result = pickle.loads(obj["Body"].read()) #nosec
def get_all_result(self):
return self.raw_result, self.result
def get_time(self):
if self.method == "dwave-qa":
local_time = self.raw_result["time"]
# task_time
date_time_str = self.result["taskMetadata"]["createdAt"]
start = datetime.datetime.strptime(
date_time_str, "%Y-%m-%dT%H:%M:%S.%fZ")
date_time_str = self.result["taskMetadata"]["endedAt"]
end = datetime.datetime.strptime(
date_time_str, "%Y-%m-%dT%H:%M:%S.%fZ")
task_time = (end-start).total_seconds()
# reference https://docs.dwavesys.com/docs/latest/c_qpu_timing.html
# qa_total_time = qpu_program_time + sampling_time + qpu_access_overhead_time + total_post_processing_time
qpu_programming_overtime = self.result["additionalMetadata"][
"dwaveMetadata"]["timing"]["qpuProgrammingTime"]
qpu_sampling_time = self.result["additionalMetadata"]["dwaveMetadata"]["timing"]["qpuSamplingTime"]
qpu_access_overhead_time = self.result["additionalMetadata"][
"dwaveMetadata"]["timing"]["qpuAccessOverheadTime"]
total_post_processing_time = self.result["additionalMetadata"][
"dwaveMetadata"]["timing"]["totalPostProcessingTime"]
qa_total_time = qpu_programming_overtime + qpu_sampling_time + \
qpu_access_overhead_time + total_post_processing_time
qa_total_time = qa_total_time/1000.0
qa_total_time = qa_total_time/1000.0
qa_access_time = self.result["additionalMetadata"]["dwaveMetadata"]["timing"]["qpuAccessTime"]/1000.0
qa_access_time = qa_access_time/1000.0
return local_time, task_time, qa_total_time, qa_access_time
else:
local_time = self.raw_result["time"]
logging.info("sa only has local_time!")
return local_time, None, None, None
def _parse_model_info(self):
logging.info("_parse_model_info")
# logging.info("_parse_model_info() model_info = {}".format(self.raw_result["model_info"]))
self.rb_var_map = self.raw_result["model_info"]["rb_var_map"]
self.var_rb_map = self.raw_result["model_info"]["var_rb_map"]
# parse D from model_name
model_name = self.raw_result["model_info"]["model_name"]
self.M = int(model_name.split("_")[0])
self.D = int(model_name.split("_")[1])
self.theta_option = [x * 360/self.D for x in range(self.D)]
for rb in self.raw_result["model_info"]["rb_name"]:
var = self.rb_var_map[rb]
for d in range(self.D):
self.valid_var_name.append(f'x_{var}_{d+1}')
# logging.info(f"valid var for this model is {self.valid_var_name}")
return 0
def generate_optimize_pts(self):
logging.info("generate_optimize_pts()")
# get best configuration
pddf_sample_result = self.raw_result["response"].aggregate(
).to_pandas_dataframe()
pddf_best_result = pddf_sample_result.iloc[pddf_sample_result['energy'].idxmin(
), :]
logging.debug("generate_optimize_pts model_info={}".format(
self.raw_result["model_info"]))
best_config = pddf_best_result.filter(items=self.valid_var_name)
chosen_var = set(best_config[best_config == 1].index.tolist())
initial_var = set()
# change chose var to dict
var_dict = {}
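        # decision variables are named 'x_<torsion-var>_<theta-index>' (see _parse_model_info),
        # so split each chosen variable into its torsion variable and angle index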
for valid_var in chosen_var:
var_name = valid_var.split("_")[1]
var_angle = valid_var.split("_")[2]
var_dict[var_name] = var_angle
initial_name = f"X_{var_name}_1"
initial_var.add(initial_name)
logging.debug(f"var_dict is {var_dict}")
# calculate optimized position
f_distances_raw = {}
f_distances_optimize = {}
missing_var = set()
for ris in self.mol_data.bond_graph.sort_ris_data[str(self.M)].keys():
# ris: '30+31', '29+30', '30+31,29+30'
# atom_pos_data_temp = self.atom_pos_data_raw.copy()
self._init_mol_file(self.atom_pos_data_temp)
logging.debug(f"ris group {ris} ")
torsion_group = ris.split(",")
# update points
rb_set = self.mol_data.bond_graph.sort_ris_data[str(
self.M)][ris]
tor_list = []
for rb_name in torsion_group:
var_name = self.rb_var_map[rb_name]
var_angle = 1
if var_name in var_dict.keys():
var_angle = var_dict[var_name]
else:
logging.info(f"missing result for {var_name}")
missing_var.add(f"X_{var_name}_1")
initial_var.add(f"X_{var_name}_1")
chosen_var.add(f"X_{var_name}_1")
tor_list.append(f'X_{var_name}_{var_angle}')
logging.debug(f"theta_option {self.theta_option}")
logging.debug(f"rb_set {rb_set}")
optimize_distance = update_pts_distance(
self.atom_pos_data_temp, rb_set, tor_list, self.var_rb_map, self.theta_option, True, True)
raw_distance = update_pts_distance(
self.atom_pos_data_raw, rb_set, None, None, None, False, True)
update_pts_distance(self.atom_pos_data, rb_set, tor_list,
self.var_rb_map, self.theta_option, True, False)
f_distances_optimize[tuple(ris)] = optimize_distance
f_distances_raw[tuple(ris)] = raw_distance
self.parameters["volume"]["optimize"] = self.parameters["volume"]["optimize"] + \
optimize_distance
self.parameters["volume"]["initial"] = self.parameters["volume"]["initial"] + raw_distance
logging.debug(f"finish update optimize points for {chosen_var}")
logging.debug(f_distances_optimize)
logging.debug(f_distances_raw)
# update mol distance metrics
# van_der_waals_check = 'test'
# self.parameters["volume"]["optimize"], _, _ = mol_distance_func(
# self.atom_pos_data, van_der_waals_check, self.set)
# update relative improvement
optimize_gain = self.parameters["volume"]["optimize"] / \
self.parameters["volume"]["initial"]
optimize_state = False
if optimize_gain < 1.0:
logging.info("Fail to find optimized shape, return to original one")
self.parameters["volume"]["optimize"] = self.parameters["volume"]["initial"]
self.parameters["volume"]["gain"] = 1.0
self.parameters["volume"]["unfolding_results"] = list(initial_var)
optimize_state = False
else:
self.parameters["volume"]["gain"] = optimize_gain
# update optimized results
self.parameters["volume"]["unfolding_results"] = list(chosen_var)
optimize_state = True
self.parameters["volume"]["optimize_info"] = {}
self.parameters["volume"]["optimize_info"]["missing_var"] = list(missing_var)
self.parameters["volume"]["optimize_info"]["optimize_state"] = optimize_state
return 0
def save_mol_file(self, save_name):
logging.info(f"save_mol_file {save_name}")
raw_f = open(self.mol_file_name, "r")
lines = raw_f.readlines()
start_parse = 0
def _update_atom_pos(line, atom_pos_data):
atom_idx_name = re.findall(r"\d+ [A-Z]\d+", line)[0]
logging.debug("atom id name is {}".format(atom_idx_name))
atom_idx = atom_idx_name.split(' ')[0]
regrex = re.compile(
r"[-+]?\d+\.\d+ +[-+]?\d+\.\d+ +[-+]?\d+\.\d+", re.IGNORECASE)
update_pos_x = atom_pos_data[atom_idx]['pts'][0]
update_pos_y = atom_pos_data[atom_idx]['pts'][1]
update_pos_z = atom_pos_data[atom_idx]['pts'][2]
update_pos = "{} {} {}".format(
update_pos_x, update_pos_y, update_pos_z)
update_line = regrex.sub(update_pos, line)
return update_line
mol_save_name = f"{self.mol_file_name.split('mol2')[0][:-1]}_{self.method}_{save_name}.mol2"
file_save_name = f"{self.mol_file_name.split('mol2')[0][:-1]}_{self.method}_{save_name}.json"
update_f = open(mol_save_name, 'w')
for line in lines:
logging.debug(line)
if line.startswith("@<TRIPOS>BOND"):
logging.debug("finish atom part")
start_parse = 0
if start_parse == 1:
update_line = _update_atom_pos(line, self.atom_pos_data)
update_f.write(update_line)
else:
update_f.write(line)
if line.startswith("@<TRIPOS>ATOM"):
logging.debug("found atom start position")
start_parse = 1
raw_f.close()
update_f.close()
# update_parameters
with open(file_save_name, "w") as outfile:
json.dump(self.parameters, outfile)
logging.info(f"finish save {mol_save_name} and {file_save_name}")
return [mol_save_name, file_save_name]
the-stack_106_28495
from typing import Dict as _Dict
from typing import List as _List
from xml.etree.ElementTree import ParseError as _XmlParseError
import asyncio as _asyncio
from discord import TextChannel as _TextChannel
import discord.ext.tasks as _tasks
from discord.ext.commands import Bot as _Bot
from discord.ext.commands import Cog as _Cog
from discord.ext.commands import Context as _Context
from discord.ext.commands import is_owner as _is_owner
from discord.ext.commands import guild_only as _guild_only
from discord.ext.commands import group as _command_group
from .. import bot_settings as _bot_settings
from .. import utils as _utils
from ..utils.discord import escape_markdown_and_mentions as _escape_markdown_and_mentions
from ..converters import PssChatLoggerConverter as _PssChatLoggerConverter
from ..model import orm as _orm
from ..model.chat_log import PssChatLogger as _PssChatLogger
from ..pssapi import message_service as _message_service
from ..pssapi import device_login as _login
from ..pssapi.errors import PssApiError as _PssApiError
from ..pssapi.errors import ServerMaintenanceError as _ServerMaintenanceError
# ---------- Constants ----------
# ---------- Cog ----------
class ChatLogger(_Cog):
__CHAT_LOG_INTERVAL: float = 60.0
def __init__(self, bot: _Bot) -> None:
if not bot:
raise ValueError('Parameter \'bot\' must not be None.')
self.__bot = bot
self.log_chat.start()
@property
def bot(self) -> _Bot:
return self.__bot
def cog_unload(self):
self.log_chat.cancel()
@_tasks.loop(seconds=__CHAT_LOG_INTERVAL)
async def log_chat(self):
utc_now = _utils.datetime.get_utc_now()
with _orm.create_session() as session:
pss_chat_loggers = _orm.get_all(_PssChatLogger, session)
if not pss_chat_loggers:
return
try:
access_token = await _login()
except (_PssApiError, _XmlParseError) as e:
print(e)
return
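        # group loggers by PSS channel key so each chat channel is fetched from the API only once per iteration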
channel_keys: _Dict[str, _List[_PssChatLogger]] = {}
for pss_chat_logger in pss_chat_loggers:
channel_keys.setdefault(pss_chat_logger.pss_channel_key, []).append(pss_chat_logger)
channel_key_count = len(channel_keys.keys())
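        # spread the per-channel-key requests across the remainder of this loop interval,
        # shaving roughly 1% per key so the iteration finishes before the next tick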
remaining_time = ChatLogger.__CHAT_LOG_INTERVAL - (_utils.datetime.get_utc_now() - utc_now).total_seconds()
delay = remaining_time / channel_key_count * (1 - channel_key_count * .01)
for channel_key, pss_chat_loggers in channel_keys.items():
try:
messages = await _message_service.list_messages_for_channel_key(channel_key, access_token)
except _ServerMaintenanceError:
print(f'Server is under maintenance.')
return
except (_PssApiError, _XmlParseError) as e:
print(f'Could not get messages for channel key \'{channel_key}\': {e}')
messages = None
if messages:
messages = sorted(messages, key=lambda x: x.message_id)
for pss_chat_logger in pss_chat_loggers:
channel: _TextChannel = await self.bot.fetch_channel(pss_chat_logger.channel_id)
if channel:
                        new_messages = [message for message in messages if message.message_id > pss_chat_logger.last_pss_message_id]
lines = []
                        for message in new_messages:
user_name_and_fleet = f'**{_escape_markdown_and_mentions(message.user_name)}'
if message.fleet_name:
user_name_and_fleet += f'** ({_escape_markdown_and_mentions(message.fleet_name)})**'
lines.append(f'{user_name_and_fleet}:** {_escape_markdown_and_mentions(message.message)}')
if lines:
try:
await _utils.discord.send_lines_to_channel(channel, lines)
except:
continue
                        if new_messages:
                            pss_chat_logger.last_pss_message_id = max(message.message_id for message in new_messages)
                            with _orm.create_session() as session:
                                pss_chat_logger = _orm.merge(session, pss_chat_logger)
                                pss_chat_logger.save(session)
if channel_key_count > 1:
await _asyncio.sleep(delay)
@_guild_only()
@_command_group(name='chatlog', brief='Configure Chat Logging', invoke_without_command=True)
async def base(self, ctx: _Context) -> None:
"""
Configure chat logging on this server. Check out the sub commands.
"""
if ctx.invoked_subcommand is None:
_utils.assert_.authorized_channel_or_server_manager(ctx, _bot_settings.AUTHORIZED_CHANNEL_IDS)
await ctx.send_help('chatlog')
@_guild_only()
@base.command(name='add', brief='Add chat logger')
async def add(self, ctx: _Context, channel_key: str, channel: _TextChannel, *, name: str) -> None:
"""
Add a chat logger to this server.
Usage:
vivi chatlog add [channel_key] [channel] [name]
Parameters:
channel_key: Mandatory. The channel key of the PSS chat channel to be logged.
channel: Mandatory. The channel the PSS chat shall be logged to.
name: Mandatory. A name for this logger to recognize it.
Examples:
vivi chatlog add public #log Public Chat - Adds a chat logger for the public chat that will post to the channel #log
"""
_utils.assert_.authorized_channel_or_server_manager(ctx, _bot_settings.AUTHORIZED_CHANNEL_IDS)
log_channel = _PssChatLogger.make(ctx.guild.id, channel.id, channel_key, name)
with _orm.create_session() as session:
log_channel.create(session)
pss_chat_loggers = _orm.get_all(_PssChatLogger, session)
            if len(pss_chat_loggers) == 1 and not self.log_chat.is_running():
self.log_chat.start()
await _utils.discord.reply(ctx, f'Posting messages from channel \'{channel_key}\' to {channel.mention}.')
@_guild_only()
@base.command(name='edit', brief='Edit chat logger')
async def edit(self, ctx: _Context, logger_id: int) -> None:
"""
Edit a chat logger. An assistant will guide you.
Usage:
vivi chatlog edit [logger_id]
Parameter:
logger_id: Mandatory. The chat logger to be edited.
Examples:
vivi chatlog edit 1 - Edits the chat logger with ID '1' on this server.
"""
_utils.assert_.authorized_channel_or_server_manager(ctx, _bot_settings.AUTHORIZED_CHANNEL_IDS)
with _orm.create_session() as session:
pss_chat_logger = _orm.get_first_filtered_by(
_PssChatLogger,
session,
id=logger_id,
guild_id=ctx.guild.id,
)
if not pss_chat_logger:
raise Exception(f'A chat log with the ID {logger_id} does not exist on this server.')
converter = _PssChatLoggerConverter(pss_chat_logger)
await _utils.discord.reply_lines(ctx, (await converter.to_text()))
prompt_text = f'Please enter a new [channel_key]'
new_channel_key, aborted, skipped_new_channel_key = await _utils.discord.inquire_for_text(ctx, prompt_text, abort_text='Aborted', skip_text='Skipped.')
if aborted:
await _utils.discord.reply(ctx, f'The request has been cancelled.')
return
prompt_text = f'Please enter a new [channel]'
new_channel, aborted, skipped_new_channel = await _utils.discord.inquire_for_text_channel(ctx, prompt_text, abort_text='Aborted', skip_text='Skipped.')
if aborted:
await _utils.discord.reply(ctx, f'The request has been cancelled.')
return
prompt_text = f'Please enter a new [name]'
new_name, aborted, skipped_new_name = await _utils.discord.inquire_for_text(ctx, prompt_text, abort_text='Aborted', skip_text='Skipped.')
if aborted:
await _utils.discord.reply(ctx, f'The request has been cancelled.')
return
edited = False
if new_channel_key and not skipped_new_channel_key:
pss_chat_logger.pss_channel_key = new_channel_key
edited = True
if new_channel and not skipped_new_channel:
pss_chat_logger.channel_id = new_channel.id
edited = True
if new_name and not skipped_new_name:
pss_chat_logger.name = new_name
edited = True
if edited:
with _orm.create_session() as session:
pss_chat_logger = _orm.merge(session, pss_chat_logger)
pss_chat_logger.save(session)
lines = [f'The chat logger has been edited.']
lines.extend((await converter.to_text()))
await _utils.discord.reply_lines(ctx, lines)
else:
await _utils.discord.reply(ctx, f'The chat logger {pss_chat_logger} has not been edited.')
@_guild_only()
@base.group(name='list', brief='List chat loggers for this server', invoke_without_command=True)
async def list(self, ctx: _Context) -> None:
"""
Lists all chat logger configured on this server.
Usage:
vivi chatlog list
"""
if ctx.invoked_subcommand is None:
_utils.assert_.authorized_channel_or_server_manager(ctx, _bot_settings.AUTHORIZED_CHANNEL_IDS)
with _orm.create_session() as session:
pss_chat_loggers = _orm.get_all_filtered_by(_PssChatLogger, session, guild_id=ctx.guild.id)
lines = ['__Listing chat loggers for this Discord server__']
for pss_chat_logger in pss_chat_loggers:
converter = _PssChatLoggerConverter(pss_chat_logger)
definition_lines = await converter.to_text()
lines.extend(definition_lines)
lines.append('_ _')
if len(lines) > 1:
lines = lines[:-1]
else:
lines.append('There are no chat loggers configured for this server.')
await _utils.discord.reply_lines(ctx, lines)
@_is_owner()
@_guild_only()
@list.command(name='all', brief='List all chat loggers', hidden=True)
async def list_all(self, ctx: _Context) -> None:
"""
        Lists all chat loggers configured on any server.
Usage:
vivi chatlog list all
"""
_utils.assert_.authorized_channel_or_server_manager(ctx, _bot_settings.AUTHORIZED_CHANNEL_IDS)
with _orm.create_session() as session:
pss_chat_loggers = _orm.get_all(_PssChatLogger, session)
lines = ['__Listing all chat loggers__']
for pss_chat_logger in pss_chat_loggers:
converter = _PssChatLoggerConverter(pss_chat_logger)
definition_lines = await converter.to_text(True, self.bot)
lines.extend(definition_lines)
lines.append('_ _')
if len(lines) > 1:
lines = lines[:-1]
else:
lines.append('There are no chat loggers configured.')
await _utils.discord.reply_lines(ctx, lines)
@_guild_only()
@base.command(name='delete', brief='Delete chat logger', aliases=['remove'])
async def delete(self, ctx: _Context, logger_id: int) -> None:
"""
Removes a chat logger.
Usage:
vivi chatlog delete [logger_id]
Parameters:
logger_id: Mandatory. The ID of the chat logger to be deleted.
Examples:
vivi chatlog delete 1 - Removes the chat logger with the ID '1'.
"""
_utils.assert_.authorized_channel_or_server_manager(ctx, _bot_settings.AUTHORIZED_CHANNEL_IDS)
with _orm.create_session() as session:
pss_chat_logger: _PssChatLogger = _orm.get_first_filtered_by(
_PssChatLogger,
session,
id=logger_id,
guild_id=ctx.guild.id
)
if not pss_chat_logger:
raise Exception(f'A chat log with the ID {logger_id} does not exist on this server.')
converter = _PssChatLoggerConverter(pss_chat_logger)
await _utils.discord.reply_lines(ctx, (await converter.to_text()))
prompt_text = f'Do you really want to delete the chat log listed above?'
delete_log, aborted, _ = await _utils.discord.inquire_for_true_false(ctx, prompt_text)
if aborted:
await _utils.discord.reply(ctx, f'The request has been cancelled.')
elif delete_log:
with _orm.create_session() as session:
pss_chat_logger = _orm.get_by_id(_PssChatLogger, session, logger_id)
session.delete(pss_chat_logger)
session.commit()
await _utils.discord.reply(ctx, f'The chat log has been deleted.')
else:
await _utils.discord.reply(ctx, f'The chat log has not been deleted.')
def setup(bot: _Bot):
bot.add_cog(ChatLogger(bot)) |
the-stack_106_28497 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Module containing weather symbol implementation."""
import copy
import operator
from typing import Any, Dict, List, Optional, Tuple, Union
import iris
import numpy as np
from iris import Constraint
from iris.coords import AuxCoord
from iris.cube import Cube, CubeList
from numpy import ndarray
from improver import BasePlugin
from improver.metadata.amend import update_model_id_attr_attribute
from improver.metadata.probabilistic import (
find_threshold_coordinate,
get_threshold_coord_name_from_probability_name,
probability_is_above_or_below,
)
from improver.metadata.utilities import (
create_new_diagnostic_cube,
generate_mandatory_attributes,
)
from improver.wxcode.utilities import (
expand_nested_lists,
get_parameter_names,
is_variable,
update_daynight,
weather_code_attributes,
)
from improver.wxcode.wxcode_decision_tree import START_NODE, wxcode_decision_tree
from improver.wxcode.wxcode_decision_tree_global import (
START_NODE_GLOBAL,
wxcode_decision_tree_global,
)
def _define_invertible_conditions() -> Dict[str, str]:
"""Returns a dictionary of boolean comparator strings where the value is the
logical inverse of the key."""
invertible_conditions = {
">=": "<",
">": "<=",
"OR": "AND",
"": "",
}
# Add reverse {value: key} entries to invertible_conditions
reverse_inversions = {}
for k, v in invertible_conditions.items():
reverse_inversions[v] = k
invertible_conditions.update(reverse_inversions)
return invertible_conditions
INVERTIBLE_CONDITIONS = _define_invertible_conditions()
class WeatherSymbols(BasePlugin):
"""
Definition and implementation of a weather symbol decision tree. This
plugin uses a variety of diagnostic inputs and the decision tree logic
to determine the most representative weather symbol for each site
defined in the input cubes.
"""
def __init__(
self, wxtree: str = "high_resolution", model_id_attr: Optional[str] = None
) -> None:
"""
Define a decision tree for determining weather symbols based upon
the input diagnostics. Use this decision tree to allocate a weather
symbol to each point.
Args:
wxtree:
Used to choose weather symbol decision tree.
Default is "high_resolution"
"global" will load the global weather symbol decision tree.
model_id_attr:
Name of attribute recording source models that should be
inherited by the output cube. The source models are expected as
a space-separated string.
float_tolerance defines the tolerance when matching thresholds to allow
for the difficulty of float comparisons.
float_abs_tolerance defines the tolerance for when the threshold
is zero. It has to be sufficiently small that a valid rainfall rate
or snowfall rate could not trigger it.
"""
def make_thresholds_with_units(items):
if isinstance(items, list):
return [make_thresholds_with_units(item) for item in items]
values, units = items
return iris.coords.AuxCoord(values, units=units)
self.wxtree = wxtree
self.model_id_attr = model_id_attr
if wxtree == "global":
self.queries = wxcode_decision_tree_global()
self.start_node = START_NODE_GLOBAL
else:
self.queries = wxcode_decision_tree()
self.start_node = START_NODE
for query in self.queries.values():
query["diagnostic_thresholds"] = make_thresholds_with_units(
query["diagnostic_thresholds"]
)
self.float_tolerance = 0.01
self.float_abs_tolerance = 1e-12
# flag to indicate whether to expect "threshold" as a coordinate name
# (defaults to False, checked on reading input cubes)
self.coord_named_threshold = False
def __repr__(self) -> str:
"""Represent the configured plugin instance as a string."""
return "<WeatherSymbols tree={} start_node={}>".format(
self.wxtree, self.start_node
)
def check_input_cubes(self, cubes: CubeList) -> Optional[Dict[str, Any]]:
"""
Check that the input cubes contain all the diagnostics and thresholds
required by the decision tree. Sets self.coord_named_threshold to
"True" if threshold-type coordinates have the name "threshold" (as
opposed to the standard name of the diagnostic), for backward
compatibility.
Args:
cubes:
A CubeList containing the input diagnostic cubes.
Returns:
            A dictionary of (key) node names where the diagnostic
            data is missing and (value) the node associated with the
            diagnostic_missing_action.
Raises:
IOError:
Raises an IOError if any of the required input data is missing.
The error includes details of which fields are missing.
"""
optional_node_data_missing = {}
missing_data = []
for key, query in self.queries.items():
diagnostics = get_parameter_names(
expand_nested_lists(query, "diagnostic_fields")
)
thresholds = expand_nested_lists(query, "diagnostic_thresholds")
conditions = expand_nested_lists(query, "diagnostic_conditions")
for diagnostic, threshold, condition in zip(
diagnostics, thresholds, conditions
):
# First we check the diagnostic name and units, performing
                # a conversion if required and possible.
test_condition = iris.Constraint(name=diagnostic)
matched_cube = cubes.extract(test_condition)
if not matched_cube:
if "diagnostic_missing_action" in query:
optional_node_data_missing.update(
{key: query[query["diagnostic_missing_action"]]}
)
else:
missing_data.append([diagnostic, threshold, condition])
continue
cube_threshold_units = find_threshold_coordinate(matched_cube[0]).units
threshold.convert_units(cube_threshold_units)
# Then we check if the required threshold is present in the
# cube, and that the thresholding is relative to it correctly.
threshold = threshold.points.item()
threshold_name = find_threshold_coordinate(matched_cube[0]).name()
# Set flag to check for old threshold coordinate names
if threshold_name == "threshold" and not self.coord_named_threshold:
self.coord_named_threshold = True
# Check threshold == 0.0
if abs(threshold) < self.float_abs_tolerance:
coord_constraint = {
threshold_name: lambda cell: np.isclose(
cell.point, 0, rtol=0, atol=self.float_abs_tolerance
)
}
else:
coord_constraint = {
threshold_name: lambda cell: np.isclose(
cell.point, threshold, rtol=self.float_tolerance, atol=0
)
}
# Checks whether the spp__relative_to_threshold attribute is above
                # or below a threshold and compares it to the diagnostic_condition.
test_condition = iris.Constraint(
coord_values=coord_constraint,
cube_func=lambda cube: (
probability_is_above_or_below(cube) == condition
),
)
matched_threshold = matched_cube.extract(test_condition)
if not matched_threshold:
missing_data.append([diagnostic, threshold, condition])
if missing_data:
msg = (
"Weather Symbols input cubes are missing"
" the following required"
" input fields:\n"
)
dyn_msg = "name: {}, threshold: {}, " "spp__relative_to_threshold: {}\n"
for item in missing_data:
msg = msg + dyn_msg.format(*item)
raise IOError(msg)
if not optional_node_data_missing:
optional_node_data_missing = None
return optional_node_data_missing
@staticmethod
def _invert_comparator(comparator: str) -> str:
"""Inverts a single comparator string."""
try:
return INVERTIBLE_CONDITIONS[comparator]
except KeyError:
raise KeyError(f"Unexpected condition {comparator}, cannot invert it.")
def invert_condition(self, condition: Dict) -> Tuple[str, str]:
"""
Invert a comparison condition to allow positive identification of conditions
satisfying the negative ('fail') case.
Args:
condition:
A single query from the decision tree.
Returns:
- A string representing the inverted comparison.
- A string representing the inverted combination
"""
inverted_threshold = self._invert_comparator(condition["threshold_condition"])
inverted_combination = self._invert_comparator(
condition["condition_combination"]
)
return inverted_threshold, inverted_combination
def create_condition_chain(self, test_conditions: Dict) -> List:
"""
Construct a list of all the conditions specified in a single query.
Args:
test_conditions:
A query from the decision tree.
Returns:
A valid condition chain is defined recursively:
(1) If each a_1, ..., a_n is an extract expression (i.e. a
constraint, or a list of constraints,
operator strings and floats), and b is either "AND", "OR" or "",
then [[a1, ..., an], b] is a valid condition chain.
            (2) If a1, ..., an are each valid condition chains, and b is
either "AND" or "OR", then [[a1, ..., an], b] is a valid
condition chain.
"""
conditions = []
loop = 0
for diagnostic, p_threshold, d_threshold in zip(
test_conditions["diagnostic_fields"],
test_conditions["probability_thresholds"],
test_conditions["diagnostic_thresholds"],
):
loop += 1
if isinstance(diagnostic, list):
# We have a list which could contain variable names, operators and
# numbers. The variable names need converting into Iris Constraint
# syntax while operators and numbers remain unchanged.
# We expect an entry in p_threshold for each variable name, so
# d_threshold_index is used to track these.
d_threshold_index = -1
extract_constraint = []
for item in diagnostic:
if is_variable(item):
# Add a constraint from the variable name and threshold value
d_threshold_index += 1
extract_constraint.append(
self.construct_extract_constraint(
item,
d_threshold[d_threshold_index],
self.coord_named_threshold,
)
)
else:
# Add this operator or variable as-is
extract_constraint.append(item)
else:
# Non-lists are assumed to be constraints on a single variable.
extract_constraint = self.construct_extract_constraint(
diagnostic, d_threshold, self.coord_named_threshold
)
conditions.append(
[
extract_constraint,
test_conditions["threshold_condition"],
p_threshold,
]
)
condition_chain = [conditions, test_conditions["condition_combination"]]
return condition_chain
def construct_extract_constraint(
self, diagnostic: str, threshold: AuxCoord, coord_named_threshold: bool
) -> Constraint:
"""
Construct an iris constraint.
Args:
diagnostic:
The name of the diagnostic to be extracted from the CubeList.
threshold:
                The threshold within the given diagnostic cube that is
                needed, including units. Note this is NOT a coord from the
                original cubes, just a construct to associate units with values.
coord_named_threshold:
If true, use old naming convention for threshold coordinates
(coord.long_name=threshold). Otherwise extract threshold
coordinate name from diagnostic name
Returns:
A constraint
"""
if coord_named_threshold:
threshold_coord_name = "threshold"
else:
threshold_coord_name = get_threshold_coord_name_from_probability_name(
diagnostic
)
threshold_val = threshold.points.item()
if abs(threshold_val) < self.float_abs_tolerance:
cell_constraint = lambda cell: np.isclose(
cell.point, threshold_val, rtol=0, atol=self.float_abs_tolerance,
)
else:
cell_constraint = lambda cell: np.isclose(
cell.point, threshold_val, rtol=self.float_tolerance, atol=0,
)
kw_dict = {"{}".format(threshold_coord_name): cell_constraint}
constraint = iris.Constraint(name=diagnostic, **kw_dict)
return constraint
@staticmethod
def find_all_routes(
graph: Dict,
start: str,
end: int,
omit_nodes: Optional[Dict] = None,
route: Optional[List[str]] = None,
) -> List[str]:
"""
Function to trace all routes through the decision tree.
Args:
graph:
A dictionary that describes each node in the tree,
e.g. {<node_name>: [<succeed_name>, <fail_name>]}
start:
The node name of the tree root (currently always
heavy_precipitation).
end:
The weather symbol code to which we are tracing all routes.
omit_nodes:
                A dictionary of (key) node names where the diagnostic
                data is missing and (value) the node associated with the
                diagnostic_missing_action.
route:
A list of node names found so far.
Returns:
A list of node names that defines the route from the tree root
to the weather symbol leaf (end of chain).
References:
Method based upon Python Patterns - Implementing Graphs essay
https://www.python.org/doc/essays/graphs/
"""
if route is None:
route = []
if omit_nodes:
start_not_valid = True
while start_not_valid:
if start in omit_nodes:
start = omit_nodes[start]
else:
start_not_valid = False
route = route + [start]
if start == end:
return [route]
if start not in graph.keys():
return []
routes = []
for node in graph[start]:
if node not in route:
newroutes = WeatherSymbols.find_all_routes(
graph, node, end, omit_nodes=omit_nodes, route=route
)
routes.extend(newroutes)
return routes
def create_symbol_cube(self, cubes: Union[List[Cube], CubeList]) -> Cube:
"""
Create an empty weather symbol cube
Args:
cubes:
List of input cubes used to generate weather symbols
Returns:
A cube with suitable metadata to describe the weather symbols
that will fill it and data initiated with the value -1 to allow
any unset points to be readily identified.
"""
threshold_coord = find_threshold_coordinate(cubes[0])
template_cube = next(cubes[0].slices_over([threshold_coord])).copy()
# remove coordinates and bounds that do not apply to weather symbols
template_cube.remove_coord(threshold_coord)
for coord in template_cube.coords():
if coord.name() in ["forecast_period", "time"]:
coord.bounds = None
mandatory_attributes = generate_mandatory_attributes(cubes)
optional_attributes = weather_code_attributes()
if self.model_id_attr:
optional_attributes.update(
update_model_id_attr_attribute(cubes, self.model_id_attr)
)
symbols = create_new_diagnostic_cube(
"weather_code",
"1",
template_cube,
mandatory_attributes,
optional_attributes=optional_attributes,
data=np.ma.masked_all_like(template_cube.data).astype(np.int32),
)
return symbols
@staticmethod
def compare_array_to_threshold(
arr: ndarray, comparator: str, threshold: float
) -> ndarray:
"""Compare two arrays element-wise and return a boolean array.
Args:
arr
comparator:
One of '<', '>', '<=', '>='.
threshold
Returns:
Array of booleans.
Raises:
ValueError: If comparator is not one of '<', '>', '<=', '>='.
"""
if comparator == "<":
return arr < threshold
elif comparator == ">":
return arr > threshold
elif comparator == "<=":
return arr <= threshold
elif comparator == ">=":
return arr >= threshold
else:
raise ValueError(
"Invalid comparator: {}. ".format(comparator),
"Comparator must be one of '<', '>', '<=', '>='.",
)
def evaluate_extract_expression(
self, cubes: CubeList, expression: Union[Constraint, List]
) -> ndarray:
"""Evaluate a single condition.
Args:
cubes:
A cubelist containing the diagnostics required for the
weather symbols decision tree, these at co-incident times.
expression:
Defined recursively:
A list consisting of an iris.Constraint or a list of
iris.Constraint, strings (representing operators) and floats
is a valid expression.
A list consisting of valid expressions, strings (representing
operators) and floats is a valid expression.
Returns:
An array or masked array of booleans
"""
operator_map = {
"+": operator.add,
"-": operator.sub,
"*": operator.mul,
"/": operator.truediv,
}
if isinstance(expression, iris.Constraint):
return cubes.extract(expression)[0].data
else:
curr_expression = copy.deepcopy(expression)
# evaluate sub-expressions first
for idx, item in enumerate(expression):
if isinstance(item, list):
curr_expression = (
curr_expression[:idx]
+ [self.evaluate_extract_expression(cubes, item)]
+ curr_expression[idx + 1 :]
)
# evaluate operators in order of precedence
for op_str in ["/", "*", "+", "-"]:
while len(curr_expression) > 1:
for idx, item in enumerate(curr_expression):
if isinstance(item, str) and (item == op_str):
left_arg = curr_expression[idx - 1]
right_arg = curr_expression[idx + 1]
if isinstance(left_arg, iris.Constraint):
left_eval = cubes.extract(left_arg)[0].data
else:
left_eval = left_arg
if isinstance(right_arg, iris.Constraint):
right_eval = cubes.extract(right_arg)[0].data
else:
right_eval = right_arg
op = operator_map[op_str]
res = op(left_eval, right_eval)
curr_expression = (
curr_expression[: idx - 1]
+ [res]
+ curr_expression[idx + 2 :]
)
break
else:
break
if isinstance(curr_expression[0], iris.Constraint):
res = cubes.extract(curr_expression[0])[0].data
return res
def evaluate_condition_chain(
self, cubes: CubeList, condition_chain: List
) -> ndarray:
"""Recursively evaluate the list of conditions.
We can safely use recursion here since the depth will be small.
Args:
cubes:
A cubelist containing the diagnostics required for the
weather symbols decision tree, these at co-incident times.
condition_chain:
A valid condition chain is defined recursively:
(1) If each a_1, ..., a_n is an extract expression (i.e. a
constraint, or a list of constraints,
operator strings and floats), and b is either "AND", "OR" or "",
then [[a1, ..., an], b] is a valid condition chain.
                (2) If a1, ..., an are each valid condition chains, and b is
either "AND" or "OR", then [[a1, ..., an], b] is a valid
condition chain.
Returns:
            An array or masked array of booleans
"""
def is_chain(item):
return (
isinstance(item, list)
and isinstance(item[1], str)
and (item[1] in ["AND", "OR", ""])
)
items_list, comb = condition_chain
item = items_list[0]
if is_chain(item):
res = self.evaluate_condition_chain(cubes, item)
else:
condition, comparator, threshold = item
res = self.compare_array_to_threshold(
self.evaluate_extract_expression(cubes, condition),
comparator,
threshold,
)
for item in items_list[1:]:
if is_chain(item):
new_res = self.evaluate_condition_chain(cubes, item)
else:
condition, comparator, threshold = item
new_res = self.compare_array_to_threshold(
self.evaluate_extract_expression(cubes, condition),
comparator,
threshold,
)
# If comb is "", then items_list has length 1, so here we can
# assume comb is either "AND" or "OR"
if comb == "AND":
res = res & new_res
elif comb == "OR":
res = res | new_res
else:
msg = (
"Invalid condition chain found. First element has length > 1 ",
"but second element is not 'AND' or 'OR'.",
)
raise RuntimeError(msg)
return res
def process(self, cubes: CubeList) -> Cube:
"""Apply the decision tree to the input cubes to produce weather
symbol output.
Args:
cubes:
A cubelist containing the diagnostics required for the
weather symbols decision tree, these at co-incident times.
Returns:
A cube of weather symbols.
"""
# Check input cubes contain required data
optional_node_data_missing = self.check_input_cubes(cubes)
# Construct graph nodes dictionary
graph = {
key: [self.queries[key]["succeed"], self.queries[key]["fail"]]
for key in self.queries
}
# Search through tree for all leaves (weather code end points)
defined_symbols = []
for item in self.queries.values():
for value in item.values():
if isinstance(value, int):
defined_symbols.append(value)
# Create symbol cube
symbols = self.create_symbol_cube(cubes)
# Loop over possible symbols
for symbol_code in defined_symbols:
# In current decision tree
# start node is heavy_precipitation
routes = self.find_all_routes(
graph,
self.start_node,
symbol_code,
omit_nodes=optional_node_data_missing,
)
# Loop over possible routes from root to leaf
for route in routes:
conditions = []
for i_node in range(len(route) - 1):
current_node = route[i_node]
current = copy.copy(self.queries[current_node])
try:
next_node = route[i_node + 1]
                    except IndexError:
next_node = symbol_code
if current["fail"] == next_node:
(
current["threshold_condition"],
current["condition_combination"],
) = self.invert_condition(current)
conditions.append(self.create_condition_chain(current))
test_chain = [conditions, "AND"]
# Set grid locations to suitable weather symbol
symbols.data[
np.ma.where(self.evaluate_condition_chain(cubes, test_chain))
] = symbol_code
# Update symbols for day or night.
symbols = update_daynight(symbols)
return symbols
|
the-stack_106_28498 | # Time: O(n)
# Space: O(1)
# counting sort solution
class Solution(object):
def specialArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
MAX_NUM = 1000
count = [0]*(MAX_NUM+1)
for num in nums:
count[num] += 1
n = len(nums)
for i in xrange(len(count)):
if i == n:
return i
n -= count[i]
return -1
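# Example (sketch): Solution().specialArray([3, 5]) returns 2, since exactly two
# elements are >= 2, while Solution().specialArray([0, 0]) returns -1.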
# Time: O(n)
# Space: O(1)
# counting sort + binary search solution
class Solution2(object):
def specialArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
MAX_NUM = 1000
def inplace_counting_sort(nums, reverse=False): # Time: O(n)
count = [0]*(MAX_NUM+1)
for num in nums:
count[num] += 1
for i in xrange(1, len(count)):
count[i] += count[i-1]
for i in reversed(xrange(len(nums))): # inplace but unstable sort
if nums[i] < 0: # processed
continue
while i != count[nums[i]]-1:
count[nums[i]] -= 1
nums[count[nums[i]]], nums[i] = ~nums[i], nums[count[nums[i]]]
count[nums[i]] -= 1
nums[i] = ~nums[i]
for i in xrange(len(nums)):
nums[i] = ~nums[i] # restore values
if reverse: # unstable sort
nums.reverse()
inplace_counting_sort(nums, reverse=True)
left, right = 0, len(nums)-1
while left <= right: # Time: O(logn)
mid = left + (right-left)//2
if nums[mid] <= mid:
right = mid-1
else:
left = mid+1
return -1 if left < len(nums) and nums[left] == left else left
# Time: O(n)
# Space: O(n)
# counting sort + binary search solution
class Solution3(object):
def specialArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
MAX_NUM = 1000
def counting_sort(nums, reverse=False): # Time: O(n), Space: O(n)
count = [0]*(MAX_NUM+1)
for num in nums:
count[num] += 1
for i in xrange(1, len(count)):
count[i] += count[i-1]
result = [0]*len(nums)
if not reverse:
for num in reversed(nums): # stable sort
count[num] -= 1
result[count[num]] = num
else:
for num in nums: # stable sort
count[num] -= 1
result[count[num]] = num
result.reverse()
return result
nums = counting_sort(nums, reverse=True) # extra O(n) space for stable sort
left, right = 0, len(nums)-1
while left <= right: # Time: O(logn)
mid = left + (right-left)//2
if nums[mid] <= mid:
right = mid-1
else:
left = mid+1
return -1 if left < len(nums) and nums[left] == left else left
# Time: O(nlogn)
# Space: O(1)
# sort solution
class Solution4(object):
def specialArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums.sort(reverse=True) # Time: O(nlogn)
for i in xrange(len(nums)): # Time: O(n)
if nums[i] <= i:
break
else:
i += 1
return -1 if i < len(nums) and nums[i] == i else i
|
the-stack_106_28500 | from raptiformica.shell.raptiformica import clean
from tests.testcase import TestCase
class TestClean(TestCase):
def setUp(self):
self.log = self.set_up_patch('raptiformica.shell.raptiformica.log')
self.run_raptiformica_command = self.set_up_patch(
'raptiformica.shell.raptiformica.run_raptiformica_command'
)
def test_clean_logs_clean_message(self):
clean('1.2.3.3', 2223)
self.assertTrue(self.log.info.called)
def test_clean_runs_raptiformica_command(self):
clean('1.2.3.3', 2223)
self.run_raptiformica_command.assert_called_once_with(
'export PYTHONPATH=.; ./bin/raptiformica_clean.py '
'--verbose',
'1.2.3.3', port=2223
)
def test_clean_returns_raptiformica_command_exit_code(self):
ret = clean('1.2.3.3', 2223)
self.assertEqual(ret, self.run_raptiformica_command.return_value)
|
the-stack_106_28501 | #!/usr/bin/env python3
# Copyright (C) 2019-2021 The btclib developers
#
# This file is part of btclib. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution.
#
# No part of btclib including this file, may be copied, modified, propagated,
# or distributed except according to the terms contained in the LICENSE file.
"""Bitcoin message signing (BMS).
Bitcoin uses a P2PKH address-based scheme for message signature: such
a signature does prove the control of the private key corresponding to
the address and, consequently, of the associated bitcoins (if any).
Message signature adopts a custom compact 65-bytes (fixed size)
serialization format (i.e. not the ASN.1 DER format used for
transactions, which would result in a 71-byte average signature).
One should never sign a vague statement that could be reused
out of the context it was intended for. Always include at least:
- name (nickname, customer id, e-mail, etc.)
- date and time
- who the message is intended for (name, business name, e-mail, etc.)
- specific purpose of the message
To mitigate the risk of signing a possibly deceiving message,
for any given message a *magic* "Bitcoin Signed Message:\\n" prefix is
added, then the hash of the resulting message is signed.
This BMS scheme relies on ECDSA,
i.e. it works with private/public key pairs, not addresses:
the address is only used to identify a key pair.
At signing time, a wallet infrastructure is required to access
the private key corresponding to a given address;
alternatively, the private key must be provided explicitly.
To verify the ECDSA signature the public key is not needed
because (EC)DSA allows public key recovery:
public keys that correctly verify the signature
can be implied from the signature itself.
In the case of the Bitcoin secp256k1 curve,
two public keys are recovered
(up to four with non-zero but negligible probability);
at verification time the address must match
that public key in the recovery set
marked as the right one at signature time.
The (r, s) DSA signature is serialized as
[1-byte recovery flag][32-bytes r][32-bytes s],
in a compact 65-bytes (fixed-size) encoding.
The serialized signature is then base64-encoded to transport it
across channels that are designed to deal with textual data.
Base64-encoding uses 10 digits, 26 lowercase characters, 26 uppercase
characters, '+' (plus sign), and '/' (forward slash).
The equal sign '=' is used as encoding end marker.
The recovery flag is used
at verification time to discriminate among recovered
public keys (and among address types in the case
of scheme extension beyond P2PKH).
Explicitly, the recovery flag value is:
key_id + (4 if compressed else 0) + 27
where:
- key_id is the index in the [0, 3] range identifying which of the
recovered public keys is the one associated to the address
- compressed indicates if the address is the hash of the compressed
public key representation
- 27 identifies a P2PKH address, which is the only kind of address
  supported by Bitcoin Core;
  when the recovery flag is in the [31, 34] range of compressed
  addresses, Electrum also checks for P2WPKH-P2SH and P2WPKH
  (SegWit always uses compressed public keys);
  BIP137 (Trezor) instead uses 35 and 39, rather than 27,
  for P2WPKH-P2SH and P2WPKH (respectively)
+----------+---------+-----------------------------------------------------+
| rec flag | key id | address type |
+==========+=========+=====================================================+
| 27 | 0 | P2PKH uncompressed |
+----------+---------+-----------------------------------------------------+
| 28 | 1 | P2PKH uncompressed |
+----------+---------+-----------------------------------------------------+
| 29 | 2 | P2PKH uncompressed |
+----------+---------+-----------------------------------------------------+
| 30 | 3 | P2PKH uncompressed |
+----------+---------+-----------------------------------------------------+
| 31 | 0 | P2PKH compressed (also Electrum P2WPKH-P2SH/P2WPKH) |
+----------+---------+-----------------------------------------------------+
| 32 | 1 | P2PKH compressed (also Electrum P2WPKH-P2SH/P2WPKH) |
+----------+---------+-----------------------------------------------------+
| 33 | 2 | P2PKH compressed (also Electrum P2WPKH-P2SH/P2WPKH) |
+----------+---------+-----------------------------------------------------+
| 34 | 3 | P2PKH compressed (also Electrum P2WPKH-P2SH/P2WPKH) |
+----------+---------+-----------------------------------------------------+
| 35 | 0 | BIP137 (Trezor) P2WPKH-P2SH |
+----------+---------+-----------------------------------------------------+
| 36 | 1 | BIP137 (Trezor) P2WPKH-P2SH |
+----------+---------+-----------------------------------------------------+
| 37 | 2 | BIP137 (Trezor) P2WPKH-P2SH |
+----------+---------+-----------------------------------------------------+
| 38 | 3 | BIP137 (Trezor) P2WPKH-P2SH |
+----------+---------+-----------------------------------------------------+
| 39 | 0 | BIP137 (Trezor) P2WPKH |
+----------+---------+-----------------------------------------------------+
| 40 | 1 | BIP137 (Trezor) P2WPKH |
+----------+---------+-----------------------------------------------------+
| 41 | 2 | BIP137 (Trezor) P2WPKH |
+----------+---------+-----------------------------------------------------+
| 42 | 3 | BIP137 (Trezor) P2WPKH |
+----------+---------+-----------------------------------------------------+
This implementation endorses the Electrum approach: a signature
generated with a compressed WIF (i.e. without explicit address or
with a compressed P2PKH address) is valid also for the
P2WPKH-P2SH and P2WPKH addresses derived from the same WIF.
Nonetheless, it is possible to obtain the BIP137 behaviour if
at signing time the compressed WIF is supplemented with
a P2WPKH-P2SH or P2WPKH address:
in this case the signature will be valid only for that same
address.
https://github.com/bitcoin/bitcoin/pull/524
https://github.com/bitcoin/bips/blob/master/bip-0137.mediawiki
"""
import base64
import secrets
from dataclasses import InitVar, dataclass
from hashlib import sha256
from typing import Optional, Tuple, Type, Union
from btclib.alias import BinaryData, Octets, String
from btclib.b32 import has_segwit_prefix, p2wpkh, witness_from_address
from btclib.b58 import h160_from_address, p2pkh, p2wpkh_p2sh, wif_from_prv_key
from btclib.ecc import dsa
from btclib.ecc.curve import mult, secp256k1
from btclib.ecc.sec_point import bytes_from_point
from btclib.exceptions import BTClibValueError
from btclib.hashes import hash160, magic_message
from btclib.network import NETWORKS
from btclib.to_prv_key import PrvKey, prv_keyinfo_from_prv_key
from btclib.utils import bytesio_from_binarydata
_REQUIRED_LENGTH = 65
@dataclass(frozen=True)
class Sig:
# 1 byte
rf: int
dsa_sig: dsa.Sig
check_validity: InitVar[bool] = True
def __post_init__(self, check_validity: bool) -> None:
if check_validity:
self.assert_valid()
def assert_valid(self) -> None:
if self.rf < 27 or self.rf > 42:
raise BTClibValueError(f"invalid recovery flag: {self.rf}")
self.dsa_sig.assert_valid()
if self.dsa_sig.ec != secp256k1:
raise BTClibValueError(f"invalid curve: {self.dsa_sig.ec.name}")
def serialize(self, check_validity: bool = True) -> bytes:
if check_validity:
self.assert_valid()
# [1-byte recovery flag][32-bytes r][32-bytes s]
n_size = self.dsa_sig.ec.n_size
return b"".join(
[
self.rf.to_bytes(1, byteorder="big", signed=False),
self.dsa_sig.r.to_bytes(n_size, byteorder="big", signed=False),
self.dsa_sig.s.to_bytes(n_size, byteorder="big", signed=False),
]
)
def b64encode(self, check_validity: bool = True) -> str:
"""Return the BMS address-based signature as base64-encoding.
First off, the signature is serialized in the
[1-byte rf][32-bytes r][32-bytes s] compact format,
then it is base64-encoded.
"""
data_binary = self.serialize(check_validity)
return base64.b64encode(data_binary).decode("ascii")
@classmethod
def parse(cls: Type["Sig"], data: BinaryData, check_validity: bool = True) -> "Sig":
stream = bytesio_from_binarydata(data)
        sig_bin = stream.read(_REQUIRED_LENGTH)
        if check_validity and len(sig_bin) != _REQUIRED_LENGTH:
            err_msg = f"invalid decoded length: {len(sig_bin)}"
            err_msg += f" instead of {_REQUIRED_LENGTH}"
raise BTClibValueError(err_msg)
rf = sig_bin[0]
ec = secp256k1
n_size = ec.n_size
r = int.from_bytes(sig_bin[1 : 1 + n_size], "big", signed=False)
s = int.from_bytes(sig_bin[1 + n_size : 1 + 2 * n_size], "big", signed=False)
dsa_sig = dsa.Sig(r, s, ec, check_validity=False)
return cls(rf, dsa_sig, check_validity)
@classmethod
def b64decode(cls: Type["Sig"], data: String, check_validity: bool = True) -> "Sig":
"""Return the verified components of the provided BMS signature.
The address-based BMS signature can be represented
as (rf, r, s) tuple or as base64-encoding of the compact format
[1-byte rf][32-bytes r][32-bytes s].
"""
if isinstance(data, str):
data = data.strip()
data_decoded = base64.b64decode(data)
# pylance cannot grok the following line
return cls.parse(data_decoded, check_validity) # type: ignore
def gen_keys(
prv_key: Optional[PrvKey] = None,
network: Optional[str] = None,
compressed: Optional[bool] = None,
) -> Tuple[str, str]:
"""Return a private/public key pair.
The private key is a WIF, the public key is a base58 P2PKH address.
"""
if prv_key is None:
if network is None:
network = "mainnet"
ec = NETWORKS[network].curve
# q in the range [1, ec.n-1]
prv_key = 1 + secrets.randbelow(ec.n - 1)
wif = wif_from_prv_key(prv_key, network, compressed)
return wif, p2pkh(wif)
def sign(msg: Octets, prv_key: PrvKey, addr: Optional[String] = None) -> Sig:
"Generate address-based compact signature for the provided message."
# first sign the message
magic_msg = magic_message(msg)
q, network, compressed = prv_keyinfo_from_prv_key(prv_key)
dsa_sig = dsa.sign(magic_msg, q)
# now calculate the key_id
# TODO do the match in Jacobian coordinates avoiding mod_inv
pub_keys = dsa.recover_pub_keys(magic_msg, dsa_sig)
Q = mult(q)
# key_id is in [0, 3]
# first two bits in rf are reserved for it
key_id = pub_keys.index(Q)
pub_key = bytes_from_point(Q, compressed=compressed)
if isinstance(addr, str):
addr = addr.strip()
elif isinstance(addr, bytes):
addr = addr.decode("ascii")
# finally, calculate the recovery flag
if addr is None or addr == p2pkh(pub_key, network, compressed):
rf = key_id + 27
# third bit in rf is reserved for the 'compressed' boolean
rf += 4 if compressed else 0
# BIP137
elif addr == p2wpkh_p2sh(pub_key, network):
rf = key_id + 35
elif addr == p2wpkh(pub_key, network):
rf = key_id + 39
else:
raise BTClibValueError("mismatch between private key and address")
return Sig(rf, dsa_sig)
def assert_as_valid(
msg: Octets, addr: String, sig: Union[Sig, String], lower_s: bool = True
) -> None:
# Private function for test/dev purposes
# It raises Errors, while verify should always return True or False
if isinstance(sig, Sig):
sig.assert_valid()
else:
sig = Sig.b64decode(sig)
# first two bits in rf are reserved for key_id
# key_id = 00; key_id = 01; key_id = 10; key_id = 11
# 27-27 = 000000; 28-27 = 000001; 29-27 = 000010; 30-27 = 000011
# 31-27 = 000100; 32-27 = 000101; 33-27 = 000110; 34-27 = 000111
# 35-27 = 001000; 36-27 = 001001; 37-27 = 001010; 38-27 = 001011
# 39-27 = 001100; 40-27 = 001101; 41-27 = 001110; 42-27 = 001111
key_id = sig.rf - 27 & 0b11
magic_msg = magic_message(msg)
Q = dsa.recover_pub_key(key_id, magic_msg, sig.dsa_sig, lower_s, sha256)
compressed = sig.rf > 30
# signature is valid only if the provided address is matched
pub_key = bytes_from_point(Q, compressed=compressed)
if has_segwit_prefix(addr):
wit_ver, h160, _ = witness_from_address(addr)
if wit_ver != 0 or len(h160) != 20:
raise BTClibValueError(f"not a p2wpkh address: {addr!r}")
if not (30 < sig.rf < 35 or sig.rf > 38):
raise BTClibValueError(f"invalid p2wpkh address recovery flag: {sig.rf}")
if hash160(pub_key) != h160:
raise BTClibValueError(f"invalid p2wpkh address: {addr!r}")
return
script_type, h160, _ = h160_from_address(addr)
if script_type == "p2pkh":
if sig.rf > 34:
raise BTClibValueError(f"invalid p2pkh address recovery flag: {sig.rf}")
if hash160(pub_key) != h160:
raise BTClibValueError(f"invalid p2pkh address: {addr!r}")
return
# must be P2WPKH-P2SH
if not 30 < sig.rf < 39:
raise BTClibValueError(f"invalid p2wpkh-p2sh address recovery flag: {sig.rf}")
script_pk = b"\x00\x14" + hash160(pub_key)
if hash160(script_pk) != h160:
raise BTClibValueError(f"invalid p2wpkh-p2sh address: {addr!r}")
def verify(
msg: Octets, addr: String, sig: Union[Sig, String], lower_s: bool = True
) -> bool:
"Verify address-based compact signature for the provided message."
    # all kinds of Exceptions are caught because
# verify must always return a bool
try:
assert_as_valid(msg, addr, sig, lower_s)
except Exception: # pylint: disable=broad-except
return False
else:
return True
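# Minimal usage sketch (not part of the original module; assumes a freshly
# generated mainnet key pair):
#     wif, addr = gen_keys()
#     sig = sign(b"test message", wif, addr)
#     assert verify(b"test message", addr, sig)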
|
the-stack_106_28502 | # vim: set fileencoding=utf-8:
#
# GPIO Zero: a library for controlling the Raspberry Pi's GPIO pins
# Copyright (c) 2016-2019 Dave Jones <[email protected]>
# Copyright (c) 2019 Ben Nuttall <[email protected]>
# Copyright (c) 2018 Rick Ansell <[email protected]>
# Copyright (c) 2016 Andrew Scheller <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
str = type('')
import math
import cmath
import weakref
import operator
import functools
# Handles pre 3.3 versions of Python without collections.abc
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
# Back-ported from python 3.5; see
# github.com/PythonCHB/close_pep/blob/master/is_close.py for original
# implementation
def isclose(a, b, rel_tol=1e-9, abs_tol=0.0):
if rel_tol < 0.0 or abs_tol < 0.0:
raise ValueError('error tolerances must be non-negative')
if a == b: # fast-path for exact equality
return True
if cmath.isinf(a) or cmath.isinf(b):
return False
diff = abs(b - a)
return (
(diff <= abs(rel_tol * b)) or
(diff <= abs(rel_tol * a)) or
(diff <= abs_tol)
)
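# For example, isclose(0.1 + 0.2, 0.3) is True despite floating point
# representation error, while isclose(1.0, 1.1) is False.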
# Backported from py3.4
def mean(data):
if iter(data) is data:
data = list(data)
n = len(data)
if not n:
raise ValueError('cannot calculate mean of empty data')
return sum(data) / n
# Backported from py3.4
def median(data):
data = sorted(data)
n = len(data)
if not n:
raise ValueError('cannot calculate median of empty data')
elif n % 2:
return data[n // 2]
else:
i = n // 2
return (data[i - 1] + data[i]) / 2
# Backported from py3.3
def log2(x):
return math.log(x, 2)
# Copied from the MIT-licensed https://github.com/slezica/python-frozendict
class frozendict(Mapping):
def __init__(self, *args, **kwargs):
self.__dict = dict(*args, **kwargs)
self.__hash = None
def __getitem__(self, key):
return self.__dict[key]
def copy(self, **add_or_replace):
return frozendict(self, **add_or_replace)
def __iter__(self):
return iter(self.__dict)
def __len__(self):
return len(self.__dict)
def __repr__(self):
return '<frozendict %s>' % repr(self.__dict)
def __hash__(self):
if self.__hash is None:
hashes = map(hash, self.items())
self.__hash = functools.reduce(operator.xor, hashes, 0)
return self.__hash
# Backported from py3.4
class WeakMethod(weakref.ref):
"""
A custom `weakref.ref` subclass which simulates a weak reference to
a bound method, working around the lifetime problem of bound methods.
"""
__slots__ = "_func_ref", "_meth_type", "_alive", "__weakref__"
def __new__(cls, meth, callback=None):
try:
obj = meth.__self__
func = meth.__func__
except AttributeError:
raise TypeError("argument should be a bound method, not {0}"
.format(type(meth)))
def _cb(arg):
# The self-weakref trick is needed to avoid creating a reference
# cycle.
self = self_wr()
if self._alive:
self._alive = False
if callback is not None:
callback(self)
self = weakref.ref.__new__(cls, obj, _cb)
self._func_ref = weakref.ref(func, _cb)
self._meth_type = type(meth)
self._alive = True
self_wr = weakref.ref(self)
return self
def __call__(self):
obj = super(WeakMethod, self).__call__()
func = self._func_ref()
if obj is None or func is None:
return None
return self._meth_type(func, obj)
def __eq__(self, other):
if isinstance(other, WeakMethod):
if not self._alive or not other._alive:
return self is other
return weakref.ref.__eq__(self, other) and self._func_ref == other._func_ref
return False
def __ne__(self, other):
if isinstance(other, WeakMethod):
if not self._alive or not other._alive:
return self is not other
return weakref.ref.__ne__(self, other) or self._func_ref != other._func_ref
return True
__hash__ = weakref.ref.__hash__
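# Usage sketch: wm = WeakMethod(obj.method); wm() returns the bound method while
# obj is alive, and None once obj has been garbage collected.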
|
the-stack_106_28506 | """
Trading gateway for the ้ฃ็ณ่ดญ้ sugar procurement exchange.
"""
import json
import hashlib
import sys
from copy import copy
from datetime import datetime
import pytz
from typing import Dict, List, Any, Callable, Type, Union
from types import TracebackType
from functools import lru_cache
import requests
import wmi
from vnpy.api.rest import RestClient, Request
from vnpy.trader.constant import (
Direction,
Offset,
Exchange,
Product,
Status
)
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.event import EVENT_TIMER
from vnpy.trader.object import (
TickData,
OrderData,
TradeData,
AccountData,
PositionData,
ContractData,
BarData,
OrderRequest,
CancelRequest,
SubscribeRequest,
HistoryRequest
)
# REST_HOST = "http://seedtest.ap-ec.cn/CT-OPEN-SERVER" # ๆต่ฏ็ฏๅข่ฎฟ้ฎไธป่ทฏๅพ
REST_HOST = "http://seed.ap-ec.cn/CT-OPEN-SERVER" # ็ไบง็ฏๅข่ฎฟ้ฎไธป่ทฏๅพ
CALLBACK_TYPE = Callable[[dict, "Request"], Any]
ON_FAILED_TYPE = Callable[[int, "Request"], Any]
ON_ERROR_TYPE = Callable[[Type, Exception, TracebackType, "Request"], Any]
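# Order status strings returned by the exchange API are kept verbatim in
# Chinese because they are matched against the "entrustStatusStr" field;
# roughly: reported, partially cancelled, cancelled, partially traded,
# fully traded, invalid.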
STATUS_SUGAR2VT = {
"ๅทฒๆฅ": Status.NOTTRADED,
"้จๆค": Status.CANCELLED,
"ๅทฒๆค": Status.CANCELLED,
"้จๆ": Status.PARTTRADED,
"ๅทฒๆ": Status.ALLTRADED,
"ๅบๅ": Status.INVALID,
}
DIRECTION_VT2SUGAR = {Direction.LONG: "1", Direction.SHORT: "2"}
DIRECTION_SUGAR2VT = {v: k for k, v in DIRECTION_VT2SUGAR.items()}
OFFSET_VT2SUGAR = {Offset.OPEN: "1", Offset.CLOSE: "2"}
OFFSET_SUGAR2VT = {v: k for k, v in OFFSET_VT2SUGAR.items()}
CHINA_TZ = pytz.timezone("Asia/Shanghai")
class SugarGateway(BaseGateway):
"""
    Trading gateway for the ้ฃ็ณ่ดญ้ sugar procurement exchange.
"""
default_setting: Dict[str, Any] = {
"ๅผๆพ่ดฆๅท": "",
"ๅ ๅฏKEY": "",
"TOKEN": "",
"IPๅฐๅ": "",
"ไผ่ฏ็บฟ็จ": 8,
}
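    # The setting keys are kept in Chinese because connect() looks them up
    # verbatim; in order they mean: open account, encryption key, token,
    # IP address, number of session threads. Example (hypothetical values):
    #     {"ๅผๆพ่ดฆๅท": "9001", "ๅ ๅฏKEY": "...", "TOKEN": "...",
    #      "IPๅฐๅ": "127.0.0.1", "ไผ่ฏ็บฟ็จ": 8}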
exchanges: List[Exchange] = [Exchange.SR]
def __init__(self, event_engine):
"""Constructor"""
super().__init__(event_engine, "SUGAR")
self.rest_api = SugarRestApi(self)
self.orders: Dict[str, OrderData] = {}
def get_order(self, orderid: str) -> OrderData:
""""""
return self.orders.get(orderid, None)
def on_order(self, order: OrderData) -> None:
""""""
self.orders[order.orderid] = order
super().on_order(order)
def connect(self, setting: dict) -> None:
""""""
open_account = setting["ๅผๆพ่ดฆๅท"]
key = setting["ๅ ๅฏKEY"]
token = setting["TOKEN"]
ip_address = setting["IPๅฐๅ"]
session_number = setting["ไผ่ฏ็บฟ็จ"]
self.rest_api.connect(open_account, key, token, ip_address, session_number)
self.init_query()
def subscribe(self, req: SubscribeRequest) -> None:
""""""
self.rest_api.subscribe(req)
def send_order(self, req: OrderRequest) -> str:
""""""
return self.rest_api.send_order(req)
def cancel_order(self, req: CancelRequest) -> None:
""""""
self.rest_api.cancel_order(req)
def query_trade(self) -> None:
""""""
self.rest_api.query_trade()
def query_order(self) -> None:
""""""
self.rest_api.query_order()
def query_quotes(self) -> None:
""""""
self.rest_api.query_quotes()
def query_position(self) -> None:
""""""
self.rest_api.query_position()
def query_account(self) -> None:
""""""
self.rest_api.query_account()
def query_history(self, req: HistoryRequest):
""""""
pass
def close(self) -> None:
""""""
self.rest_api.stop()
self.rest_api.join()
def process_timer_event(self, event):
""""""
for func in self.query_functions:
func()
def init_query(self):
""""""
self.query_functions = [self.query_order, self.query_trade, self.query_quotes, self.query_position, self.query_account]
self.event_engine._interval = 1
self.event_engine.register(EVENT_TIMER, self.process_timer_event)
class SugarRestApi(RestClient):
"""
SUGAR REST API
"""
def __init__(self, gateway: BaseGateway):
""""""
super().__init__()
self.gateway: SugarGateway = gateway
self.gateway_name: str = gateway.gateway_name
self.open_account: str = ""
self.key: str = ""
self.token: str = ""
self.ip_address: str = ""
self.mac_address: str = self.get_mac_address()
self.subscribe_symbol: set = set()
self.trade_id: set = set()
self.order_count = 0
self.callback_dt = None
self.up_dn_limit = {}
def connect(
self,
open_account: str,
key: str,
token: str,
ip_address: str,
session_number: int,
) -> None:
"""
Initialize connection to REST server.
"""
self.open_account = open_account
self.key = key
self.token = token
self.ip_address = ip_address
self.init(REST_HOST)
self.start(session_number)
self.gateway.write_log("้ฃ็ณ่ดญ้ๆฅๅฃๅฏๅจๆๅ")
self.query_contract()
def query_contract(self) -> None:
""""""
requestBody={
"marketId": "000"
}
self.add_request(
method="POST",
path="/quotes/getQuotes.magpie",
data=requestBody,
callback=self.on_query_contract
)
def query_account(self) -> None:
""""""
requestBody = {
"marketId":"000"
}
self.add_request(
method="POST",
path="/hps/getAccountInfo.magpie",
data=requestBody,
callback=self.on_query_account
)
def query_position(self) -> None:
""""""
requestBody={
"marketId": "000",
}
self.add_request(
method="POST",
path="/ct/postionListByGoodsCode.magpie",
data=requestBody,
callback=self.on_query_position
)
def query_quotes(self) -> None:
""""""
requestBody = {
"marketId": "000"
}
self.add_request(
method="POST",
path="/quotes/getQuotes.magpie",
data=requestBody,
callback=self.on_query_quotes
)
def query_order(self) -> None:
""""""
requestBody = {
"marketId":"000",
"currentPage":"1",
"pageSize":"10000",
}
self.add_request(
method="POST",
path="/ct/entrustGridList.magpie",
data=requestBody,
callback=self.on_query_order
)
if self.callback_dt and (datetime.now() - self.callback_dt).seconds > 5:
self.gateway.write_log("ๆฅๅฃ่ฏทๆฑๅๅบ้ด้่ถ
่ฟ5็ง๏ผๅ็้ปๅก๏ผ")
print("ๆฅๅฃ่ฏทๆฑๅๅบ้ด้่ถ
่ฟ5็ง๏ผๅ็้ปๅก๏ผ")
def query_trade(self) -> None:
""""""
requestBody = {
"marketId":"000",
"currentPage":"1",
"pageSize":"10000",
}
self.add_request(
method="POST",
path="/ct/tradeGridDetailList.magpie",
data=requestBody,
callback=self.on_query_trade
)
def query_history(self, req: HistoryRequest) -> List[BarData]:
""""""
pass
def subscribe(self, req: SubscribeRequest) -> None:
""""""
self.subscribe_symbol.add(req.symbol)
def send_order(self, req: OrderRequest) -> str:
""""""
orderid = self.new_orderid()
order: OrderData = req.create_order_data(orderid, self.gateway_name)
order.datetime = CHINA_TZ.localize(datetime.now())
order.price = int(order.price)
order.volume = int(order.volume)
requestBody = {
"goodscode": req.symbol,
"entrustPrice": int(req.price * 100),
"entrustAmountH": int(req.volume),
"entrustBs": DIRECTION_VT2SUGAR.get(req.direction, ""),
"entrustTs": OFFSET_VT2SUGAR.get(req.offset, ""),
"opEntrustWay": "0",
"entrustWay": "1",
"macAddress": self.mac_address,
"ipAddress": self.ip_address
}
self.add_request(
method="POST",
path="/ct/tradeEntrustOrder.magpie",
callback=self.on_send_order,
data=requestBody,
extra=order,
on_error=self.on_send_order_error,
on_failed=self.on_send_order_failed
)
self.gateway.on_order(order)
return order.vt_orderid
def cancel_order(self, req: CancelRequest) -> None:
""""""
order = self.gateway.orders.get(req.orderid, None)
if not order or not order.entrustNo:
self.gateway.write_log("ๆชๆพๅฐๅฏนๅบๅงๆ๏ผๆ ๆณๆค้ใ")
return
requestBody = {
"entrustNo": order.entrustNo,
"macAddress": self.mac_address,
"ipAddress": self.ip_address,
"opEntrustWay": "0"
}
self.add_request(
method="POST",
path="/ct/cancelEntrustOrder.magpie",
data=requestBody,
callback=self.on_cancel_order,
extra=req
)
def on_query_contract(self, data: dict, request: Request) -> None:
""""""
if self.check_error(data, "ๆฅ่ฏขๅ็บฆ"):
return
for d in data["data"]["responseBody"]:
if not d["goodsCode"].startswith("GS"):
continue
contract = ContractData(
symbol=d["goodsCode"],
exchange=Exchange.SR,
name=d["goodsName"],
pricetick=1,
size=1,
min_volume=1,
margin_ratio=0.1,
product=Product.SPOT,
stop_supported=False,
net_position=False,
history_data=False,
gateway_name=self.gateway_name,
)
self.gateway.on_contract(contract)
            # Query the up/down limit prices for this procurement plan
requestBody = {"goodsCode": d["goodsCode"]}
signature = self.create_signature(requestBody)
request_time = datetime.strftime(datetime.now(),"%Y-%m-%d %H:%M:%S.%f")
request_data = {
"requestHeader":{
"token": self.token,
"sign": signature,
"yqMemberId": self.open_account,
"merRequestNo": request_time,
"merRequestTime": request_time[:-7]
},
"requestBody": requestBody
}
url = self.url_base + "/ct/selectGoodsInfo.magpie"
response = requests.post(url=url, json=request_data, headers={'Content-Type':'application/json'})
response_data = response.json()
try:
self.up_dn_limit.update({
d["goodsCode"]:{
"limit_up": response_data["data"]["responseBody"]["uplimitedPrice"] / 100,
"limit_down": response_data["data"]["responseBody"]["downlimitedPrice"] / 100,
}
})
except:
pass
self.gateway.write_log("ๅ็บฆไฟกๆฏๆฅ่ฏขๆๅ")
def on_query_account(self, data: dict, request: Request) -> None:
""""""
if self.check_error(data, "ๆฅ่ฏข่ดฆๆท"):
return
for d in data["data"]["responseBody"]:
account = AccountData(
accountid=self.open_account,
balance=d["currentBalance"] / 100,
pre_balance=d["beginBalance"] / 100,
available=d["canUseBalance"] / 100,
frozen=d["forbidBalance"] / 100,
commission=d["payFee"] / 100,
margin=(d["lxExDepositBanlance"] + d["lxPreDepositBanlance"]) / 100,
date=str(datetime.now().date()),
time=str(datetime.now().time()),
gateway_name=self.gateway_name,
)
try:
                account.percent = round(1 - account.available / account.balance, 3) * 100  # capital utilization rate (%)
except ZeroDivisionError:
account.percent = 0
self.gateway.on_account(account)
def on_query_position(self, data: dict, request: Request) -> None:
""""""
if self.check_error(data, "ๆฅ่ฏขๆไป"):
return
for d in data["data"]["responseBody"][::-1]:
if not d.get("goodsCode", None):
continue
if d["buyHoldAmount"]:
long_position = PositionData(
gateway_name=self.gateway_name,
symbol=d["goodsCode"],
exchange=Exchange.SR,
direction=Direction.LONG,
volume=d["buyHoldAmount"],
frozen=d["buyLockedAmount"],
price=d["buyAvgPrice"] / 100,
pnl=0,
)
self.gateway.on_position(long_position)
if d["sellHoldAmount"]:
short_position = PositionData(
gateway_name=self.gateway_name,
symbol=d["goodsCode"],
exchange=Exchange.SR,
direction=Direction.SHORT,
volume=d["sellHoldAmount"],
frozen=d["sellLockedAmount"],
price=d["sellAvgPrice"] / 100,
pnl=0,
)
self.gateway.on_position(short_position)
def on_query_quotes(self, data: dict, request: Request) -> None:
""""""
if self.check_error(data, "ๆฅ่ฏข่กๆ
"):
return
for d in data["data"]["responseBody"]:
if d["goodsCode"] not in self.subscribe_symbol:
continue
dt = CHINA_TZ.localize(datetime.now())
tick = TickData(
symbol=d["goodsCode"],
exchange=Exchange.SR,
datetime=dt,
name=d["goodsName"],
volume=int(d["transactionVolume"]),
# open_interest=d["currentQuantity"],
last_price=int(d["newDealPrice"]),
limit_up=self.up_dn_limit.get(d["goodsCode"], 0)["limit_up"],
limit_down=self.up_dn_limit.get(d["goodsCode"], 0)["limit_down"],
open_price=int(d["openingPrice"]),
high_price=int(d["highestPrice"]),
low_price=int(d["lowestPrice"]),
pre_close=int(d["closePrice"]),
bid_price_1=int(d["buyPrice1"]),
ask_price_1=int(d["sellPrice1"]),
bid_volume_1=int(d["buyContractVolume1"]),
ask_volume_1=int(d["sellContractVolume1"]),
gateway_name=self.gateway_name
)
self.gateway.on_tick(tick)
def on_query_order(self, data: dict, request: Request) -> None:
""""""
if self.check_error(data, "ๆฅ่ฏขๅงๆ"):
return
responseBody = data["data"]["responseBody"]
if not responseBody.get("items", None):
return
for d in responseBody["items"][::-1]:
timestamp = f'{d["tradingDate"]} {d["entrustTime"]}'
dt = CHINA_TZ.localize(datetime.strptime(timestamp, "%Y%m%d %H%M%S"))
entrustNo = str(d["entrustNo"])
orderid = self.gateway.orders.get(entrustNo, None)
if not orderid:
orderid = self.new_orderid()
order = OrderData(
gateway_name=self.gateway_name,
symbol=d["goodsCode"],
exchange=Exchange.SR,
orderid=orderid,
direction=DIRECTION_SUGAR2VT.get(str(d["entrustBs"]), None),
offset=OFFSET_SUGAR2VT.get(str(d["entrustTs"]), None),
price=int(d["entrustPrice"] / 100),
volume=int(d["entrustAmount"]),
traded=int(d["businessAmount"]),
status=STATUS_SUGAR2VT.get(d["entrustStatusStr"], None),
datetime=dt
)
order.entrustNo = entrustNo
self.gateway.orders[entrustNo] = orderid
self.gateway.on_order(order)
else:
order: OrderData = self.gateway.orders.get(orderid, None)
if order.status == Status.SUBMITTING:
if d["entrustStatusStr"] == "ๅทฒๆฅ":
order.status = Status.NOTTRADED
self.gateway.on_order(order)
elif d["entrustStatusStr"] == "ๅทฒๆค" or d["entrustStatusStr"] == "้จๆค":
order.status = Status.CANCELLED
self.gateway.on_order(order)
elif d["entrustStatusStr"] == "ๅทฒๆ":
order.status = Status.ALLTRADED
order.traded = int(d["businessAmount"])
self.gateway.on_order(order)
elif d["entrustStatusStr"] == "้จๆ":
order.status = Status.PARTTRADED
order.traded = int(d["businessAmount"])
self.gateway.on_order(order)
elif d["entrustStatusStr"] == "ๅบๅ":
order.status = Status.INVALID
self.gateway.on_order(order)
elif order.status == Status.NOTTRADED and d["entrustStatusStr"] != "已报":
if d["entrustStatusStr"] == "已撤" or d["entrustStatusStr"] == "部撤":
order.status = Status.CANCELLED
self.gateway.on_order(order)
elif d["entrustStatusStr"] == "已成":
order.status = Status.ALLTRADED
order.traded = int(d["businessAmount"])
self.gateway.on_order(order)
elif d["entrustStatusStr"] == "部成":
order.status = Status.PARTTRADED
order.traded = int(d["businessAmount"])
self.gateway.on_order(order)
elif d["entrustStatusStr"] == "废单":
order.status = Status.INVALID
self.gateway.on_order(order)
elif order.status == Status.PARTTRADED:
if d["entrustStatusStr"] == "已成":
order.status = Status.ALLTRADED
order.traded = int(d["businessAmount"])
self.gateway.on_order(order)
elif d["entrustStatusStr"] == "部成" and order.traded < int(d["businessAmount"]):
order.status = Status.PARTTRADED
order.traded = int(d["businessAmount"])
self.gateway.on_order(order)
elif d["entrustStatusStr"] == "部撤" or d["entrustStatusStr"] == "已撤":
order.status = Status.CANCELLED
self.gateway.on_order(order)
elif d["entrustStatusStr"] == "废单":
order.status = Status.INVALID
self.gateway.on_order(order)
elif order.status == Status.EXCEPTION or order.status == Status.ERROR:
if d["entrustStatusStr"] == "已报":
order.status = Status.NOTTRADED
self.gateway.on_order(order)
elif d["entrustStatusStr"] == "已撤" or d["entrustStatusStr"] == "部撤":
order.status = Status.CANCELLED
self.gateway.on_order(order)
elif d["entrustStatusStr"] == "已成":
order.status = Status.ALLTRADED
order.traded = int(d["businessAmount"])
self.gateway.on_order(order)
elif d["entrustStatusStr"] == "部成":
order.status = Status.PARTTRADED
order.traded = int(d["businessAmount"])
self.gateway.on_order(order)
elif d["entrustStatusStr"] == "废单":
order.status = Status.INVALID
self.gateway.on_order(order)
print(f"{datetime.now()}\tAbnormal order status, info: {d}")
print("*"*80)
self.callback_dt = datetime.now()
def on_query_trade(self, data: dict, request: Request) -> None:
""""""
if self.check_error(data, "query trade"):
return
responseBody = data["data"]["responseBody"]
if not responseBody.get("items", None):
return
for d in responseBody["items"][::-1]:
orderid = self.gateway.orders.get(str(d["entrustNo"]), None)
if not orderid:
continue
id = d["id"]
if id not in self.trade_id:
timestamp = f'{d["tradingDate"]} {d["businessTime"]}'
dt = CHINA_TZ.localize(datetime.strptime(timestamp, "%Y%m%d %H%M%S"))
order: OrderData = self.gateway.orders.get(orderid, None)
trade = TradeData(
symbol=order.symbol,
exchange=Exchange.SR,
orderid=order.orderid,
tradeid=id,
direction=order.direction,
offset=order.offset,
price=int(d["businessPrice"]),
volume=int(d["businessAmount"]),
datetime=dt,
gateway_name=self.gateway_name,
)
self.trade_id.add(id)
self.gateway.on_trade(trade)
def on_send_order(self, data: dict, request: Request) -> None:
""""""
order: OrderData = request.extra
if self.check_error(data, "send order"):
order.status = Status.EXCEPTION
self.gateway.on_order(order)
return
entrustNo = str(data["data"]["responseBody"]["entrustNo"])
order.entrustNo = entrustNo
self.gateway.orders[entrustNo] = order.orderid
self.gateway.on_order(order)
def on_send_order_failed(self, status_code: str, request: Request) -> None:
"""
Callback when sending order failed on server.
"""
order = request.extra
order.status = Status.EXCEPTION
self.gateway.on_order(order)
msg = f"ๅงๆๅคฑ่ดฅ๏ผ็ถๆ็ ๏ผ{status_code}๏ผไฟกๆฏ๏ผ{request.response.text}"
self.gateway.write_log(msg)
def on_send_order_error(
self,
exception_type: type,
exception_value: Exception,
tb,
request: Request
) -> None:
"""
Callback when sending order caused exception.
"""
order = request.extra
order.status = Status.ERROR
self.gateway.on_order(order)
# Record exception if not ConnectionError
if not issubclass(exception_type, ConnectionError):
self.on_error(exception_type, exception_value, tb, request)
def on_cancel_order(self, data: dict, request: Request) -> None:
""""""
cancel_request = request.extra
order = self.gateway.get_order(cancel_request.orderid)
if not order:
return
if self.check_error(data, "cancel order"):
order.status = Status.EXCEPTION
else:
order.status = Status.CANCELLED
self.gateway.write_log(f"ๅงๆๆคๅๆๅ๏ผ{order.orderid}")
self.gateway.on_order(order)
def on_error(
self,
exception_type: type,
exception_value: Exception,
tb,
request: Request
) -> None:
"""
Callback to handler request exception.
"""
msg = f"่งฆๅๅผๅธธ๏ผ็ถๆ็ ๏ผ{exception_type}๏ผไฟกๆฏ๏ผ{exception_value}"
self.gateway.write_log(msg)
sys.stderr.write(
self.exception_detail(exception_type, exception_value, tb, request)
)
def check_error(self, data: dict, func: str = "") -> bool:
""""""
if data["succeed"]:
return False
error_code = data["errorCode"]
error_msg = data["errorMsg"]
self.gateway.write_log(f"{func}่ฏทๆฑๅบ้๏ผไปฃ็ ๏ผ{error_code}๏ผไฟกๆฏ๏ผ{error_msg}")
return True
def new_orderid(self):
""""""
prefix = datetime.now().strftime("%Y%m%d-%H%M%S-")
self.order_count += 1
suffix = str(self.order_count).rjust(8, "0")
orderid = prefix + suffix
return orderid
def sign(self, request: Request) -> Request:
"""
Generate SUGAR signature.
"""
signature = self.create_signature(request.data)
request_time = datetime.strftime(datetime.now(),"%Y-%m-%d %H:%M:%S.%f")
request_data = {
"requestHeader":{
"token": self.token,
"sign": signature,
"yqMemberId": self.open_account,
"merRequestNo": request_time,
"merRequestTime": request_time[:-7]
},
"requestBody": request.data
}
request.headers = {"Content-Type": "application/json"}
request.data = json.dumps(request_data)
return request
def create_signature(self, requestBody: dict) -> str:
body_data={}
for key, value in requestBody.items():
if value != "":
body_data[key] = value
body_str = ""
for key in sorted(body_data.keys()):
body_str += key + "=" + str(body_data[key]) + "&"
body_str = (body_str[:-1] + self.key).lower().replace(" ", "")
sign_str = get_sha1_secret_str(body_str)
return sign_str
def get_ip_address(self):
"""่ทๅ่ฎก็ฎๆบๅ
ฌ็ฝIPๅฐๅ"""
f = requests.get("http://myip.dnsomatic.com")
ip_address = f.text
return ip_address
def get_mac_address(self):
"""่ทๅ่ฎก็ฎๆบMAC็ฉ็ๅฐๅ(CMD่ฟ่ก"getmac"่ทๅ็ฉ็ๅฐๅ)"""
c = wmi.WMI()
mac_address = ""
for interface in c.Win32_NetworkAdapterConfiguration(IPEnabled=1):
mac_address = interface.MACAddress
return mac_address
@lru_cache(maxsize=999, typed=True)
def get_sha1_secret_str(body_str: str) -> str:
"""
Hash the given string with SHA1 and return the hexadecimal digest.
"""
sha = hashlib.sha1(body_str.encode('utf-8'))
encrypts = sha.hexdigest()
return encrypts
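# Signing convention sketch (illustrative only; "demo-secret-key" below is a
# made-up value, not a real credential): create_signature() joins the request
# body's non-empty fields as "key=value&" pairs in sorted key order, appends
# the account's secret key, lower-cases the string and strips spaces, then
# takes the SHA1 hex digest.
def _demo_signature(body: dict, secret_key: str = "demo-secret-key") -> str:
    fields = {k: v for k, v in body.items() if v != ""}
    body_str = "&".join(f"{k}={fields[k]}" for k in sorted(fields))
    return get_sha1_secret_str((body_str + secret_key).lower().replace(" ", ""))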
|
the-stack_106_28507 | import requests
import sys
import time
from lib.tracing import init_tracer
def say_hello(hello_to):
with tracer.start_active_span('say-hello') as scope:
scope.span.set_tag('hello-to', hello_to)
hello_str = format_string(hello_to)
print_hello(hello_str)
def format_string(hello_to):
with tracer.start_active_span('format') as scope:
hello_str = http_get(8081, 'format', 'helloTo', hello_to)
scope.span.log_kv({'event': 'string-format', 'value': hello_str})
return hello_str
def print_hello(hello_str):
with tracer.start_active_span('println') as scope:
http_get(8082, 'publish', 'helloStr', hello_str)
scope.span.log_kv({'event': 'println'})
def http_get(port, path, param, value):
url = 'http://localhost:%s/%s' % (port, path)
r = requests.get(url, params={param: value})
assert r.status_code == 200
return r.text
# main
assert len(sys.argv) == 2
tracer = init_tracer('hello-world')
hello_to = sys.argv[1]
say_hello(hello_to)
# yield to IOLoop to flush the spans
time.sleep(2)
tracer.close()
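# Usage note (assumed setup, following the OpenTracing tutorial this script is
# based on): a 'format' HTTP service must be listening on port 8081 and a
# 'publish' service on port 8082, after which the script is run as, e.g.:
#   python hello.py Bryan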
|
the-stack_106_28508 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from airflow import DAG
from airflow.contrib.sensors.aws_sqs_sensor import SQSSensor
from airflow.utils import timezone
from unittest.mock import patch, MagicMock
from airflow.exceptions import AirflowException
from moto import mock_sqs
from airflow.contrib.hooks.aws_sqs_hook import SQSHook
DEFAULT_DATE = timezone.datetime(2017, 1, 1)
class TestSQSSensor(unittest.TestCase):
def setUp(self):
args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE
}
self.dag = DAG('test_dag_id', default_args=args)
self.sensor = SQSSensor(
task_id='test_task',
dag=self.dag,
sqs_queue='test',
aws_conn_id='aws_default'
)
self.mock_context = MagicMock()
self.sqs_hook = SQSHook()
@mock_sqs
def test_poke_success(self):
self.sqs_hook.create_queue('test')
self.sqs_hook.send_message(queue_url='test', message_body='hello')
result = self.sensor.poke(self.mock_context)
self.assertTrue(result)
self.assertTrue("'Body': 'hello'" in str(self.mock_context['ti'].method_calls),
"context call should contain message hello")
@mock_sqs
def test_poke_no_messsage_failed(self):
self.sqs_hook.create_queue('test')
result = self.sensor.poke(self.mock_context)
self.assertFalse(result)
context_calls = []
self.assertTrue(self.mock_context['ti'].method_calls == context_calls, "context call should be same")
@patch('airflow.contrib.sensors.aws_sqs_sensor.SQSHook')
def test_poke_delete_raise_airflow_exception(self, mock_sqs_hook):
message = {'Messages': [{'MessageId': 'c585e508-2ea0-44c7-bf3e-d1ba0cb87834',
'ReceiptHandle': 'mockHandle',
'MD5OfBody': 'e5a9d8684a8edfed460b8d42fd28842f',
'Body': 'h21'}],
'ResponseMetadata': {'RequestId': '56cbf4aa-f4ef-5518-9574-a04e0a5f1411',
'HTTPStatusCode': 200,
'HTTPHeaders': {
'x-amzn-requestid': '56cbf4aa-f4ef-5518-9574-a04e0a5f1411',
'date': 'Mon, 18 Feb 2019 18:41:52 GMT',
'content-type': 'text/xml', 'mock_sqs_hook-length': '830'},
'RetryAttempts': 0}}
mock_sqs_hook().get_conn().receive_message.return_value = message
mock_sqs_hook().get_conn().delete_message_batch.return_value = \
{'Failed': [{'Id': '22f67273-4dbc-4c19-83b5-aee71bfeb832'}]}
with self.assertRaises(AirflowException) as context:
self.sensor.poke(self.mock_context)
self.assertTrue('Delete SQS Messages failed' in context.exception.args[0])
@patch('airflow.contrib.sensors.aws_sqs_sensor.SQSHook')
def test_poke_receive_raise_exception(self, mock_sqs_hook):
mock_sqs_hook().get_conn().receive_message.side_effect = Exception('test exception')
with self.assertRaises(Exception) as context:
self.sensor.poke(self.mock_context)
self.assertTrue('test exception' in context.exception.args[0])
if __name__ == '__main__':
unittest.main()
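# Minimal usage sketch (illustrative, not part of the test suite): how the
# sensor under test might be wired into a DAG. The queue name and start date
# are made up; only arguments exercised by the tests above are used.
#
# from airflow import DAG
# from airflow.contrib.sensors.aws_sqs_sensor import SQSSensor
# from airflow.utils import timezone
#
# dag = DAG('sqs_consumer', start_date=timezone.datetime(2019, 1, 1))
# wait_for_message = SQSSensor(
#     task_id='wait_for_sqs_message',
#     sqs_queue='my-queue',
#     aws_conn_id='aws_default',
#     dag=dag,
# )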
|
the-stack_106_28510 | """
A federated learning client at the edge server in a cross-silo training workload.
"""
import time
from dataclasses import dataclass
from plato.algorithms import registry as algorithms_registry
from plato.clients import base
from plato.config import Config
from plato.processors import registry as processor_registry
from plato.trainers import registry as trainers_registry
@dataclass
class Report:
"""Client report, to be sent to the federated learning server."""
client_id: str
num_samples: int
accuracy: float
average_accuracy: float
training_time: float
data_loading_time: float
class Client(base.Client):
"""A federated learning client at the edge server in a cross-silo training workload."""
def __init__(self, server, algorithm=None, trainer=None):
super().__init__()
self.server = server
self.algorithm = algorithm
self.trainer = trainer
def configure(self):
"""Prepare this edge client for training."""
super().configure()
if self.trainer is None:
self.trainer = trainers_registry.get()
self.trainer.set_client_id(self.client_id)
if self.algorithm is None:
self.algorithm = algorithms_registry.get(self.trainer)
self.algorithm.set_client_id(self.client_id)
# Pass inbound and outbound data payloads through processors for
# additional data processing
self.outbound_processor, self.inbound_processor = processor_registry.get(
"Client", client_id=self.client_id, trainer=self.trainer)
def load_data(self):
"""The edge client does not need to train models using local data."""
def load_payload(self, server_payload):
"""The edge client does not need to train models using local data."""
def process_server_response(self, server_response):
"""Additional client-specific processing on the server response."""
if 'current_global_round' in server_response:
self.server.current_global_round = server_response[
'current_global_round']
async def train(self):
"""The aggregation workload on an edge client."""
training_start_time = time.perf_counter()
# Signal edge server to select clients to start a new round of local aggregation
self.server.new_global_round_begins.set()
# Wait for the edge server to finish model aggregation
await self.server.model_aggregated.wait()
self.server.model_aggregated.clear()
# Extract model weights and biases
weights = self.algorithm.extract_weights()
# Generate a report for the server, performing model testing if applicable
if hasattr(Config().server,
'edge_do_test') and Config().server.edge_do_test:
accuracy = self.server.accuracy
else:
accuracy = 0
if Config().clients.do_test:
average_accuracy = self.server.accuracy
else:
average_accuracy = 0
training_time = time.perf_counter() - training_start_time
return Report(self.client_id, self.server.total_samples, accuracy,
average_accuracy, training_time, 0), weights
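# Usage sketch (assumption, not Plato's documented wiring): the edge server
# typically constructs this client with a reference to itself, e.g.
# `edge_client = Client(server=edge_server)`, calls `configure()`, and then
# awaits `train()` once per global round; `edge_server` here is a hypothetical
# variable name.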
|
the-stack_106_28511 | # Parallel implementation template from: https://gitlab.com/lucasrthompson/Sonic-Bot-In-OpenAI-and-NEAT
# PuyoPuyo gym environment from: https://github.com/frostburn/gym_puyopuyo
import os
import pickle
import numpy as np
from gym_puyopuyo import register
import gym
import neat
import visualize
NUM_WORKERS = 4 # number of workers for parallel genome score evaluation
NUM_RUNS = 5 # game runs per genome
NUM_GEN = 5000 # max number of generations
WIDTH = 3 # width for Small env is 3
NUM_COLORS = 3 # 3 colors in the small env mode
# TODO: could probably read color number from observation data
NUM_ACTIONS = 4 * WIDTH - 2 - 1
piece_shape = (3, 2)
fn_results = "feedforward-small"
register()
class Worker():
def __init__(self, genome, config):
self.genome = genome
self.config = config
def doWork(self):
self.env = gym.make("PuyoPuyoEndlessSmall-v2")
net = neat.nn.feed_forward.FeedForwardNetwork.create(self.genome, self.config)
total_reward = 0.0
for _ in range(NUM_RUNS):
ob = self.env.reset()
done = False
ticks = 0
while True:
pieces_sum, field_sum = self.multiplyMatrices(ob[0], ob[1])
next_piece = pieces_sum[0]
inp_piece = np.ndarray.flatten(next_piece)
inp_field = np.ndarray.flatten(field_sum)
inputs = np.hstack([inp_piece, inp_field])
nn_output = net.activate(inputs)
action = np.argmax(nn_output)
ob, rew, done, info = self.env.step(action)
# Reward for clearing line: +1
# Reward for 'surviving' (playing a turn): +1
ticks += 1
total_reward += rew
if done:
break
total_reward += ticks
# Average it out for a more precise result
return total_reward / NUM_RUNS
# Converts the 3d array (RGB) supplied by the game
# into a 1d array to be used as network input
def multiplyMatrices(self, pieces, field, norm = True):
pieces = pieces.astype(np.float64)
field = field.astype(np.float64)
pieces_sum = np.zeros(piece_shape)
field_sum = np.zeros(field[0].shape)
for i in range(0, len(pieces)):
pieces[i] = np.multiply(pieces[i], i + 1)
if(norm):
pieces[i] /= NUM_COLORS
pieces_sum += pieces[i]
for i in range(0, len(field)):
field[i] = np.multiply(field[i], i + 1)
if(norm):
field[i] /= NUM_COLORS
field_sum += field[i]
return pieces_sum, field_sum
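# Small illustration (defined but never called by the training loop) of what
# multiplyMatrices() does: each one-hot colour plane is weighted by its colour
# index and the planes are summed, collapsing a (colours, rows, cols)
# observation into one 2-D grid whose cell values identify the colour
# (the optional division by NUM_COLORS is omitted here).
def _demo_observation_collapse():
    one_hot = np.zeros((NUM_COLORS, 2, 2))
    one_hot[0, 0, 0] = 1  # colour 1 occupies cell (0, 0)
    one_hot[2, 1, 1] = 1  # colour 3 occupies cell (1, 1)
    collapsed = sum(one_hot[i] * (i + 1) for i in range(NUM_COLORS))
    print(collapsed)  # [[1. 0.] [0. 3.]]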
def eval_genome(genome, config):
peon = Worker(genome, config)
return peon.doWork()
def run():
# get config
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, 'config-feedforward-small')
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
config_path)
# set population and set reporting options
pop = neat.Population(config)
stats = neat.StatisticsReporter()
pop.add_reporter(stats)
pop.add_reporter(neat.StdOutReporter(True))
# Checkpoint every x generations or y minutes
pop.add_reporter(neat.Checkpointer(250, 900, "checkpoints/"+fn_results+"-checkpoint"))
#winner = pop.run(eval_genomes) # non-parallel
pe = neat.ParallelEvaluator(NUM_WORKERS, eval_genome)
winner = pop.run(pe.evaluate, NUM_GEN)
# save network
with open("results/winner-pickle-"+fn_results, 'wb') as f:
pickle.dump(winner, f)
#print(winner)
visualize.plot_stats(stats, ylog=True, view=True, filename="results/"+fn_results+"-fitness.svg")
visualize.plot_species(stats, view=True, filename="results/"+fn_results+"-speciation.svg")
visualize.draw_net(config, winner, view=True,
filename="results/winner-"+fn_results+".net")
visualize.draw_net(config, winner, view=True,
filename="results/winner-"+fn_results+"-enabled.net", show_disabled=False)
visualize.draw_net(config, winner, view=True,
filename="results/winner-"+fn_results+"-pruned.net", show_disabled=False, prune_unused=True)
if __name__ == "__main__":
run()
|
the-stack_106_28512 | import numpy as np
import csv
from decision_tree import DecisionTree
from collections import Counter
class RandomForest:
def __init__(self, num):
self.num = num
self.dts = []
for _ in range(num):
self.dts.append(DecisionTree())
def fit(self, x, y, detailed=False):
num_attribute = x.shape[1]
gap = num_attribute // self.num
for i in range(self.num-1):
self.dts[i].fit(x[:, i*gap:(i+1)*gap], y, detailed=detailed)
self.dts[-1].fit(x[:, (self.num-1)*gap:], y, detailed=detailed)
def predict(self, x):
votes = []
num_attribute = x.shape[1]
gap = num_attribute // self.num
for i in range(self.num-1):
votes.append(self.dts[i].predict(x[:, i*gap:(i+1)*gap]))
votes.append(self.dts[-1].predict(x[:, (self.num-1)*gap:]))
# print(votes)
return np.sum(np.array(votes), axis=0) > len(votes) / 2
def evaluate(self, x, y):
y_pred = self.predict(x)
return np.sum(np.array(y) == np.array(y_pred)) / len(y)
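# Design note: unlike a textbook random forest (bootstrap sampling plus random
# feature subsets per split), this implementation simply partitions the feature
# columns into `num` contiguous slices and trains one DecisionTree per slice;
# predict() then takes a plain majority vote, e.g. three trees voting
# [True, True, False] yield True for that sample.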
if __name__ == '__main__':
with open(r'..\..\data\tic_tac_toe.csv', newline='') as csvfile:
data = np.array(list(csv.reader(csvfile)))
# data = np.genfromtxt('tic_tac_toe.csv', delimiter=',', dtype=None)
np.random.shuffle(data)
train = data[:int(data.shape[0]*0.8), :]
test = data[int(data.shape[0]*0.8):, :]
train_x = train[:, :-1]
train_y = train[:, -1]
train_y = (train_y == "positive")
test_x = test[:, :-1]
test_y = test[:, -1]
test_y = (test_y == "positive")
rf = RandomForest(2)
rf.fit(train_x, train_y, detailed=False)
print(rf.evaluate(test_x, test_y)) |
the-stack_106_28515 | """
CLI configuration decorator to use TOML configuration files for click commands.
"""
## This section contains code copied and modified from [click_config_file][https://github.com/phha/click_config_file/blob/master/click_config_file.py]
## SPDX-License-Identifier: MIT
import os
import functools
import logging
from pathlib import Path
import click
from samcli.commands.exceptions import ConfigException
from samcli.lib.config.exceptions import SamConfigVersionException
from samcli.cli.context import get_cmd_names
from samcli.lib.config.samconfig import SamConfig, DEFAULT_ENV, DEFAULT_CONFIG_FILE_NAME
__all__ = ("TomlProvider", "configuration_option", "get_ctx_defaults")
LOG = logging.getLogger(__name__)
class TomlProvider:
"""
A parser for toml configuration files
:param cmd: sam command name as defined by click
:param section: section defined in the configuration file nested within `cmd`
"""
def __init__(self, section=None):
self.section = section
def __call__(self, config_path, config_env, cmd_names):
"""
Get resolved config based on the `file_path` for the configuration file,
`config_env` targeted inside the config file and corresponding `cmd_name`
as denoted by `click`.
:param config_path: The path of configuration file.
:param config_env: The name of the sectional config_env within configuration file.
:param cmd_names list(str): sam command name as defined by click
:returns dictionary containing the configuration parameters under specified config_env
"""
resolved_config = {}
# Use default sam config file name if config_path only contain the directory
config_file_path = (
Path(os.path.abspath(config_path)) if config_path else Path(os.getcwd(), DEFAULT_CONFIG_FILE_NAME)
)
config_file_name = config_file_path.name
config_file_dir = config_file_path.parents[0]
samconfig = SamConfig(config_file_dir, config_file_name)
# Enable debug level logging by environment variable "SAM_DEBUG"
if os.environ.get("SAM_DEBUG", "").lower() == "true":
LOG.setLevel(logging.DEBUG)
LOG.debug("Config file location: %s", samconfig.path())
if not samconfig.exists():
LOG.debug("Config file '%s' does not exist", samconfig.path())
return resolved_config
try:
LOG.debug(
"Loading configuration values from [%s.%s.%s] (env.command_name.section) in config file at '%s'...",
config_env,
cmd_names,
self.section,
samconfig.path(),
)
# NOTE(TheSriram): change from tomlkit table type to normal dictionary,
# so that click defaults work out of the box.
samconfig.sanity_check()
resolved_config = {k: v for k, v in samconfig.get_all(cmd_names, self.section, env=config_env).items()}
LOG.debug("Configuration values successfully loaded.")
LOG.debug("Configuration values are: %s", resolved_config)
except KeyError as ex:
LOG.debug(
"Error reading configuration from [%s.%s.%s] (env.command_name.section) "
"in configuration file at '%s' with : %s",
config_env,
cmd_names,
self.section,
samconfig.path(),
str(ex),
)
except SamConfigVersionException as ex:
LOG.debug("%s %s", samconfig.path(), str(ex))
raise ConfigException(f"Syntax invalid in samconfig.toml: {str(ex)}")
except Exception as ex:
LOG.debug("Error reading configuration file: %s %s", samconfig.path(), str(ex))
return resolved_config
def configuration_callback(cmd_name, option_name, saved_callback, provider, ctx, param, value):
"""
Callback for reading the config file.
Also takes care of calling user specified custom callback afterwards.
:param cmd_name: `sam` command name derived from click.
:param option_name: The name of the option. This is used for error messages.
:param saved_callback: User-specified callback to be called later.
:param provider: A callable that parses the configuration file and returns a dictionary
of the configuration parameters. Will be called as
`provider(file_path, config_env, cmd_name)`.
:param ctx: Click context
:param param: Click parameter
:param value: Specified value for config_env
:returns specified callback or the specified value for config_env.
"""
# ctx, param and value are default arguments for click specified callbacks.
ctx.default_map = ctx.default_map or {}
cmd_name = cmd_name or ctx.info_name
param.default = None
config_env_name = ctx.params.get("config_env") or DEFAULT_ENV
config_file = ctx.params.get("config_file") or DEFAULT_CONFIG_FILE_NAME
config_dir = getattr(ctx, "samconfig_dir", None) or os.getcwd()
# If --config-file is an absolute path, use it, if not, start from config_dir
config_file_name = config_file if os.path.isabs(config_file) else os.path.join(config_dir, config_file)
config = get_ctx_defaults(cmd_name, provider, ctx, config_env_name=config_env_name, config_file=config_file_name,)
ctx.default_map.update(config)
return saved_callback(ctx, param, config_env_name) if saved_callback else config_env_name
def get_ctx_defaults(cmd_name, provider, ctx, config_env_name, config_file=None):
"""
Get the set of the parameters that are needed to be set into the click command.
This function also figures out the command name by looking up current click context's parent
and constructing the parsed command name that is used in default configuration file.
If a given cmd_name is start-api, the parsed name is "local_start_api".
provider is called with `config_file`, `config_env_name` and `parsed_cmd_name`.
:param cmd_name: `sam` command name
:param provider: provider to be called for reading configuration file
:param ctx: Click context
:param config_env_name: config-env within configuration file, sam configuration file will be relative to the
supplied original template if its path is not specified
:param config_file: configuration file name
:return: dictionary of defaults for parameters
"""
return provider(config_file, config_env_name, get_cmd_names(cmd_name, ctx))
def configuration_option(*param_decls, **attrs):
"""
Adds configuration file support to a click application.
NOTE: This decorator should be added to the top of parameter chain, right below click.command, before
any options are declared.
Example:
>>> @click.command("hello")
@configuration_option(provider=TomlProvider(section="parameters"))
@click.option('--name', type=click.String)
def hello(name):
print("Hello " + name)
This will create a hidden click option whose callback function loads configuration parameters from default
configuration environment [default] in default configuration file [samconfig.toml] in the template file
directory.
:param preconfig_decorator_list: A list of click option decorator which need to place before this function. For
exmple, if we want to add option "--config-file" and "--config-env" to allow customized configuration file
and configuration environment, we will use configuration_option as below:
@configuration_option(
preconfig_decorator_list=[decorator_customize_config_file, decorator_customize_config_env],
provider=TomlProvider(section=CONFIG_SECTION),
)
By default, we enable these two options.
:param provider: A callable that parses the configuration file and returns a dictionary
of the configuration parameters. Will be called as
`provider(file_path, config_env, cmd_name)
"""
def decorator_configuration_setup(f):
configuration_setup_params = ()
configuration_setup_attrs = {}
configuration_setup_attrs[
"help"
] = "This is a hidden click option whose callback function loads configuration parameters."
configuration_setup_attrs["is_eager"] = True
configuration_setup_attrs["expose_value"] = False
configuration_setup_attrs["hidden"] = True
configuration_setup_attrs["type"] = click.STRING
provider = attrs.pop("provider")
saved_callback = attrs.pop("callback", None)
partial_callback = functools.partial(configuration_callback, None, None, saved_callback, provider)
configuration_setup_attrs["callback"] = partial_callback
return click.option(*configuration_setup_params, **configuration_setup_attrs)(f)
def composed_decorator(decorators):
def decorator(f):
for deco in decorators:
f = deco(f)
return f
return decorator
# Compose decorators here to make sure the context parameters are updated before callback function
decorator_list = [decorator_configuration_setup]
pre_config_decorators = attrs.pop(
"preconfig_decorator_list", [decorator_customize_config_file, decorator_customize_config_env]
)
for decorator in pre_config_decorators:
decorator_list.append(decorator)
return composed_decorator(decorator_list)
def decorator_customize_config_file(f):
"""
CLI option to customize configuration file name. By default it is 'samconfig.toml' in project directory.
Ex: --config-file samconfig.toml
:param f: Callback function passed by Click
:return: Callback function
"""
config_file_attrs = {}
config_file_param_decls = ("--config-file",)
config_file_attrs["help"] = (
"The path and file name of the configuration file containing default parameter values to use. "
"Its default value is 'samconfig.toml' in project directory. For more information about configuration files, "
"see: "
"https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-config.html."
)
config_file_attrs["default"] = "samconfig.toml"
config_file_attrs["is_eager"] = True
config_file_attrs["required"] = False
config_file_attrs["type"] = click.STRING
return click.option(*config_file_param_decls, **config_file_attrs)(f)
def decorator_customize_config_env(f):
"""
CLI option to customize configuration environment name. By default it is 'default'.
Ex: --config-env default
:param f: Callback function passed by Click
:return: Callback function
"""
config_env_attrs = {}
config_env_param_decls = ("--config-env",)
config_env_attrs["help"] = (
"The environment name specifying the default parameter values in the configuration file to use. "
"Its default value is 'default'. For more information about configuration files, see: "
"https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-config.html."
)
config_env_attrs["default"] = "default"
config_env_attrs["is_eager"] = True
config_env_attrs["required"] = False
config_env_attrs["type"] = click.STRING
return click.option(*config_env_param_decls, **config_env_attrs)(f)
# End section copied from [[click_config_file][https://github.com/phha/click_config_file/blob/master/click_config_file.py]
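# Illustrative samconfig.toml that TomlProvider(section="parameters") would
# read (all values below are made up; the [env.command.section] nesting
# mirrors the lookup logged in TomlProvider.__call__):
#
#   version = 0.1
#
#   [default.deploy.parameters]
#   stack_name = "my-stack"
#   region = "us-east-1"
#   capabilities = "CAPABILITY_IAM"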
|
the-stack_106_28516 | #!/usr/bin/env python
"""
ml_fits2.py
Intermediate-level code for building models in scikit-learn and xgboost
For basic-level code, see ml_fits.py
"""
import argparse
import catboost
import lightgbm
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pickle
import warnings
import xgboost as xgb
from hyperopt import hp, fmin, tpe, STATUS_OK, Trials
from sklearn import ensemble, gaussian_process, kernel_ridge, linear_model, naive_bayes, neighbors, neural_network
from sklearn import datasets, multiclass, pipeline, preprocessing, svm, tree
from sklearn.gaussian_process.kernels import ConstantKernel, RBF
from sklearn.model_selection import cross_val_score, KFold
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score, make_scorer
from sklearn.metrics import accuracy_score, average_precision_score, f1_score, matthews_corrcoef, precision_score
from sklearn.metrics import recall_score, roc_auc_score
from sklearn.utils.multiclass import type_of_target
from typing import Any, Callable, Dict, List, Optional
warnings.filterwarnings('ignore')
RANDOM_STATE = 12345
gpr_kernel = ConstantKernel(
1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2))
regression_models = {
'cat': catboost.CatBoostRegressor(verbose=False),
'dtr': tree.DecisionTreeRegressor(),
'gbr': ensemble.GradientBoostingRegressor(),
'gpr': gaussian_process.GaussianProcessRegressor(),
'gpr2': gaussian_process.GaussianProcessRegressor(kernel=gpr_kernel),
'knr': neighbors.KNeighborsRegressor(),
'krr': kernel_ridge.KernelRidge(),
'lasso': linear_model.Lasso(),
'lgbm': lightgbm.LGBMRegressor(),
'linear': linear_model.LinearRegression(),
'logistic': linear_model.LogisticRegression(),
'nb': naive_bayes.GaussianNB(),
'nn': neural_network.MLPRegressor(hidden_layer_sizes=(100,), activation='relu', solver='adam'),
'svr': svm.SVR(),
'svr_rbf': svm.SVR(kernel='rbf', C=100, gamma=0.1, epsilon=.1),
'svr_lin': svm.SVR(kernel='linear', C=100, gamma='auto'),
'svr_poly': svm.SVR(kernel='poly', C=100, gamma='auto', degree=3, epsilon=.1, coef0=1),
'ridge': linear_model.Ridge(),
'rf': ensemble.RandomForestRegressor(n_estimators=100, criterion='mse'),
'xgb': xgb.XGBRegressor()
}
regression_scores = {
'MSE': mean_squared_error,
'MAE': mean_absolute_error,
'r2': r2_score
}
mae_scorer = make_scorer(mean_absolute_error, greater_is_better=False)
mse_scorer = make_scorer(mean_squared_error, greater_is_better=False)
r2_scorer = make_scorer(r2_score, greater_is_better=True)
regression_summary = {
'MSE': ['mean', 'std'],
'MAE': ['mean', 'std'],
'r2': ['mean', 'std']
}
classification_models = {
'sgd': linear_model.SGDClassifier(),
'ridge': linear_model.RidgeClassifier(),
'logistic': linear_model.LogisticRegression(multi_class='multinomial'),
'gnb': naive_bayes.GaussianNB(),
'knr': neighbors.KNeighborsClassifier(),
'mlp': neural_network.MLPClassifier(),
'dtc': tree.DecisionTreeClassifier(),
'rf': ensemble.RandomForestClassifier()
}
classification_scores = {
'acc': accuracy_score,
'avg_precision': average_precision_score,
'f1': f1_score,
'mcc': matthews_corrcoef,
'precision': precision_score,
'recall': recall_score,
'roc': roc_auc_score
}
classification_summary = {
'acc': ['mean', 'std'],
'avg_precision': ['mean', 'std'],
'f1': ['mean', 'std'],
'precision': ['mean', 'std'],
'recall': ['mean', 'std'],
'roc': ['mean', 'std']
}
multiclass_models = {
'sgd': linear_model.SGDClassifier(),
'ridge': linear_model.RidgeClassifier(),
'logistic': linear_model.LogisticRegression(multi_class='multinomial'),
'gnb': naive_bayes.GaussianNB(),
'knr': neighbors.KNeighborsClassifier(),
# 'mlp': neural_network.MLPClassifier(),
'dtc': tree.DecisionTreeClassifier(),
'rf': ensemble.RandomForestClassifier()
}
multiclass_scores = {
'acc': accuracy_score,
'avg_precision': average_precision_score,
}
multiclass_summary = {
'acc': ['mean', 'std'],
'avg_precision': ['mean', 'std'],
}
# ----- Scalers -----
scalers = {
'box_cos': preprocessing.PowerTransformer(method='box-cox', standardize=False),
'max_abs': preprocessing.MaxAbsScaler(),
'min_max': preprocessing.MinMaxScaler(),
'mm_quantile': pipeline.Pipeline([
('min_max', preprocessing.MinMaxScaler()),
('quantile', preprocessing.QuantileTransformer(
output_distribution='normal', random_state=RANDOM_STATE))
]),
'normalizer': preprocessing.Normalizer(),
'quantile': preprocessing.QuantileTransformer(),
'quantile1': preprocessing.QuantileTransformer(output_distribution='normal', random_state=RANDOM_STATE),
'robust': preprocessing.RobustScaler(),
'standard': preprocessing.StandardScaler(),
'yeo_johnson': preprocessing.PowerTransformer(method='yeo-johnson', standardize=True)
}
def load_df(filename: str):
_, ext = os.path.splitext(filename)
if ext == '.xlsx':
df = pd.read_excel(filename, index_col=0).dropna()
elif ext == '.csv':
df = pd.read_csv(filename, index_col=0).dropna()
else:
print('Unable to load file with unknown extension')
return None
return df
def df_to_xy(df: pd.DataFrame, target_col: int):
if target_col == -1:
X = df.iloc[:, :-1]._get_numeric_data()
y = df.iloc[:, -1]
else:
feature_cols = [x for x in df.columns if x != df.columns[target_col]]
X = df.loc[:, feature_cols]._get_numeric_data()
y = df.iloc[:, target_col]
return X.values, y.values
def load_xy(filename: str, target_col: int):
df = load_df(filename)
X, y = df_to_xy(df, target_col)
return X, y
def evaluate(model: Any, X_train: np.ndarray, y_train: np.ndarray,
X_test: np.ndarray, y_test: np.ndarray, scores: Dict[str, Any]):
obj = model.fit(X_train, y_train)
y_pred = obj.predict(X_test)
results = {}
for score_name, score_fn in scores.items():
try:
results[score_name] = score_fn(y_test, y_pred)
except ValueError as e:
# if e == 'multiclass format is not supported':
print(e)
results[score_name] = np.nan
return results
def run_kfold(X: np.ndarray, y: np.ndarray, models: Dict[str, Any],
scores: Dict[str, Any], n_splits: int = 10,
multiclass_strat: Optional[str] = ''):
kf = KFold(n_splits=n_splits, shuffle=True, random_state=12345)
results = []
for model_name, model in models.items():
print(f"----- {model_name} -----")
for i, (train_index, test_index) in enumerate(kf.split(X)):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
model2 = model
if multiclass_strat == 'ovr':
model2 = multiclass.OneVsRestClassifier(model)
elif multiclass_strat == 'ovo':
model2 = multiclass.OneVsOneClassifier(model)
temp_results = evaluate(
model2, X_train, y_train, X_test, y_test, scores)
print(f"Fold = {i}: {temp_results}")
temp_results['method'] = model_name
temp_results['fold'] = i
results.append(temp_results)
return pd.DataFrame(results)
def get_classification_model_from_params(params):
model = xgb.XGBClassifier(
# silent=False,
# scale_pos_weight=1,
max_depth=params['max_depth'],
learning_rate=params['learning_rate'],
# tree_method = 'gpu_hist',
# gpu_id=0,
n_estimators=params['n_estimators'],
gamma=params['gamma'],
min_child_weight=params['min_child_weight'],
subsample=params['subsample'],
colsample_bytree=params['colsample_bytree']
# objective='binary:logistic',
# reg_alpha = 0.3,
)
return model
def get_regression_model_from_params(params):
# https://xgboost.readthedocs.io/en/latest/python/python_api.html
model = xgb.XGBRegressor(
n_estimators=params['n_estimators'],
max_depth=params['max_depth'],
learning_rate=params['learning_rate'],
# verbosity=3, # (0=silent, 3=debug)
# objective='...',
# booster='...', # gbtree, gblinear or dart
# tree_method='auto',
# n_jobs=...,
gamma=params['gamma'],
min_child_weight=params['min_child_weight'],
# max_delta_step=...,
subsample=params['subsample'],
colsample_bytree=params['colsample_bytree']
# colsample_bylevel=...,
# reg_alpha = 0.3,
# reg_lambda=...,
# scale_pos_weight=1,
# base_score=...,
# random_state=...,
# gpu_id=None # gpu_id=0 for first GPU
)
return model
def loss_metric(params):
"""
Calculates a cross-validated loss metric
Use metric_sign=1, to get cv_scores.mean() in order to minimize
metric = mean_absolute_error, mean_squared_error
Use metric_sign=-1, to get -cv_scores.mean() in order to maximize
metric = accuracy_score, average_precision_score, f1_score, matthews_corrcoef, precision_score
recall_score, roc_auc_score, r2_score, ...
Set get_model_from_params to a function that takes params as an
input and returns a model
"""
global X_train, y_train, X_valid, y_valid, metric, metric_sign, Model
cv_scores = cross_val_score(
Model(**params),
X_train, y_train,
scoring=make_scorer(metric),
cv=5,
n_jobs=-1 # use all cores if possible
)
return {
'loss': metric_sign * cv_scores.mean(),
'status': STATUS_OK
}
def hyp_tune(my_fn, search_space, algo=tpe.suggest, max_evals=100, seed=12345):
"""
Hyperparamter tuning of a model
"""
global metric
trials = Trials()
result = fmin(
fn=my_fn,
space=search_space,
algo=algo,
trials=trials,
max_evals=max_evals,
rstate=np.random.RandomState(seed=seed)
)
df_x = pd.DataFrame(trials.idxs_vals[1])
loss = pd.DataFrame(trials.results)
df_r = pd.concat((df_x, loss), axis=1)
return result, trials, df_r
def results2df(results, trials):
df_x = pd.DataFrame(trials.idxs_vals[1])
loss = pd.DataFrame(trials.results)
df_r = pd.concat((df_x, loss), axis=1)
return df_r
def plot_results(df_r: pd.DataFrame):
_, ax = plt.subplots(1, 1, figsize=(12, 5))
ax.semilogy(df_r.index, df_r['loss'], marker='+', markeredgecolor='red', markersize=5, markeredgewidth=1)
ax.set_xlabel('Iteration number')
ax.set_ylabel('Loss')
ax.grid(True)
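# Example wiring for the hyperopt helpers above (illustrative only: the data
# file, metric choice and search space are assumptions, and nothing calls this
# function automatically). It fills in the module-level globals that
# loss_metric() reads, then tunes an XGBoost regressor with hyp_tune().
def example_hyperopt_tuning(csv_file: str, target_col: int = -1):
    global X_train, y_train, metric, metric_sign, Model
    X_train, y_train = load_xy(csv_file, target_col)
    metric = mean_squared_error
    metric_sign = 1  # minimize the cross-validated MSE directly
    Model = xgb.XGBRegressor
    search_space = {
        'n_estimators': hp.choice('n_estimators', [100, 200, 400]),
        'max_depth': hp.choice('max_depth', [3, 5, 7, 9]),
        'learning_rate': hp.loguniform('learning_rate', np.log(0.01), np.log(0.3)),
        'gamma': hp.uniform('gamma', 0.0, 5.0),
        'min_child_weight': hp.choice('min_child_weight', [1, 3, 5]),
        'subsample': hp.uniform('subsample', 0.5, 1.0),
        'colsample_bytree': hp.uniform('colsample_bytree', 0.5, 1.0),
    }
    best, trials, df_r = hyp_tune(loss_metric, search_space, max_evals=25)
    plot_results(df_r)
    return best, df_r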
# ----- Machine Learning Models -----
def create_single_model(model: Any, X: np.ndarray, y: np.ndarray,
filename: Optional[str] = None):
obj = model.fit(X, y)
if filename is None:
filename = f'model.pickle'
with open(filename, 'wb') as f:
pickle.dump(obj, f)
print(f'Saved: {filename}')
def select_model_type(model_type, y):
global regression_models, regression_scores, regression_summary
global classification_models, classification_scores, classification_summary
global multiclass_models, multiclass_scores, multiclass_summary
multiclass_strat = ''
sort_by, ascending = 'acc', False
if model_type == 'auto':
model_type = type_of_target(y)
if model_type == 'multiclass' and len(np.unique(y)) > 20:
model_type = 'continuous'
print(f'Automatically selected model type: {model_type}')
if model_type == 'continuous':
# sort_by, ascending = 'MSE', True
sort_by, ascending = 'r2', False
models = regression_models
scores = regression_scores
summary = regression_summary
elif model_type == 'binary':
models = classification_models
scores = classification_scores
summary = classification_summary
elif model_type == 'multiclass':
models = multiclass_models
scores = multiclass_scores
summary = multiclass_summary
multiclass_strat = 'ovr'
y = preprocessing.label_binarize(y, classes=np.unique(y))
return multiclass_strat, sort_by, ascending, models, scores, summary, multiclass_strat, y
def run_ML(X: np.array, y: np.array,
model_type: str,
multiclass_strat: Optional[str] = None):
"""
Runs machine learning on all models specified in the parameter: models
and evaluates them on all metrics in the paramter: scores
"""
multiclass_strat, sort_by, ascending, models, scores, summary, multiclass_strat, y \
= select_model_type(model_type, y)
scaler = preprocessing.MinMaxScaler()
X_scaled = scaler.fit_transform(X)
df_raw = run_kfold(X_scaled, y, models, scores,
n_splits=5, multiclass_strat=multiclass_strat)
cols = ['fold', 'method'] + list(scores.keys())
df_raw = df_raw[cols]
print('----- results.csv (raw) -----')
print(df_raw)
df_raw.to_csv('results.csv')
print('----- summary.csv (data averaged across k-folds) -----')
df_summary = df_raw.groupby('method').agg(summary)
df_summary.to_csv('summary.csv')
df_summary = df_summary.sort_values((sort_by, 'mean'), ascending=ascending)
print(df_summary)
df_summary.to_excel('summary.xlsx')
return df_summary
def main(args):
if args.demo:
if args.task == 'regression':
X, y = datasets.load_diabetes(return_X_y=True)
elif args.task == 'classification':
X, y = datasets.load_breast_cancer(return_X_y=True)
elif args.task == 'multiclass':
X, y = datasets.load_digits(return_X_y=True)
else:
X, y = load_xy(args.file, args.target_col)
if args.task == 'regression':
df_summary = run_ML(X, y, model_type='continuous')
else:
df_summary = run_ML(X, y, model_type='auto')
best_model = df_summary.index[0]
print(f'Best model: {best_model}')
# create_single_model(best_model, X, y)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--file', type=str, help='Input .csv file')
parser.add_argument('--target_col', type=int, default=-1, help='Target Column')
parser.add_argument('--task', type=str, choices=['regression', 'classification', 'multiclass'])
parser.add_argument('--demo', action='store_true', help='Run classification or regression demo')
args = parser.parse_args()
main(args)
|
the-stack_106_28517 | import asyncio
import datetime
import functools
import json
import logging
import time
from collections import defaultdict
import discord
from discord.ext import commands
from tle.util import codeforces_common as cf_common
from tle.util import cache_system2
from tle.util import db
from tle.util import discord_common
from tle.util import paginator
from tle.util import ranklist as rl
from tle.util import table
_CONTESTS_PER_PAGE = 5
_CONTEST_PAGINATE_WAIT_TIME = 5 * 60
_STANDINGS_PER_PAGE = 15
_STANDINGS_PAGINATE_WAIT_TIME = 2 * 60
_FINISHED_CONTESTS_LIMIT = 5
class ContestCogError(commands.CommandError):
pass
def _get_formatted_contest_info(contest, tz):
start = datetime.datetime.fromtimestamp(contest.startTimeSeconds, tz)
start = f'{start.strftime("%d %b %y, %H:%M")} {tz}'
duration_days, duration_hrs, duration_mins, _ = cf_common.time_format(contest.durationSeconds)
duration = f'{duration_hrs}h {duration_mins}m'
if duration_days > 0:
duration = f'{duration_days}d ' + duration
return contest.name, str(contest.id), start, duration, contest.register_url
def _get_formatted_contest_desc(id_str, start, duration, url, max_duration_len):
em = '\N{EM QUAD}'
sq = '\N{WHITE SQUARE WITH UPPER RIGHT QUADRANT}'
desc = (f'`{em}{id_str}{em}|'
f'{em}{start}{em}|'
f'{em}{duration.rjust(max_duration_len, em)}{em}|'
f'{em}`[`link {sq}`]({url} "Link to contest page")')
return desc
def _get_embed_fields_from_contests(contests):
infos = []
for contest in contests:
info = _get_formatted_contest_info(contest, datetime.timezone.utc)
infos.append(info)
max_duration_len = max(len(duration) for _, _, _, duration, _ in infos)
fields = []
for name, id_str, start, duration, url in infos:
value = _get_formatted_contest_desc(id_str, start, duration, url, max_duration_len)
fields.append((name, value))
return fields
async def _send_reminder_at(channel, role, contests, before_secs, send_time):
delay = send_time - time.time()
if delay <= 0:
return
await asyncio.sleep(delay)
values = cf_common.time_format(before_secs)
def make(value, label):
tmp = f'{value} {label}'
return tmp if value == 1 else tmp + 's'
labels = 'day hr min sec'.split()
before_str = ' '.join(make(value, label) for label, value in zip(labels, values) if value > 0)
desc = f'About to start in {before_str}'
embed = discord_common.cf_color_embed(description=desc)
for name, value in _get_embed_fields_from_contests(contests):
embed.add_field(name=name, value=value)
await channel.send(role.mention, embed=embed)
class Contests(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.future_contests = None
self.active_contests = None
self.finished_contests = None
self.start_time_map = defaultdict(list)
self.task_map = defaultdict(list)
self.member_converter = commands.MemberConverter()
self.role_converter = commands.RoleConverter()
self.logger = logging.getLogger(self.__class__.__name__)
@commands.Cog.listener()
async def on_ready(self):
asyncio.create_task(self._updater_task())
async def _updater_task(self):
self.logger.info('Running Contests cog updater task')
while True:
try:
await cf_common.event_sys.wait_for('EVENT_CONTEST_LIST_REFRESH')
await self._reload()
except Exception:
self.logger.warning(f'Exception in Contests cog updater task, ignoring.', exc_info=True)
async def _reload(self):
contest_cache = cf_common.cache2.contest_cache
self.future_contests = contest_cache.get_contests_in_phase('BEFORE')
self.active_contests = (contest_cache.get_contests_in_phase('CODING') +
contest_cache.get_contests_in_phase('PENDING_SYSTEM_TEST') +
contest_cache.get_contests_in_phase('SYSTEM_TEST'))
self.finished_contests = contest_cache.get_contests_in_phase('FINISHED')
# Future contests already sorted by start time.
self.active_contests.sort(key=lambda contest: contest.startTimeSeconds)
self.finished_contests.sort(key=lambda contest: contest.end_time, reverse=True)
# Keep most recent _FINISHED_LIMIT
self.finished_contests = self.finished_contests[:_FINISHED_CONTESTS_LIMIT]
self.logger.info(f'Refreshed cache')
self.start_time_map.clear()
for contest in self.future_contests:
if not cf_common.is_nonstandard_contest(contest):
# Exclude non-standard contests from reminders.
self.start_time_map[contest.startTimeSeconds].append(contest)
self._reschedule_all_tasks()
def _reschedule_all_tasks(self):
for guild in self.bot.guilds:
self._reschedule_tasks(guild.id)
def _reschedule_tasks(self, guild_id):
for task in self.task_map[guild_id]:
task.cancel()
self.task_map[guild_id].clear()
self.logger.info(f'Tasks for guild {guild_id} cleared')
if not self.start_time_map:
return
try:
settings = cf_common.user_db.get_reminder_settings(guild_id)
except db.DatabaseDisabledError:
return
if settings is None:
return
channel_id, role_id, before = settings
channel_id, role_id, before = int(channel_id), int(role_id), json.loads(before)
guild = self.bot.get_guild(guild_id)
channel, role = guild.get_channel(channel_id), guild.get_role(role_id)
for start_time, contests in self.start_time_map.items():
for before_mins in before:
before_secs = 60 * before_mins
task = asyncio.create_task(
_send_reminder_at(channel, role, contests, before_secs, start_time - before_secs))
self.task_map[guild_id].append(task)
self.logger.info(f'{len(self.task_map[guild_id])} tasks scheduled for guild {guild_id}')
@staticmethod
def _make_contest_pages(contests, title):
pages = []
chunks = paginator.chunkify(contests, _CONTESTS_PER_PAGE)
for chunk in chunks:
embed = discord_common.cf_color_embed()
for name, value in _get_embed_fields_from_contests(chunk):
embed.add_field(name=name, value=value, inline=False)
pages.append((title, embed))
return pages
async def _send_contest_list(self, ctx, contests, *, title, empty_msg):
if contests is None:
raise ContestCogError('Contest list not present')
if len(contests) == 0:
await ctx.send(embed=discord_common.embed_neutral(empty_msg))
return
pages = self._make_contest_pages(contests, title)
paginator.paginate(self.bot, ctx.channel, pages, wait_time=_CONTEST_PAGINATE_WAIT_TIME,
set_pagenum_footers=True)
@commands.group(brief='Commands for listing contests',
invoke_without_command=True)
async def clist(self, ctx):
await ctx.send_help(ctx.command)
@clist.command(brief='List future contests')
async def future(self, ctx):
"""List future contests on Codeforces."""
await self._send_contest_list(ctx, self.future_contests,
title='Future contests on Codeforces',
empty_msg='No future contests scheduled')
@clist.command(brief='List active contests')
async def active(self, ctx):
"""List active contests on Codeforces, namely those in coding phase, pending system
test or in system test."""
await self._send_contest_list(ctx, self.active_contests,
title='Active contests on Codeforces',
empty_msg='No contests currently active')
@clist.command(brief='List recent finished contests')
async def finished(self, ctx):
"""List recently concluded contests on Codeforces."""
await self._send_contest_list(ctx, self.finished_contests,
title='Recently finished contests on Codeforces',
empty_msg='No finished contests found')
@commands.group(brief='Commands for contest reminders',
invoke_without_command=True)
async def remind(self, ctx):
await ctx.send_help('remind')
@remind.command(brief='Set reminder settings')
@commands.has_role('Admin')
async def here(self, ctx, role: discord.Role, *before: int):
"""Sets reminder channel to current channel, role to the given role, and reminder
times to the given values in minutes."""
if not role.mentionable:
raise ContestCogError('The role for reminders must be mentionable')
if not before or any(before_mins <= 0 for before_mins in before):
raise ContestCogError('Please provide valid `before` values')
before = sorted(before, reverse=True)
cf_common.user_db.set_reminder_settings(ctx.guild.id, ctx.channel.id, role.id, json.dumps(before))
await ctx.send(embed=discord_common.embed_success('Reminder settings saved successfully'))
self._reschedule_tasks(ctx.guild.id)
@remind.command(brief='Clear all reminder settings')
@commands.has_role('Admin')
async def clear(self, ctx):
cf_common.user_db.clear_reminder_settings(ctx.guild.id)
await ctx.send(embed=discord_common.embed_success('Reminder settings cleared'))
self._reschedule_tasks(ctx.guild.id)
@remind.command(brief='Show reminder settings')
async def settings(self, ctx):
"""Shows the role, channel and before time settings."""
settings = cf_common.user_db.get_reminder_settings(ctx.guild.id)
if settings is None:
await ctx.send(embed=discord_common.embed_neutral('Reminder not set'))
return
channel_id, role_id, before = settings
channel_id, role_id, before = int(channel_id), int(role_id), json.loads(before)
channel, role = ctx.guild.get_channel(channel_id), ctx.guild.get_role(role_id)
if channel is None:
raise ContestCogError('The channel set for reminders is no longer available')
if role is None:
raise ContestCogError('The role set for reminders is no longer available')
before_str = ', '.join(str(before_mins) for before_mins in before)
embed = discord_common.embed_success('Current reminder settings')
embed.add_field(name='Channel', value=channel.mention)
embed.add_field(name='Role', value=role.mention)
embed.add_field(name='Before', value=f'At {before_str} mins before contest')
await ctx.send(embed=embed)
@remind.command(brief='Subscribe to or unsubscribe from contest reminders',
usage='[not]')
async def me(self, ctx, arg: str = None):
settings = cf_common.user_db.get_reminder_settings(ctx.guild.id)
if settings is None:
raise ContestCogError('To use this command, reminder settings must be set by an admin')
_, role_id, _ = settings
role = ctx.guild.get_role(int(role_id))
if role is None:
raise ContestCogError('The role set for reminders is no longer available')
if arg is None:
if role in ctx.author.roles:
await ctx.send(embed=discord_common.embed_neutral('You are already subscribed to contest reminders'))
return
await ctx.author.add_roles(role, reason='User subscribed to contest reminders')
await ctx.send(embed=discord_common.embed_success('Successfully subscribed to contest reminders'))
elif arg == 'not':
if role not in ctx.author.roles:
await ctx.send(embed=discord_common.embed_neutral('You are not subscribed to contest reminders'))
return
await ctx.author.remove_roles(role, reason='User unsubscribed from contest reminders')
await ctx.send(embed=discord_common.embed_success('Successfully unsubscribed from contest reminders'))
@staticmethod
def _get_cf_or_ioi_standings_table(problem_indices, handle_standings, deltas=None, *, mode):
assert mode in ('cf', 'ioi')
def maybe_int(value):
return int(value) if mode == 'cf' else value
header_style = '{:>} {:<} {:^} ' + ' '.join(['{:^}'] * len(problem_indices))
body_style = '{:>} {:<} {:>} ' + ' '.join(['{:>}'] * len(problem_indices))
header = ['#', 'Handle', '='] + problem_indices
if deltas:
header_style += ' {:^}'
body_style += ' {:>}'
header += ['\N{INCREMENT}']
body = []
for handle, standing in handle_standings:
virtual = '#' if standing.party.participantType == 'VIRTUAL' else ''
tokens = [standing.rank, handle + ':' + virtual, maybe_int(standing.points)]
for problem_result in standing.problemResults:
score = ''
if problem_result.points:
score = str(maybe_int(problem_result.points))
tokens.append(score)
body.append(tokens)
if deltas:
for tokens, delta in zip(body, deltas):
tokens.append('' if delta is None else f'{delta:+}')
return header_style, body_style, header, body
@staticmethod
def _get_icpc_standings_table(problem_indices, handle_standings, deltas=None):
header_style = '{:>} {:<} {:^} {:^} ' + ' '.join(['{:^}'] * len(problem_indices))
body_style = '{:>} {:<} {:>} {:>} ' + ' '.join(['{:<}'] * len(problem_indices))
header = ['#', 'Handle', '=', '-'] + problem_indices
if deltas:
header_style += ' {:^}'
body_style += ' {:>}'
header += ['\N{INCREMENT}']
body = []
for handle, standing in handle_standings:
virtual = '#' if standing.party.participantType == 'VIRTUAL' else ''
tokens = [standing.rank, handle + ':' + virtual, int(standing.points), int(standing.penalty)]
for problem_result in standing.problemResults:
score = '+' if problem_result.points else ''
if problem_result.rejectedAttemptCount:
penalty = str(problem_result.rejectedAttemptCount)
if problem_result.points:
score += penalty
else:
score = '-' + penalty
tokens.append(score)
body.append(tokens)
if deltas:
for tokens, delta in zip(body, deltas):
tokens.append('' if delta is None else f'{delta:+}')
return header_style, body_style, header, body
def _make_standings_pages(self, contest, problem_indices, handle_standings, deltas=None):
pages = []
handle_standings_chunks = paginator.chunkify(handle_standings, _STANDINGS_PER_PAGE)
num_chunks = len(handle_standings_chunks)
delta_chunks = paginator.chunkify(deltas, _STANDINGS_PER_PAGE) if deltas else [None] * num_chunks
if contest.type == 'CF':
get_table = functools.partial(self._get_cf_or_ioi_standings_table, mode='cf')
elif contest.type == 'ICPC':
get_table = self._get_icpc_standings_table
elif contest.type == 'IOI':
get_table = functools.partial(self._get_cf_or_ioi_standings_table, mode='ioi')
else:
assert False, f'Unexpected contest type {contest.type}'
num_pages = 1
for handle_standings_chunk, delta_chunk in zip(handle_standings_chunks, delta_chunks):
header_style, body_style, header, body = get_table(problem_indices,
handle_standings_chunk,
delta_chunk)
t = table.Table(table.Style(header=header_style, body=body_style))
t += table.Header(*header)
t += table.Line('\N{EM DASH}')
for row in body:
t += table.Data(*row)
t += table.Line('\N{EM DASH}')
page_num_footer = f' # Page: {num_pages} / {num_chunks}' if num_chunks > 1 else ''
# We use yaml to get nice colors in the ranklist.
content = f'```yaml\n{t}\n{page_num_footer}```'
pages.append((content, None))
num_pages += 1
return pages
@commands.command(brief='Show ranklist for given handles and/or server members')
async def ranklist(self, ctx, contest_id: int, *handles: str):
"""Shows ranklist for the contest with given contest id. If handles contains
'+server', all server members are included. No handles defaults to '+server'.
"""
contest = cf_common.cache2.contest_cache.get_contest(contest_id)
wait_msg = None
try:
ranklist = cf_common.cache2.ranklist_cache.get_ranklist(contest)
deltas_status = 'Predicted'
except cache_system2.RanklistNotMonitored:
if contest.phase == 'BEFORE':
raise ContestCogError(f'Contest `{contest.id} | {contest.name}` has not started')
wait_msg = await ctx.send('Please wait...')
ranklist = await cf_common.cache2.ranklist_cache.generate_ranklist(contest.id,
fetch_changes=True)
deltas_status = 'Final'
handles = set(handles)
if not handles:
handles.add('+server')
if '+server' in handles:
handles.remove('+server')
guild_handles = [handle for discord_id, handle
in cf_common.user_db.get_handles_for_guild(ctx.guild.id)]
handles.update(guild_handles)
handles = await cf_common.resolve_handles(ctx, self.member_converter, handles, maxcnt=256)
handle_standings = []
for handle in handles:
try:
standing = ranklist.get_standing_row(handle)
except rl.HandleNotPresentError:
continue
handle_standings.append((handle, standing))
if not handle_standings:
raise ContestCogError(f'None of the handles are present in the ranklist of `{contest.name}`')
handle_standings.sort(key=lambda data: data[1].rank)
deltas = None
if ranklist.is_rated:
deltas = [ranklist.get_delta(handle) for handle, standing in handle_standings]
problem_indices = [problem.index for problem in ranklist.problems]
pages = self._make_standings_pages(contest, problem_indices, handle_standings, deltas)
embed = discord_common.cf_color_embed(title=contest.name, url=contest.url)
phase = contest.phase.capitalize().replace('_', ' ')
embed.add_field(name='Phase', value=phase)
if ranklist.is_rated:
embed.add_field(name='Deltas', value=deltas_status)
if wait_msg:
try:
await wait_msg.delete()
            except Exception:
pass
await ctx.send(embed=embed)
paginator.paginate(self.bot, ctx.channel, pages, wait_time=_STANDINGS_PAGINATE_WAIT_TIME)
async def cog_command_error(self, ctx, error):
if isinstance(error, (ContestCogError, rl.RanklistError, cache_system2.CacheError)):
await ctx.send(embed=discord_common.embed_alert(error))
error.handled = True
return
await cf_common.resolve_handle_error_handler(ctx, error)
def setup(bot):
bot.add_cog(Contests(bot))
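# Illustrative sketch only (not part of this cog): `paginator.chunkify` is assumed
# to split the standings rows into fixed-size pages, roughly like the helper below;
# the real implementation lives in the bot's paginator module.
#
#     def chunkify_sketch(sequence, chunk_size):
#         return [sequence[i:i + chunk_size]
#                 for i in range(0, len(sequence), chunk_size)]
#
#     chunkify_sketch(list(range(5)), 2)  # -> [[0, 1], [2, 3], [4]]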
|
the-stack_106_28518 | import numpy as np
from matplotlib import pyplot as plt
n_tokens = 100
fs = 0.5
x = np.arange(0, n_tokens, 0.1) * fs
pos_enc = np.sin(x)
print(pos_enc.shape)
plt.plot(x, pos_enc)
plt.xlabel('Token position (scaled by fs)')
plt.ylabel('sin(position * fs)')
plt.show()
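# Hedged extension (not part of the original snippet): the full Transformer-style
# positional encoding interleaves sine and cosine columns, one frequency per pair
# of embedding dimensions; the single sine above is analogous to one column of
# this matrix. The heatmap view is where labels like 'Embedding Dimensions' vs.
# 'Token Position' apply.
d_model = 64
positions = np.arange(n_tokens)[:, None]        # (n_tokens, 1)
dims = np.arange(0, d_model, 2)[None, :]        # (1, d_model // 2)
angles = positions / (10000 ** (dims / d_model))
pe = np.zeros((n_tokens, d_model))
pe[:, 0::2] = np.sin(angles)  # even dimensions
pe[:, 1::2] = np.cos(angles)  # odd dimensions
plt.imshow(pe, aspect='auto', cmap='viridis')
plt.xlabel('Embedding Dimensions')
plt.ylabel('Token Position')
plt.colorbar()
plt.show()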
|
the-stack_106_28520 | # Copyright 2014 Google Inc. All Rights Reserved.
"""Command for getting a target pool's health."""
from googlecloudsdk.compute.lib import base_classes
from googlecloudsdk.compute.lib import request_helper
from googlecloudsdk.compute.lib import utils
class GetHealth(base_classes.BaseCommand):
"""Get the health of instances in a target pool."""
@staticmethod
def Args(parser):
base_classes.AddFieldsFlag(parser, 'targetPoolInstanceHealth')
utils.AddRegionFlag(
parser,
resource_type='target pool',
operation_type='get health information for')
parser.add_argument(
'name',
help='The name of the target pool.')
@property
def service(self):
return self.compute.targetPools
@property
def resource_type(self):
return 'targetPoolInstanceHealth'
def GetTargetPool(self):
"""Fetches the target pool resource."""
errors = []
objects = list(request_helper.MakeRequests(
requests=[(self.service,
'Get',
self.messages.ComputeTargetPoolsGetRequest(
project=self.project,
region=self.target_pool_ref.region,
targetPool=self.target_pool_ref.Name()))],
http=self.http,
batch_url=self.batch_url,
errors=errors,
custom_get_requests=None))
if errors:
utils.RaiseToolException(
errors,
error_message='Could not fetch target pool:')
return objects[0]
def Run(self, args):
"""Returns a list of TargetPoolInstanceHealth objects."""
self.target_pool_ref = self.CreateRegionalReference(
args.name, args.region, resource_type='targetPools')
target_pool = self.GetTargetPool()
instances = target_pool.instances
# If the target pool has no instances, we should return an empty
# list.
if not instances:
return
requests = []
for instance in instances:
request_message = self.messages.ComputeTargetPoolsGetHealthRequest(
instanceReference=self.messages.InstanceReference(
instance=instance),
project=self.project,
region=self.target_pool_ref.region,
targetPool=self.target_pool_ref.Name())
requests.append((self.service, 'GetHealth', request_message))
errors = []
resources = request_helper.MakeRequests(
requests=requests,
http=self.http,
batch_url=self.batch_url,
errors=errors,
custom_get_requests=None)
for resource in resources:
yield resource
if errors:
utils.RaiseToolException(
errors,
error_message='Could not get health for some targets:')
GetHealth.detailed_help = {
'brief': 'Get the health of instances in a target pool',
'DESCRIPTION': """\
*{command}* displays the health of instances in a target pool.
""",
}
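# Example invocation (hedged: the exact command surface may differ between
# Cloud SDK releases):
#
#     $ gcloud compute target-pools get-health my-pool --region us-central1
#
# One targetPoolInstanceHealth resource is yielded per instance that is
# currently a member of the pool.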
|
the-stack_106_28521 | import asyncio
import contextlib
from datetime import datetime
from functools import wraps
import hashlib
import json
import logging
import multiprocessing
import os
from pathlib import Path
import platform
import sys
import textwrap
import typing
from typing import Any, Callable, Dict, List, Optional, Text
import uuid
import requests
from terminaltables import SingleTable
import rasa
from rasa import model
from rasa.constants import (
CONFIG_FILE_TELEMETRY_KEY,
CONFIG_TELEMETRY_DATE,
CONFIG_TELEMETRY_ENABLED,
CONFIG_TELEMETRY_ID,
)
from rasa.engine.storage.local_model_storage import LocalModelStorage
from rasa.shared.constants import DOCS_URL_TELEMETRY
from rasa.shared.exceptions import RasaException
import rasa.shared.utils.io
from rasa.utils import common as rasa_utils
import rasa.utils.io
if typing.TYPE_CHECKING:
from rasa.core.brokers.broker import EventBroker
from rasa.core.tracker_store import TrackerStore
from rasa.core.channels.channel import InputChannel
from rasa.core.agent import Agent
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.importers.importer import TrainingDataImporter
from rasa.core.utils import AvailableEndpoints
logger = logging.getLogger(__name__)
SEGMENT_ENDPOINT = "https://api.segment.io/v1/track"
SEGMENT_REQUEST_TIMEOUT = 5 # seconds
TELEMETRY_ENABLED_ENVIRONMENT_VARIABLE = "RASA_TELEMETRY_ENABLED"
TELEMETRY_DEBUG_ENVIRONMENT_VARIABLE = "RASA_TELEMETRY_DEBUG"
# the environment variable can be used for local development to set a test key
# e.g. `RASA_TELEMETRY_WRITE_KEY=12354 rasa train`
TELEMETRY_WRITE_KEY_ENVIRONMENT_VARIABLE = "RASA_TELEMETRY_WRITE_KEY"
EXCEPTION_WRITE_KEY_ENVIRONMENT_VARIABLE = "RASA_EXCEPTION_WRITE_KEY"
TELEMETRY_ID = "metrics_id"
TELEMETRY_ENABLED_BY_DEFAULT = True
# if one of these environment variables is set, we assume to be running in CI env
CI_ENVIRONMENT_TELL = [
"bamboo.buildKey",
"BUILD_ID",
"BUILD_NUMBER",
"BUILDKITE",
"CI",
"CIRCLECI",
"CONTINUOUS_INTEGRATION",
"GITHUB_ACTIONS",
"HUDSON_URL",
"JENKINS_URL",
"TEAMCITY_VERSION",
"TRAVIS",
"CODEBUILD_BUILD_ARN",
"CODEBUILD_BUILD_ID",
"CODEBUILD_BATCH_BUILD_IDENTIFIER",
]
# If updating or creating a new event, remember to update
# https://rasa.com/docs/rasa/telemetry
TRAINING_STARTED_EVENT = "Training Started"
TRAINING_COMPLETED_EVENT = "Training Completed"
TELEMETRY_DISABLED_EVENT = "Telemetry Disabled"
TELEMETRY_DATA_SPLIT_EVENT = "Training Data Split"
TELEMETRY_DATA_VALIDATED_EVENT = "Training Data Validated"
TELEMETRY_DATA_CONVERTED_EVENT = "Training Data Converted"
TELEMETRY_TRACKER_EXPORTED_EVENT = "Tracker Exported"
TELEMETRY_INTERACTIVE_LEARNING_STARTED_EVENT = "Interactive Learning Started"
TELEMETRY_SERVER_STARTED_EVENT = "Server Started"
TELEMETRY_PROJECT_CREATED_EVENT = "Project Created"
TELEMETRY_SHELL_STARTED_EVENT = "Shell Started"
TELEMETRY_RASA_X_LOCAL_STARTED_EVENT = "Rasa X Local Started"
TELEMETRY_VISUALIZATION_STARTED_EVENT = "Story Visualization Started"
TELEMETRY_TEST_CORE_EVENT = "Model Core Tested"
TELEMETRY_TEST_NLU_EVENT = "Model NLU Tested"
TELEMETRY_MARKERS_EXTRACTION_INITIATED_EVENT = "Markers Extraction Initiated"
TELEMETRY_MARKERS_EXTRACTED_EVENT = "Markers Extracted"
TELEMETRY_MARKERS_STATS_COMPUTED_EVENT = "Markers Statistics Computed"
TELEMETRY_MARKERS_PARSED_COUNT = "Markers Parsed"
# used to calculate the context on the first call and cache it afterwards
TELEMETRY_CONTEXT = None
def print_telemetry_reporting_info() -> None:
"""Print telemetry information to std out."""
message = textwrap.dedent(
f"""
Rasa Open Source reports anonymous usage telemetry to help improve the product
for all its users.
If you'd like to opt-out, you can use `rasa telemetry disable`.
To learn more, check out {DOCS_URL_TELEMETRY}."""
).strip()
table = SingleTable([[message]])
print(table.table)
def _default_telemetry_configuration(is_enabled: bool) -> Dict[Text, Any]:
return {
CONFIG_TELEMETRY_ENABLED: is_enabled,
CONFIG_TELEMETRY_ID: uuid.uuid4().hex,
CONFIG_TELEMETRY_DATE: datetime.now(),
}
def _write_default_telemetry_configuration(
is_enabled: bool = TELEMETRY_ENABLED_BY_DEFAULT,
) -> bool:
new_config = _default_telemetry_configuration(is_enabled)
success = rasa_utils.write_global_config_value(
CONFIG_FILE_TELEMETRY_KEY, new_config
)
# Do not show info if user has enabled/disabled telemetry via env var
telemetry_environ = os.environ.get(TELEMETRY_ENABLED_ENVIRONMENT_VARIABLE)
if is_enabled and success and telemetry_environ is None:
print_telemetry_reporting_info()
return success
def _is_telemetry_enabled_in_configuration() -> bool:
"""Read telemetry configuration from the user's Rasa config file in $HOME.
Creates a default configuration if no configuration exists.
Returns:
`True`, if telemetry is enabled, `False` otherwise.
"""
try:
stored_config = rasa_utils.read_global_config_value(
CONFIG_FILE_TELEMETRY_KEY, unavailable_ok=False
)
return stored_config[CONFIG_TELEMETRY_ENABLED]
except ValueError as e:
logger.debug(f"Could not read telemetry settings from configuration file: {e}")
# seems like there is no config, we'll create one and enable telemetry
success = _write_default_telemetry_configuration()
# if writing the configuration failed, telemetry will be disabled
return TELEMETRY_ENABLED_BY_DEFAULT and success
def is_telemetry_enabled() -> bool:
"""Check if telemetry is enabled either in configuration or environment.
Returns:
`True`, if telemetry is enabled, `False` otherwise.
"""
telemetry_environ = os.environ.get(TELEMETRY_ENABLED_ENVIRONMENT_VARIABLE)
if telemetry_environ is not None:
return telemetry_environ.lower() == "true"
try:
return rasa_utils.read_global_config_value(
CONFIG_FILE_TELEMETRY_KEY, unavailable_ok=False
)[CONFIG_TELEMETRY_ENABLED]
except ValueError:
return False
def initialize_telemetry() -> bool:
"""Read telemetry configuration from the user's Rasa config file in $HOME.
Creates a default configuration if no configuration exists.
Returns:
`True`, if telemetry is enabled, `False` otherwise.
"""
try:
# calling this even if the environment variable is set makes sure the
# configuration is created and there is a telemetry ID
is_enabled_in_configuration = _is_telemetry_enabled_in_configuration()
telemetry_environ = os.environ.get(TELEMETRY_ENABLED_ENVIRONMENT_VARIABLE)
if telemetry_environ is None:
return is_enabled_in_configuration
return telemetry_environ.lower() == "true"
except Exception as e: # skipcq:PYL-W0703
logger.exception(
f"Failed to initialize telemetry reporting: {e}."
f"Telemetry reporting will be disabled."
)
return False
def ensure_telemetry_enabled(f: Callable[..., Any]) -> Callable[..., Any]:
"""Function decorator for telemetry functions that ensures telemetry is enabled.
WARNING: does not work as a decorator for async generators.
Args:
f: function to call if telemetry is enabled
Returns:
Return wrapped function
"""
# allows us to use the decorator for async and non async functions
if asyncio.iscoroutinefunction(f):
@wraps(f)
async def decorated_coroutine(*args: Any, **kwargs: Any) -> Any:
if is_telemetry_enabled():
return await f(*args, **kwargs)
return None
return decorated_coroutine
@wraps(f)
def decorated(*args: Any, **kwargs: Any) -> Any:
if is_telemetry_enabled():
return f(*args, **kwargs)
return None
return decorated
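# Usage sketch (illustration only): the decorator wraps both sync and async
# callables and short-circuits to `None` when telemetry is disabled.
#
#     @ensure_telemetry_enabled
#     def track_something() -> None:
#         _track("Something Happened")
#
#     @ensure_telemetry_enabled
#     async def track_something_async() -> None:
#         _track("Something Happened")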
def _fetch_write_key(tool: Text, environment_variable: Text) -> Optional[Text]:
"""Read the write key from a tool from our set of keys.
Args:
tool: name of the tool we want to fetch a key for
environment_variable: name of the environment variable to set the key
Returns:
write key, if a key was present.
"""
import pkg_resources
from rasa import __name__ as name
if os.environ.get(environment_variable):
# a write key set using the environment variable will always
# overwrite any key provided as part of the package (`keys` file)
return os.environ.get(environment_variable)
write_key_path = pkg_resources.resource_filename(name, "keys")
# noinspection PyBroadException
try:
with open(write_key_path) as f:
return json.load(f).get(tool)
except Exception: # skipcq:PYL-W0703
return None
def telemetry_write_key() -> Optional[Text]:
"""Read the Segment write key from the segment key text file.
    The segment key text file should be present only in wheel/sdist packaged
versions of Rasa Open Source. This avoids running telemetry locally when
developing on Rasa or when running CI builds.
In local development, this should always return `None` to avoid logging telemetry.
Returns:
Segment write key, if the key file was present.
"""
return _fetch_write_key("segment", TELEMETRY_WRITE_KEY_ENVIRONMENT_VARIABLE)
def sentry_write_key() -> Optional[Text]:
"""Read the sentry write key from the sentry key text file.
Returns:
Sentry write key, if the key file was present.
"""
return _fetch_write_key("sentry", EXCEPTION_WRITE_KEY_ENVIRONMENT_VARIABLE)
def _encode_base64(original: Text, encoding: Text = "utf-8") -> Text:
"""Encodes a string as a base64 string.
Args:
original: Text to be encoded.
encoding: Encoding used to convert text to binary.
Returns:
Encoded text.
"""
import base64
return base64.b64encode(original.encode(encoding)).decode(encoding)
def segment_request_header(write_key: Text) -> Dict[Text, Any]:
"""Use a segment write key to create authentication headers for the segment API.
Args:
write_key: Authentication key for segment.
Returns:
Authentication headers for segment.
"""
return {
"Authorization": "Basic {}".format(_encode_base64(write_key + ":")),
"Content-Type": "application/json",
}
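# Illustration (hypothetical write key): the write key is used as a Basic-auth
# username with an empty password, so
#
#     segment_request_header("MYKEY")
#
# yields
#
#     {"Authorization": "Basic TVlLRVk6", "Content-Type": "application/json"}
#
# because base64("MYKEY:") == "TVlLRVk6".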
def segment_request_payload(
distinct_id: Text,
event_name: Text,
properties: Dict[Text, Any],
context: Dict[Text, Any],
) -> Dict[Text, Any]:
"""Compose a valid payload for the segment API.
Args:
distinct_id: Unique telemetry ID.
event_name: Name of the event.
properties: Values to report along the event.
context: Context information about the event.
Returns:
Valid segment payload.
"""
return {
"userId": distinct_id,
"event": event_name,
"properties": properties,
"context": context,
}
def in_continuous_integration() -> bool:
"""Returns `True` if currently running inside a continuous integration context."""
return any(env in os.environ for env in CI_ENVIRONMENT_TELL)
def _is_telemetry_debug_enabled() -> bool:
"""Check if telemetry debug mode is enabled."""
return (
os.environ.get(TELEMETRY_DEBUG_ENVIRONMENT_VARIABLE, "false").lower() == "true"
)
def print_telemetry_event(payload: Dict[Text, Any]) -> None:
"""Print a telemetry events payload to the commandline.
Args:
payload: payload of the event
"""
print("Telemetry Event:")
print(json.dumps(payload, indent=2))
def _send_event(
distinct_id: Text,
event_name: Text,
properties: Dict[Text, Any],
context: Dict[Text, Any],
) -> None:
"""Report the contents segmentof an event to the /track Segment endpoint.
Documentation: https://.com/docs/sources/server/http/
Do not call this function from outside telemetry.py! This function does not
check if telemetry is enabled or not.
Args:
distinct_id: Unique telemetry ID.
event_name: Name of the event.
properties: Values to report along the event.
context: Context information about the event.
"""
payload = segment_request_payload(distinct_id, event_name, properties, context)
if _is_telemetry_debug_enabled():
print_telemetry_event(payload)
return
write_key = telemetry_write_key()
if not write_key:
# If TELEMETRY_WRITE_KEY is empty or `None`, telemetry has not been
# enabled for this build (e.g. because it is running from source)
logger.debug("Skipping request to external service: telemetry key not set.")
return
headers = segment_request_header(write_key)
resp = requests.post(
SEGMENT_ENDPOINT, headers=headers, json=payload, timeout=SEGMENT_REQUEST_TIMEOUT
)
# handle different failure cases
if resp.status_code != 200:
logger.debug(
f"Segment telemetry request returned a {resp.status_code} response. "
f"Body: {resp.text}"
)
else:
data = resp.json()
if not data.get("success"):
logger.debug(
f"Segment telemetry request returned a failure. Response: {data}"
)
def _hash_directory_path(path: Text) -> Optional[Text]:
"""Create a hash for the directory.
Returns:
hash of the directories path
"""
full_path = Path(path).absolute()
return hashlib.sha256(str(full_path).encode("utf-8")).hexdigest()
# noinspection PyBroadException
def _is_docker() -> bool:
"""Guess if we are running in docker environment.
Returns:
`True` if we are running inside docker, `False` otherwise.
"""
# first we try to use the env
try:
os.stat("/.dockerenv")
return True
except Exception: # skipcq:PYL-W0703
pass
# if that didn't work, try to use proc information
try:
return "docker" in rasa.shared.utils.io.read_file("/proc/self/cgroup", "utf8")
except Exception: # skipcq:PYL-W0703
return False
def with_default_context_fields(
context: Optional[Dict[Text, Any]] = None
) -> Dict[Text, Any]:
"""Return a new context dictionary with default and provided field values merged.
The default fields contain only the OS information for now.
Args:
context: Context information about the event.
Return:
A new context.
"""
context = context or {}
return {**_default_context_fields(), **context}
def _default_context_fields() -> Dict[Text, Any]:
"""Return a dictionary that contains the default context values.
Return:
A new context containing information about the runtime environment.
"""
global TELEMETRY_CONTEXT
if not TELEMETRY_CONTEXT:
# Make sure to update the example in docs/docs/telemetry/telemetry.mdx
# if you change / add context
TELEMETRY_CONTEXT = {
"os": {"name": platform.system(), "version": platform.release()},
"ci": in_continuous_integration(),
"project": model.project_fingerprint(),
"directory": _hash_directory_path(os.getcwd()),
"python": sys.version.split(" ")[0],
"rasa_open_source": rasa.__version__,
"cpu": multiprocessing.cpu_count(),
"docker": _is_docker(),
}
# avoid returning the cached dict --> caller could modify the dictionary...
# usually we would use `lru_cache`, but that doesn't return a dict copy and
# doesn't work on inner functions, so we need to roll our own caching...
return TELEMETRY_CONTEXT.copy()
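# Why the `.copy()` above matters (illustrative sketch): handing out the cached
# dict directly would let a caller mutate the module-level cache, e.g.
#
#     ctx = _default_context_fields()
#     ctx["docker"] = "overridden"  # would silently leak into TELEMETRY_CONTEXT
#
# Returning a shallow copy keeps TELEMETRY_CONTEXT stable across calls.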
def _track(
event_name: Text,
properties: Optional[Dict[Text, Any]] = None,
context: Optional[Dict[Text, Any]] = None,
) -> None:
"""Tracks a telemetry event.
It is OK to use this function from outside telemetry.py, but note that it
is recommended to create a new track_xyz() function for complex telemetry
events, or events that are generated from many parts of the Rasa Open Source code.
Args:
event_name: Name of the event.
properties: Dictionary containing the event's properties.
context: Dictionary containing some context for this event.
"""
try:
telemetry_id = get_telemetry_id()
if not telemetry_id:
logger.debug("Will not report telemetry events as no ID was found.")
return
if not properties:
properties = {}
properties[TELEMETRY_ID] = telemetry_id
_send_event(
telemetry_id, event_name, properties, with_default_context_fields(context)
)
except Exception as e: # skipcq:PYL-W0703
logger.debug(f"Skipping telemetry reporting: {e}")
def get_telemetry_id() -> Optional[Text]:
"""Return the unique telemetry identifier for this Rasa Open Source install.
The identifier can be any string, but it should be a UUID.
Returns:
The identifier, if it is configured correctly.
"""
try:
telemetry_config = (
rasa_utils.read_global_config_value(CONFIG_FILE_TELEMETRY_KEY) or {}
)
return telemetry_config.get(CONFIG_TELEMETRY_ID)
except Exception as e: # skipcq:PYL-W0703
logger.debug(f"Unable to retrieve telemetry ID: {e}")
return None
def toggle_telemetry_reporting(is_enabled: bool) -> None:
"""Write to the configuration if telemetry tracking should be enabled or disabled.
Args:
is_enabled: `True` if the telemetry reporting should be enabled,
`False` otherwise.
"""
configuration = rasa_utils.read_global_config_value(CONFIG_FILE_TELEMETRY_KEY)
if configuration:
configuration[CONFIG_TELEMETRY_ENABLED] = is_enabled
else:
configuration = _default_telemetry_configuration(is_enabled)
rasa_utils.write_global_config_value(CONFIG_FILE_TELEMETRY_KEY, configuration)
def filter_errors(
event: Optional[Dict[Text, Any]], hint: Optional[Dict[Text, Any]] = None
) -> Optional[Dict[Text, Any]]:
"""Filter errors.
Args:
event: event to be logged to sentry
hint: some hinting information sent alongside of the event
Returns:
the event without any sensitive / PII data or `None` if the event constitutes
an `ImportError` which should be discarded.
"""
if hint and "exc_info" in hint:
exc_type, exc_value, tb = hint["exc_info"]
if isinstance(exc_value, ImportError):
return None
return event
def before_send(
event: Dict[Text, Any], _unused_hint: Optional[Dict[Text, Any]] = None
) -> Optional[Dict[Text, Any]]:
"""Strips the sensitive data and filters errors before sending to sentry.
Args:
event: event to be logged to sentry
_unused_hint: some hinting information sent alongside of the event
Returns:
the event without any sensitive / PII data or `None` if the event should
be discarded.
"""
cleaned_event = strip_sensitive_data_from_sentry_event(event, _unused_hint)
return filter_errors(cleaned_event, _unused_hint)
def strip_sensitive_data_from_sentry_event(
event: Dict[Text, Any], _unused_hint: Optional[Dict[Text, Any]] = None
) -> Optional[Dict[Text, Any]]:
"""Remove any sensitive data from the event (e.g. path names).
Args:
event: event to be logged to sentry
_unused_hint: some hinting information sent alongside of the event
Returns:
the event without any sensitive / PII data or `None` if the event should
be discarded.
"""
# removes any paths from stack traces (avoids e.g. sending
    # a user's home directory name if the package is installed there)
for value in event.get("exception", {}).get("values", []):
for frame in value.get("stacktrace", {}).get("frames", []):
frame["abs_path"] = ""
if f"rasa_sdk{os.path.sep}executor.py" in frame["filename"]:
# this looks a lot like an exception in the SDK and hence custom code
# no need for us to deal with that
return None
elif "site-packages" in frame["filename"]:
# drop site-packages and following slash / backslash
relative_name = frame["filename"].split("site-packages")[-1][1:]
frame["filename"] = os.path.join("site-packages", relative_name)
elif "dist-packages" in frame["filename"]:
# drop dist-packages and following slash / backslash
relative_name = frame["filename"].split("dist-packages")[-1][1:]
frame["filename"] = os.path.join("dist-packages", relative_name)
elif os.path.isabs(frame["filename"]):
# if the file path is absolute, we'll drop the whole event as this is
# very likely custom code. needs to happen after cleaning as
# site-packages / dist-packages paths are also absolute, but fine.
return None
return event
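# Example of the path scrubbing above (hypothetical frame, illustration only):
#
#     "/home/alice/venv/lib/python3.8/site-packages/rasa/core/agent.py"
#
# is rewritten to
#
#     "site-packages/rasa/core/agent.py"
#
# while an absolute path outside site-/dist-packages causes the whole event to
# be dropped (the function returns `None`).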
@ensure_telemetry_enabled
def initialize_error_reporting() -> None:
"""Sets up automated error reporting.
Exceptions are reported to sentry. We avoid sending any metadata (local
variables, paths, ...) to make sure we don't compromise any data. Only the
exception and its stacktrace is logged and only if the exception origins
from the `rasa` package.
"""
import sentry_sdk
from sentry_sdk import configure_scope
from sentry_sdk.integrations.atexit import AtexitIntegration
from sentry_sdk.integrations.dedupe import DedupeIntegration
from sentry_sdk.integrations.excepthook import ExcepthookIntegration
# key for local testing can be found at
# https://sentry.io/settings/rasahq/projects/rasa-open-source/install/python/
# for local testing, set the key using `RASA_EXCEPTION_WRITE_KEY=key rasa <command>`
key = sentry_write_key()
if not key:
return
telemetry_id = get_telemetry_id()
# this is a very defensive configuration, avoiding as many integrations as
# possible. it also submits very little data (exception with error message
# and line numbers).
sentry_sdk.init(
f"https://{key}.ingest.sentry.io/2801673",
before_send=before_send,
integrations=[
ExcepthookIntegration(),
DedupeIntegration(),
AtexitIntegration(lambda _, __: None),
],
send_default_pii=False, # activate PII filter
server_name=telemetry_id or "UNKNOWN",
ignore_errors=[
# std lib errors
KeyboardInterrupt, # user hit the interrupt key (Ctrl+C)
MemoryError, # machine is running out of memory
NotImplementedError, # user is using a feature that is not implemented
asyncio.CancelledError, # an async operation has been cancelled by the user
# expected Rasa errors
RasaException,
OSError,
],
in_app_include=["rasa"], # only submit errors in this package
with_locals=False, # don't submit local variables
release=f"rasa-{rasa.__version__}",
default_integrations=False,
environment="development" if in_continuous_integration() else "production",
)
if not telemetry_id:
return
with configure_scope() as scope:
# sentry added these more recently, just a protection in a case where a
# user has installed an older version of sentry
if hasattr(scope, "set_user"):
scope.set_user({"id": telemetry_id})
default_context = _default_context_fields()
if hasattr(scope, "set_context"):
if "os" in default_context:
# os is a nested dict, hence we report it separately
scope.set_context("Operating System", default_context.pop("os"))
scope.set_context("Environment", default_context)
@contextlib.contextmanager
def track_model_training(
training_data: "TrainingDataImporter", model_type: Text, is_finetuning: bool = False
) -> typing.Generator[None, None, None]:
"""Track a model training started.
WARNING: since this is a generator, it can't use the ensure telemetry
decorator. We need to manually add these checks here. This can be
fixed as soon as we drop python 3.6 support.
Args:
training_data: Training data used for the training.
model_type: Specifies the type of training, should be either "rasa", "core"
or "nlu".
is_finetuning: `True` if the model is trained by finetuning another model.
"""
if not initialize_telemetry():
# telemetry reporting is disabled. we won't do any reporting
yield # runs the training
return
config = training_data.get_config()
stories = training_data.get_stories()
nlu_data = training_data.get_nlu_data()
domain = training_data.get_domain()
count_conditional_responses = domain.count_conditional_response_variations()
(
count_total_mappings,
count_custom_mappings,
count_conditional_mappings,
) = domain.count_slot_mapping_statistics()
training_id = uuid.uuid4().hex
# Make sure to update the example in docs/docs/telemetry/telemetry.mdx
# if you change / add any properties
_track(
TRAINING_STARTED_EVENT,
{
"language": config.get("language"),
"training_id": training_id,
"type": model_type,
"pipeline": config.get("pipeline"),
"policies": config.get("policies"),
"train_schema": config.get("train_schema"),
"predict_schema": config.get("predict_schema"),
"num_intent_examples": len(nlu_data.intent_examples),
"num_entity_examples": len(nlu_data.entity_examples),
"num_actions": len(domain.action_names_or_texts),
# Old nomenclature from when 'responses' were still called
# 'templates' in the domain
"num_templates": len(domain.responses),
"num_conditional_response_variations": count_conditional_responses,
"num_slot_mappings": count_total_mappings,
"num_custom_slot_mappings": count_custom_mappings,
"num_conditional_slot_mappings": count_conditional_mappings,
"num_slots": len(domain.slots),
"num_forms": len(domain.forms),
"num_intents": len(domain.intents),
"num_entities": len(domain.entities),
"num_story_steps": len(stories.story_steps),
"num_lookup_tables": len(nlu_data.lookup_tables),
"num_synonyms": len(nlu_data.entity_synonyms),
"num_regexes": len(nlu_data.regex_features),
"is_finetuning": is_finetuning,
"recipe": config.get("recipe"),
},
)
start = datetime.now()
yield
runtime = datetime.now() - start
_track(
TRAINING_COMPLETED_EVENT,
{
"training_id": training_id,
"type": model_type,
"runtime": int(runtime.total_seconds()),
},
)
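# Usage sketch (hypothetical caller, for illustration only):
#
#     with track_model_training(file_importer, model_type="rasa"):
#         run_the_actual_training()
#
# TRAINING_STARTED_EVENT is sent before the block executes and
# TRAINING_COMPLETED_EVENT (with the measured runtime in seconds) afterwards;
# if telemetry is disabled, the context manager is a no-op.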
@ensure_telemetry_enabled
def track_telemetry_disabled() -> None:
"""Track when a user disables telemetry."""
_track(TELEMETRY_DISABLED_EVENT)
@ensure_telemetry_enabled
def track_data_split(fraction: float, data_type: Text) -> None:
"""Track when a user splits data.
Args:
fraction: How much data goes into train and how much goes into test
data_type: Is this core, nlu or nlg data
"""
_track(TELEMETRY_DATA_SPLIT_EVENT, {"fraction": fraction, "type": data_type})
@ensure_telemetry_enabled
def track_validate_files(validation_success: bool) -> None:
"""Track when a user validates data files.
Args:
validation_success: Whether the validation was successful
"""
_track(TELEMETRY_DATA_VALIDATED_EVENT, {"validation_success": validation_success})
@ensure_telemetry_enabled
def track_data_convert(output_format: Text, data_type: Text) -> None:
"""Track when a user converts data.
Args:
output_format: Target format for the converter
data_type: Is this core, nlu or nlg data
"""
_track(
TELEMETRY_DATA_CONVERTED_EVENT,
{"output_format": output_format, "type": data_type},
)
@ensure_telemetry_enabled
def track_tracker_export(
number_of_exported_events: int,
tracker_store: "TrackerStore",
event_broker: "EventBroker",
) -> None:
"""Track when a user exports trackers.
Args:
number_of_exported_events: Number of events that got exported
tracker_store: Store used to retrieve the events from
event_broker: Broker the events are getting published towards
"""
_track(
TELEMETRY_TRACKER_EXPORTED_EVENT,
{
"number_of_exported_events": number_of_exported_events,
"tracker_store": type(tracker_store).__name__,
"event_broker": type(event_broker).__name__,
},
)
@ensure_telemetry_enabled
def track_interactive_learning_start(
skip_visualization: bool, save_in_e2e: bool
) -> None:
"""Track when a user starts an interactive learning session.
Args:
skip_visualization: Is visualization skipped in this session
save_in_e2e: Is e2e used in this session
"""
_track(
TELEMETRY_INTERACTIVE_LEARNING_STARTED_EVENT,
{"skip_visualization": skip_visualization, "save_in_e2e": save_in_e2e},
)
@ensure_telemetry_enabled
def track_server_start(
input_channels: List["InputChannel"],
endpoints: Optional["AvailableEndpoints"],
model_directory: Optional[Text],
number_of_workers: int,
is_api_enabled: bool,
) -> None:
"""Tracks when a user starts a rasa server.
Args:
input_channels: Used input channels
endpoints: Endpoint configuration for the server
model_directory: directory of the running model
number_of_workers: number of used Sanic workers
is_api_enabled: whether the rasa API server is enabled
"""
from rasa.core.utils import AvailableEndpoints
def project_fingerprint_from_model(
_model_directory: Optional[Text],
) -> Optional[Text]:
"""Gets project fingerprint from an app's loaded model."""
if not model_directory:
return None
try:
model_archive = model.get_local_model(_model_directory)
metadata = LocalModelStorage.metadata_from_archive(model_archive)
return metadata.project_fingerprint
except Exception:
return None
if not endpoints:
endpoints = AvailableEndpoints()
_track(
TELEMETRY_SERVER_STARTED_EVENT,
{
"input_channels": [i.name() for i in input_channels],
"api_enabled": is_api_enabled,
"number_of_workers": number_of_workers,
"endpoints_nlg": endpoints.nlg.type if endpoints.nlg else None,
"endpoints_nlu": endpoints.nlu.type if endpoints.nlu else None,
"endpoints_action_server": endpoints.action.type
if endpoints.action
else None,
"endpoints_model_server": endpoints.model.type if endpoints.model else None,
"endpoints_tracker_store": endpoints.tracker_store.type
if endpoints.tracker_store
else None,
"endpoints_lock_store": endpoints.lock_store.type
if endpoints.lock_store
else None,
"endpoints_event_broker": endpoints.event_broker.type
if endpoints.event_broker
else None,
"project": project_fingerprint_from_model(model_directory),
},
)
@ensure_telemetry_enabled
def track_project_init(path: Text) -> None:
"""Track when a user creates a project using rasa init.
Args:
path: Location of the project
"""
_track(
TELEMETRY_PROJECT_CREATED_EVENT, {"init_directory": _hash_directory_path(path)}
)
@ensure_telemetry_enabled
def track_shell_started(model_type: Text) -> None:
"""Track when a user starts a bot using rasa shell.
Args:
model_type: Type of the model, core / nlu or rasa.
"""
_track(TELEMETRY_SHELL_STARTED_EVENT, {"type": model_type})
@ensure_telemetry_enabled
def track_rasa_x_local() -> None:
"""Track when a user runs Rasa X in local mode."""
_track(TELEMETRY_RASA_X_LOCAL_STARTED_EVENT)
@ensure_telemetry_enabled
def track_visualization() -> None:
"""Track when a user runs the visualization."""
_track(TELEMETRY_VISUALIZATION_STARTED_EVENT)
@ensure_telemetry_enabled
def track_core_model_test(num_story_steps: int, e2e: bool, agent: "Agent") -> None:
"""Track when a user tests a core model.
Args:
num_story_steps: Number of test stories used for the comparison
e2e: indicator if tests running in end to end mode
agent: Agent of the model getting tested
"""
if agent.processor is None:
project_fingerprint = ""
else:
project_fingerprint = agent.processor.model_metadata.project_fingerprint
_track(
TELEMETRY_TEST_CORE_EVENT,
{
"project": project_fingerprint,
"end_to_end": e2e,
"num_story_steps": num_story_steps,
},
)
@ensure_telemetry_enabled
def track_nlu_model_test(test_data: "TrainingData") -> None:
"""Track when a user tests an nlu model.
Args:
test_data: Data used for testing
"""
_track(
TELEMETRY_TEST_NLU_EVENT,
{
"num_intent_examples": len(test_data.intent_examples),
"num_entity_examples": len(test_data.entity_examples),
"num_lookup_tables": len(test_data.lookup_tables),
"num_synonyms": len(test_data.entity_synonyms),
"num_regexes": len(test_data.regex_features),
},
)
@ensure_telemetry_enabled
def track_markers_extraction_initiated(
strategy: Text, only_extract: bool, seed: bool, count: Optional[int]
) -> None:
"""Track when a user tries to extract success markers.
Args:
strategy: The strategy the user is using for tracker selection
only_extract: Indicates if the user is only extracting markers or also
producing stats
seed: Indicates if the user used a seed for this attempt
count: (Optional) The number of trackers the user is trying to select.
"""
_track(
TELEMETRY_MARKERS_EXTRACTION_INITIATED_EVENT,
{
"strategy": strategy,
"only_extract": only_extract,
"seed": seed,
"count": count,
},
)
@ensure_telemetry_enabled
def track_markers_extracted(trackers_count: int) -> None:
"""Track when markers have been extracted by a user.
Args:
trackers_count: The actual number of trackers processed
"""
_track(TELEMETRY_MARKERS_EXTRACTED_EVENT, {"trackers_count": trackers_count})
@ensure_telemetry_enabled
def track_markers_stats_computed(trackers_count: int) -> None:
"""Track when stats over markers have been computed by a user.
Args:
trackers_count: The actual number of trackers processed
"""
_track(TELEMETRY_MARKERS_STATS_COMPUTED_EVENT, {"trackers_count": trackers_count})
@ensure_telemetry_enabled
def track_markers_parsed_count(
marker_count: int, max_depth: int, branching_factor: int
) -> None:
"""Track when markers have been successfully parsed from config.
Args:
marker_count: The number of markers found in the config
max_depth: The maximum depth of any marker in the config
branching_factor: The maximum number of children of any marker in the config.
"""
_track(
TELEMETRY_MARKERS_PARSED_COUNT,
{
"marker_count": marker_count,
"max_depth": max_depth,
"branching_factor": branching_factor,
},
)
|
the-stack_106_28522 | import sys
import time
from django.core.management import call_command
from cms.categories.models import PublicationType
from .importer_cls import Importer
# the indiators from wordpress aren't nice so map them to better titles
SOURCES = {
"publication_types": "NHS England & Improvement",
"publication_types-aac": "Accelerated Access Collaborative",
"publication_types-commissioning": "Commissioning",
"publication_types-coronavirus": "Coronavirus",
"publication_types-greenernhs": "Greener NHS",
"publication_types-improvement-hub": "Improvement Hub",
"publication_types-non-executive-opportunities": "Non-executive opportunities",
"publication_types-rightcare": "Right Care",
}
class PublicationTypesImporter(Importer):
def __init__(self):
publication_type = PublicationType.objects.all()
if publication_type:
sys.stdout.write(
"โ ๏ธ Run delete_publication_types before running this command\n"
)
sys.exit()
def parse_results(self):
publication_types = self.results
for r in publication_types:
publication_type = PublicationType(
name=r.get("name"),
slug=r.get("slug"),
description=r.get("description"),
wp_id=r.get("wp_id"),
)
publication_type.save()
sys.stdout.write(".")
if self.next:
time.sleep(self.sleep_between_fetches)
self.fetch_url(self.next)
self.parse_results()
return PublicationType.objects.count(), self.count
|
the-stack_106_28524 | """Custom loader."""
from collections import OrderedDict
import fnmatch
import logging
import os
from pathlib import Path
from typing import Any, Dict, Iterator, List, Optional, TextIO, TypeVar, Union, overload
import yaml
from homeassistant.exceptions import HomeAssistantError
from .const import SECRET_YAML
from .objects import Input, NodeListClass, NodeStrClass
# mypy: allow-untyped-calls, no-warn-return-any
JSON_TYPE = Union[List, Dict, str] # pylint: disable=invalid-name
DICT_T = TypeVar("DICT_T", bound=Dict) # pylint: disable=invalid-name
_LOGGER = logging.getLogger(__name__)
class Secrets:
"""Store secrets while loading YAML."""
def __init__(self, config_dir: Path):
"""Initialize secrets."""
self.config_dir = config_dir
self._cache: Dict[Path, Dict[str, str]] = {}
def get(self, requester_path: str, secret: str) -> str:
"""Return the value of a secret."""
current_path = Path(requester_path)
secret_dir = current_path
while True:
secret_dir = secret_dir.parent
try:
secret_dir.relative_to(self.config_dir)
except ValueError:
# We went above the config dir
break
secrets = self._load_secret_yaml(secret_dir)
if secret in secrets:
_LOGGER.debug(
"Secret %s retrieved from secrets.yaml in folder %s",
secret,
secret_dir,
)
return secrets[secret]
raise HomeAssistantError(f"Secret {secret} not defined")
def _load_secret_yaml(self, secret_dir: Path) -> Dict[str, str]:
"""Load the secrets yaml from path."""
secret_path = secret_dir / SECRET_YAML
if secret_path in self._cache:
return self._cache[secret_path]
_LOGGER.debug("Loading %s", secret_path)
try:
secrets = load_yaml(str(secret_path))
if not isinstance(secrets, dict):
raise HomeAssistantError("Secrets is not a dictionary")
if "logger" in secrets:
logger = str(secrets["logger"]).lower()
if logger == "debug":
_LOGGER.setLevel(logging.DEBUG)
else:
_LOGGER.error(
"secrets.yaml: 'logger: debug' expected, but 'logger: %s' found",
logger,
)
del secrets["logger"]
except FileNotFoundError:
secrets = {}
self._cache[secret_path] = secrets
return secrets
class SafeLineLoader(yaml.SafeLoader):
"""Loader class that keeps track of line numbers."""
def __init__(self, stream: Any, secrets: Optional[Secrets] = None) -> None:
"""Initialize a safe line loader."""
super().__init__(stream)
self.secrets = secrets
def compose_node(self, parent: yaml.nodes.Node, index: int) -> yaml.nodes.Node:
"""Annotate a node with the first line it was seen."""
last_line: int = self.line
node: yaml.nodes.Node = super().compose_node(parent, index)
node.__line__ = last_line + 1 # type: ignore
return node
def load_yaml(fname: str, secrets: Optional[Secrets] = None) -> JSON_TYPE:
"""Load a YAML file."""
try:
with open(fname, encoding="utf-8") as conf_file:
return parse_yaml(conf_file, secrets)
except UnicodeDecodeError as exc:
_LOGGER.error("Unable to read file %s: %s", fname, exc)
raise HomeAssistantError(exc) from exc
def parse_yaml(
content: Union[str, TextIO], secrets: Optional[Secrets] = None
) -> JSON_TYPE:
"""Load a YAML file."""
try:
# If configuration file is empty YAML returns None
# We convert that to an empty dict
return (
yaml.load(content, Loader=lambda stream: SafeLineLoader(stream, secrets))
or OrderedDict()
)
except yaml.YAMLError as exc:
_LOGGER.error(str(exc))
raise HomeAssistantError(exc) from exc
@overload
def _add_reference(
obj: Union[list, NodeListClass], loader: SafeLineLoader, node: yaml.nodes.Node
) -> NodeListClass:
...
@overload
def _add_reference(
obj: Union[str, NodeStrClass], loader: SafeLineLoader, node: yaml.nodes.Node
) -> NodeStrClass:
...
@overload
def _add_reference(
obj: DICT_T, loader: SafeLineLoader, node: yaml.nodes.Node
) -> DICT_T:
...
def _add_reference(obj, loader: SafeLineLoader, node: yaml.nodes.Node): # type: ignore
"""Add file reference information to an object."""
if isinstance(obj, list):
obj = NodeListClass(obj)
if isinstance(obj, str):
obj = NodeStrClass(obj)
setattr(obj, "__config_file__", loader.name)
setattr(obj, "__line__", node.start_mark.line)
return obj
def _include_yaml(loader: SafeLineLoader, node: yaml.nodes.Node) -> JSON_TYPE:
"""Load another YAML file and embeds it using the !include tag.
Example:
device_tracker: !include device_tracker.yaml
"""
fname = os.path.join(os.path.dirname(loader.name), node.value)
try:
return _add_reference(load_yaml(fname, loader.secrets), loader, node)
except FileNotFoundError as exc:
raise HomeAssistantError(
f"{node.start_mark}: Unable to read file {fname}."
) from exc
def _is_file_valid(name: str) -> bool:
"""Decide if a file is valid."""
return not name.startswith(".")
def _find_files(directory: str, pattern: str) -> Iterator[str]:
"""Recursively load files in a directory."""
for root, dirs, files in os.walk(directory, topdown=True):
dirs[:] = [d for d in dirs if _is_file_valid(d)]
for basename in sorted(files):
if _is_file_valid(basename) and fnmatch.fnmatch(basename, pattern):
filename = os.path.join(root, basename)
yield filename
def _include_dir_named_yaml(
loader: SafeLineLoader, node: yaml.nodes.Node
) -> OrderedDict:
"""Load multiple files from directory as a dictionary."""
mapping: OrderedDict = OrderedDict()
loc = os.path.join(os.path.dirname(loader.name), node.value)
for fname in _find_files(loc, "*.yaml"):
filename = os.path.splitext(os.path.basename(fname))[0]
if os.path.basename(fname) == SECRET_YAML:
continue
mapping[filename] = load_yaml(fname, loader.secrets)
return _add_reference(mapping, loader, node)
def _include_dir_merge_named_yaml(
loader: SafeLineLoader, node: yaml.nodes.Node
) -> OrderedDict:
"""Load multiple files from directory as a merged dictionary."""
mapping: OrderedDict = OrderedDict()
loc = os.path.join(os.path.dirname(loader.name), node.value)
for fname in _find_files(loc, "*.yaml"):
if os.path.basename(fname) == SECRET_YAML:
continue
loaded_yaml = load_yaml(fname, loader.secrets)
if isinstance(loaded_yaml, dict):
mapping.update(loaded_yaml)
return _add_reference(mapping, loader, node)
def _include_dir_list_yaml(
loader: SafeLineLoader, node: yaml.nodes.Node
) -> List[JSON_TYPE]:
"""Load multiple files from directory as a list."""
loc = os.path.join(os.path.dirname(loader.name), node.value)
return [
load_yaml(f)
for f in _find_files(loc, "*.yaml")
if os.path.basename(f) != SECRET_YAML
]
def _include_dir_merge_list_yaml(
loader: SafeLineLoader, node: yaml.nodes.Node
) -> JSON_TYPE:
"""Load multiple files from directory as a merged list."""
loc: str = os.path.join(os.path.dirname(loader.name), node.value)
merged_list: List[JSON_TYPE] = []
for fname in _find_files(loc, "*.yaml"):
if os.path.basename(fname) == SECRET_YAML:
continue
loaded_yaml = load_yaml(fname, loader.secrets)
if isinstance(loaded_yaml, list):
merged_list.extend(loaded_yaml)
return _add_reference(merged_list, loader, node)
def _ordered_dict(loader: SafeLineLoader, node: yaml.nodes.MappingNode) -> OrderedDict:
"""Load YAML mappings into an ordered dictionary to preserve key order."""
loader.flatten_mapping(node)
nodes = loader.construct_pairs(node)
seen: Dict = {}
for (key, _), (child_node, _) in zip(nodes, node.value):
line = child_node.start_mark.line
try:
hash(key)
except TypeError as exc:
fname = getattr(loader.stream, "name", "")
raise yaml.MarkedYAMLError(
context=f'invalid key: "{key}"',
context_mark=yaml.Mark(fname, 0, line, -1, None, None),
) from exc
if key in seen:
fname = getattr(loader.stream, "name", "")
_LOGGER.warning(
'YAML file %s contains duplicate key "%s". Check lines %d and %d',
fname,
key,
seen[key],
line,
)
seen[key] = line
return _add_reference(OrderedDict(nodes), loader, node)
def _construct_seq(loader: SafeLineLoader, node: yaml.nodes.Node) -> JSON_TYPE:
"""Add line number and file name to Load YAML sequence."""
(obj,) = loader.construct_yaml_seq(node)
return _add_reference(obj, loader, node)
def _env_var_yaml(loader: SafeLineLoader, node: yaml.nodes.Node) -> str:
"""Load environment variables and embed it into the configuration YAML."""
args = node.value.split()
# Check for a default value
if len(args) > 1:
return os.getenv(args[0], " ".join(args[1:]))
if args[0] in os.environ:
return os.environ[args[0]]
_LOGGER.error("Environment variable %s not defined", node.value)
raise HomeAssistantError(node.value)
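# Illustration of the `!env_var` semantics implemented above:
#
#     password: !env_var DB_PASSWORD           # -> os.environ["DB_PASSWORD"]
#     password: !env_var DB_PASSWORD hunter2   # -> env value if set, else "hunter2"
#
# If the variable is unset and no default is given, a HomeAssistantError is raised.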
def secret_yaml(loader: SafeLineLoader, node: yaml.nodes.Node) -> JSON_TYPE:
"""Load secrets and embed it into the configuration YAML."""
if loader.secrets is None:
raise HomeAssistantError("Secrets not supported in this YAML file")
return loader.secrets.get(loader.name, node.value)
SafeLineLoader.add_constructor("!include", _include_yaml)
SafeLineLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, _ordered_dict
)
SafeLineLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_SEQUENCE_TAG, _construct_seq
)
SafeLineLoader.add_constructor("!env_var", _env_var_yaml)
SafeLineLoader.add_constructor("!secret", secret_yaml)
SafeLineLoader.add_constructor("!include_dir_list", _include_dir_list_yaml)
SafeLineLoader.add_constructor("!include_dir_merge_list", _include_dir_merge_list_yaml)
SafeLineLoader.add_constructor("!include_dir_named", _include_dir_named_yaml)
SafeLineLoader.add_constructor(
"!include_dir_merge_named", _include_dir_merge_named_yaml
)
SafeLineLoader.add_constructor("!input", Input.from_node)
|
the-stack_106_28525 | #!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'fantasygold_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
try:
specifiers.append(s[percent+1])
        except IndexError:
print('Failed to get specifier')
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# If both numeric format specifiers and "others" are used, assume we're dealing
# with a Qt-formatted message. In the case of Qt formatting (see https://doc.qt.io/qt-5/qstring.html#arg)
# only numeric formats are replaced at all. This means "(percentage: %1%)" is valid, without needing
# any kind of escaping that would be necessary for strprintf. Without this, this function
# would wrongly detect '%)' as a printf format specifier.
if numeric:
other = []
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
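# Worked examples for the two helpers above (illustration only):
#
#     find_format_specifiers('Sent %1 of %2')    # -> ['1', '2']
#     split_format_specifiers(['1', '2'])        # -> ({'1', '2'}, [])  Qt-style
#
#     find_format_specifiers('Copied %d files')  # -> ['d']
#     split_format_specifiers(['d'])             # -> (set(), ['d'])   strprintf-style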
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disabled by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
        parser = ET.XMLParser(encoding='utf-8')  # override encoding: ElementTree understands 'utf-8' but not 'utf8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
|
the-stack_106_28526 | from . import ml5_nn
from . import utilis
import jp_proxy_widget
from IPython.display import display
from jupyter_ui_poll import ui_events
import numpy as np
import matplotlib.pyplot as plt
import cv2
import time
class ObjectDetector(ml5_nn.neuralNetwork):
def __init__(self, model, options=None, *pargs, **kwargs):
        # Resolve default options before the parent widget stores them, so the
        # JS model below receives the same configuration.
        if options is None:
            options = self.default_options()
        super(ObjectDetector, self).__init__(options=options, *pargs, **kwargs)
        self.data = []
self.element.html("Loaded ml5.js")
self.detect_result = []
self.count = 0
self.detect = False
self.model_load = False
def model_ready():
self.model_load = True
self.js_init("""
element.nn_info = {};
const model = ml5.objectDetector(model_name, options = options, callback = modelReady);
element.nn_info.network = model;
function modelReady() {
console.log('Model Ready!');
model_ready()
}
element.predict_images = [];
""",model_name = model, model_ready=model_ready, options = self.options)
with ui_events() as poll:
while self.model_load is False:
poll(10)
print('.', end='')
time.sleep(0.1)
print('Model is ready')
def default_options(self):
return {'filterBoxesThreshold': 0.01,
'IOUThreshold': 0.4,
'classProbThreshold': 0.4 }
def detect_callback(self, info):
self.detect_result.append(info)
def image_detect(self, image, width=400, height=400, callback=None):
if callback is None:
callback = self.detect_callback
self.detect_result = []
self.detect = False
def done_callback():
self.detect = True
if isinstance(image,str):
self.js_init("""
function handleResults(error, result) {
if(error){
console.error(error);
return;
}
console.log(result);
for (i=0;i<result.length;i++){
callback(result[i]);
}
done_callback();
}
var imageData = new Image(width, height)
imageData.src = src;
//console.log(imageData);
element.predict_images = []
element.predict_images.push(imageData);
setTimeout(function(){
                    // JS arrays have no arr[-1]; use the most recently pushed image.
                    element.nn_info.network.detect(element.predict_images[element.predict_images.length - 1], handleResults);
}, 20);
""", src=image, width=width, height=height,
callback=callback, done_callback = done_callback)
with ui_events() as poll:
while self.detect is False:
poll(10) # React to UI events (upto 10 at a time)
print('.', end='')
time.sleep(0.1)
print('done')
else:
if isinstance(image,np.ndarray):
if len(image.shape)==1:
if width*height!=image.shape[0]:
raise ValueError('image shape should be consistent with width and height')
elif len(image.shape)==2:
raise ValueError("Please provide a rgba image pixel array")
else:
if image.shape[2]!=4:
raise ValueError("Please provide a rgba image pixel array")
else:
image = image.flatten()
image = image.tolist()
self.js_init("""
var canvas = document.createElement('canvas');
canvas.width = width;
canvas.height = height;
var ctx = canvas.getContext('2d');
var imgData=ctx.getImageData(0,0,width,height);
imgData.data.set(d);
function handleResults(error, result) {
if(error){
console.error(error);
return;
}
console.log(result);
for (i=0;i<result.length;i++){
callback(result[i]);
}
done_callback();
}
element.nn_info.network.detect(imgData, handleResults);
""",d = image, width=width, height=height,
callback=callback, done_callback = done_callback)
with ui_events() as poll:
while self.detect is False:
poll(10) # React to UI events (upto 10 at a time)
print('.', end='')
time.sleep(0.1)
print('done')
def draw_bounding_box(self, image, width=None, height=None, normalized=True, box_color=(0,255,0), box_thick=2, text_color="white"):
if not self.detect_result:
raise Exception("No object detected")
if width is None or height is None:
img_shape = image.shape
width = img_shape[1]
height = img_shape[0]
fig = plt.figure()
ax = fig.add_subplot(111)
if normalized:
normalized_img = image.copy()
for i in range(len(self.detect_result)):
dt = self.detect_result[i]
normalized_x = int(dt['normalized']['x']*width)
normalized_y = int(dt['normalized']['y']*height)
normalized_w = int(dt['normalized']['width']*width)
normalized_h = int(dt['normalized']['height']*height)
print(dt['label'],normalized_x,normalized_y,normalized_w,normalized_h )
ax.annotate("%s" %dt['label']+"("+str(round(dt['confidence']*100,2))+"%)", xy=(normalized_x, normalized_y), xytext=(10, -10),textcoords="offset points",color=text_color)
normalized_img = cv2.rectangle(normalized_img,
(normalized_x,normalized_y),
(normalized_x+normalized_w,normalized_y+normalized_h),
box_color, box_thick)
ax.imshow(normalized_img)
else:
un_img = image.copy()
for i in range(len(self.detect_result)):
dt = self.detect_result[i]
x = int(dt['x'])
y = int(dt['y'])
w = int(dt['width'])
h = int(dt['height'])
print(dt['label'],x, y, w, h )
ax.annotate("%s" %dt['label']+"("+str(round(dt['confidence']*100,2))+"%)", xy=(x, y), xytext=(10, -10),textcoords="offset points",color=text_color)
un_img = cv2.rectangle(un_img,(x, y),
(x+w, y+h),box_color,box_thick)
ax.imshow(un_img) |
the-stack_106_28527 | # @Time : 2020/6/28
# @Author : Zihan Lin
# @Email : [email protected]
# UPDATE
# @Time : 2020/10/04, 2020/10/9
# @Author : Shanlei Mu, Yupeng Hou
# @Email : [email protected], [email protected]
"""
recbole.config.configurator
################################
"""
import re
import os
import sys
import yaml
import torch
from logging import getLogger
from recbole.evaluator import group_metrics, individual_metrics
from recbole.utils import get_model, Enum, EvaluatorType, ModelType, InputType, \
general_arguments, training_arguments, evaluation_arguments, dataset_arguments
class Config(object):
""" Configurator module that load the defined parameters.
Configurator module will first load the default parameters from the fixed properties in RecBole and then
load parameters from the external input.
External input supports three kind of forms: config file, command line and parameter dictionaries.
- config file: It's a file that record the parameters to be modified or added. It should be in ``yaml`` format,
e.g. a config file is 'example.yaml', the content is:
learning_rate: 0.001
train_batch_size: 2048
- command line: It should be in the format as '---learning_rate=0.001'
- parameter dictionaries: It should be a dict, where the key is parameter name and the value is parameter value,
e.g. config_dict = {'learning_rate': 0.001}
Configuration module allows the above three kind of external input format to be used together,
the priority order is as following:
command line > parameter dictionaries > config file
e.g. If we set learning_rate=0.01 in config file, learning_rate=0.02 in command line,
learning_rate=0.03 in parameter dictionaries.
Finally the learning_rate is equal to 0.02.
"""
def __init__(self, model=None, dataset=None, config_file_list=None, config_dict=None):
"""
Args:
model (str/AbstractRecommender): the model name or the model class, default is None, if it is None, config
will search the parameter 'model' from the external input as the model name or model class.
dataset (str): the dataset name, default is None, if it is None, config will search the parameter 'dataset'
from the external input as the dataset name.
config_file_list (list of str): the external config file, it allows multiple config files, default is None.
config_dict (dict): the external parameter dictionaries, default is None.
"""
self._init_parameters_category()
self.yaml_loader = self._build_yaml_loader()
self.file_config_dict = self._load_config_files(config_file_list)
self.variable_config_dict = self._load_variable_config_dict(config_dict)
self.cmd_config_dict = self._load_cmd_line()
self._merge_external_config_dict()
self.model, self.model_class, self.dataset = self._get_model_and_dataset(model, dataset)
self._load_internal_config_dict(self.model, self.model_class, self.dataset)
self.final_config_dict = self._get_final_config_dict()
self._set_default_parameters()
self._init_device()
def _init_parameters_category(self):
self.parameters = dict()
self.parameters['General'] = general_arguments
self.parameters['Training'] = training_arguments
self.parameters['Evaluation'] = evaluation_arguments
self.parameters['Dataset'] = dataset_arguments
def _build_yaml_loader(self):
loader = yaml.FullLoader
loader.add_implicit_resolver(
u'tag:yaml.org,2002:float',
re.compile(u'''^(?:
[-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
|[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
|\\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
|[-+]?\\.(?:inf|Inf|INF)
|\\.(?:nan|NaN|NAN))$''', re.X),
list(u'-+0123456789.'))
return loader
def _convert_config_dict(self, config_dict):
r"""This function convert the str parameters to their original type.
"""
for key in config_dict:
param = config_dict[key]
if not isinstance(param, str):
continue
try:
value = eval(param)
if not isinstance(value, (str, int, float, list, tuple, dict, bool, Enum)):
value = param
except (NameError, SyntaxError, TypeError):
if isinstance(param, str):
if param.lower() == "true":
value = True
elif param.lower() == "false":
value = False
else:
value = param
else:
value = param
config_dict[key] = value
return config_dict
def _load_config_files(self, file_list):
file_config_dict = dict()
if file_list:
for file in file_list:
with open(file, 'r', encoding='utf-8') as f:
file_config_dict.update(yaml.load(f.read(), Loader=self.yaml_loader))
return file_config_dict
def _load_variable_config_dict(self, config_dict):
# HyperTuning may set the parameters such as mlp_hidden_size in NeuMF in the format of ['[]', '[]']
# then config_dict will receive a str '[]', but indeed it's a list []
# temporarily use _convert_config_dict to solve this problem
return self._convert_config_dict(config_dict) if config_dict else dict()
def _load_cmd_line(self):
r""" Read parameters from command line and convert it to str.
"""
cmd_config_dict = dict()
unrecognized_args = []
if "ipykernel_launcher" not in sys.argv[0]:
for arg in sys.argv[1:]:
if not arg.startswith("--") or len(arg[2:].split("=")) != 2:
unrecognized_args.append(arg)
continue
cmd_arg_name, cmd_arg_value = arg[2:].split("=")
if cmd_arg_name in cmd_config_dict and cmd_arg_value != cmd_config_dict[cmd_arg_name]:
raise SyntaxError("There are duplicate commend arg '%s' with different value." % arg)
else:
cmd_config_dict[cmd_arg_name] = cmd_arg_value
if len(unrecognized_args) > 0:
logger = getLogger()
logger.warning('command line args [{}] will not be used in RecBole'.format(' '.join(unrecognized_args)))
cmd_config_dict = self._convert_config_dict(cmd_config_dict)
return cmd_config_dict
def _merge_external_config_dict(self):
external_config_dict = dict()
external_config_dict.update(self.file_config_dict)
external_config_dict.update(self.variable_config_dict)
external_config_dict.update(self.cmd_config_dict)
self.external_config_dict = external_config_dict
def _get_model_and_dataset(self, model, dataset):
if model is None:
try:
model = self.external_config_dict['model']
except KeyError:
                raise KeyError(
                    'model needs to be specified in at least one of these ways: '
                    '[model variable, config file, config dict, command line] ')
if not isinstance(model, str):
final_model_class = model
final_model = model.__name__
else:
final_model = model
final_model_class = get_model(final_model)
if dataset is None:
try:
final_dataset = self.external_config_dict['dataset']
except KeyError:
                raise KeyError('dataset needs to be specified in at least one of these ways: '
                               '[dataset variable, config file, config dict, command line] ')
else:
final_dataset = dataset
return final_model, final_model_class, final_dataset
def _update_internal_config_dict(self, file):
with open(file, 'r', encoding='utf-8') as f:
config_dict = yaml.load(f.read(), Loader=self.yaml_loader)
if config_dict is not None:
self.internal_config_dict.update(config_dict)
return config_dict
def _load_internal_config_dict(self, model, model_class, dataset):
current_path = os.path.dirname(os.path.realpath(__file__))
overall_init_file = os.path.join(current_path, '../properties/overall.yaml')
model_init_file = os.path.join(current_path, '../properties/model/' + model + '.yaml')
sample_init_file = os.path.join(current_path, '../properties/dataset/sample.yaml')
dataset_init_file = os.path.join(current_path, '../properties/dataset/' + dataset + '.yaml')
quick_start_config_path = os.path.join(current_path, '../properties/quick_start_config/')
context_aware_init = os.path.join(quick_start_config_path, 'context-aware.yaml')
context_aware_on_ml_100k_init = os.path.join(quick_start_config_path, 'context-aware_ml-100k.yaml')
DIN_init = os.path.join(quick_start_config_path, 'sequential_DIN.yaml')
DIN_on_ml_100k_init = os.path.join(quick_start_config_path, 'sequential_DIN_on_ml-100k.yaml')
sequential_init = os.path.join(quick_start_config_path, 'sequential.yaml')
special_sequential_on_ml_100k_init = os.path.join(quick_start_config_path, 'special_sequential_on_ml-100k.yaml')
sequential_embedding_model_init = os.path.join(quick_start_config_path, 'sequential_embedding_model.yaml')
knowledge_base_init = os.path.join(quick_start_config_path, 'knowledge_base.yaml')
self.internal_config_dict = dict()
for file in [overall_init_file, model_init_file, sample_init_file, dataset_init_file]:
if os.path.isfile(file):
config_dict = self._update_internal_config_dict(file)
if file == dataset_init_file:
self.parameters['Dataset'] += [key for key in config_dict.keys() if
key not in self.parameters['Dataset']]
self.internal_config_dict['MODEL_TYPE'] = model_class.type
if self.internal_config_dict['MODEL_TYPE'] == ModelType.GENERAL:
pass
elif self.internal_config_dict['MODEL_TYPE'] in {ModelType.CONTEXT, ModelType.XGBOOST}:
self._update_internal_config_dict(context_aware_init)
if dataset == 'ml-100k':
self._update_internal_config_dict(context_aware_on_ml_100k_init)
elif self.internal_config_dict['MODEL_TYPE'] == ModelType.SEQUENTIAL:
if model == 'DIN':
self._update_internal_config_dict(DIN_init)
if dataset == 'ml-100k':
self._update_internal_config_dict(DIN_on_ml_100k_init)
elif model in ['GRU4RecKG', 'KSR']:
self._update_internal_config_dict(sequential_embedding_model_init)
else:
self._update_internal_config_dict(sequential_init)
if dataset == 'ml-100k' and model in ['GRU4RecF', 'SASRecF', 'FDSA', 'S3Rec']:
self._update_internal_config_dict(special_sequential_on_ml_100k_init)
elif self.internal_config_dict['MODEL_TYPE'] == ModelType.KNOWLEDGE:
self._update_internal_config_dict(knowledge_base_init)
def _get_final_config_dict(self):
final_config_dict = dict()
final_config_dict.update(self.internal_config_dict)
final_config_dict.update(self.external_config_dict)
return final_config_dict
def _set_default_parameters(self):
self.final_config_dict['dataset'] = self.dataset
self.final_config_dict['model'] = self.model
if self.dataset == 'ml-100k':
current_path = os.path.dirname(os.path.realpath(__file__))
self.final_config_dict['data_path'] = os.path.join(current_path, '../dataset_example/' + self.dataset)
else:
self.final_config_dict['data_path'] = os.path.join(self.final_config_dict['data_path'], self.dataset)
if hasattr(self.model_class, 'input_type'):
self.final_config_dict['MODEL_INPUT_TYPE'] = self.model_class.input_type
elif 'loss_type' in self.final_config_dict:
if self.final_config_dict['loss_type'] in ['CE']:
self.final_config_dict['MODEL_INPUT_TYPE'] = InputType.POINTWISE
elif self.final_config_dict['loss_type'] in ['BPR']:
self.final_config_dict['MODEL_INPUT_TYPE'] = InputType.PAIRWISE
else:
raise ValueError('Either Model has attr \'input_type\','
'or arg \'loss_type\' should exist in config.')
eval_type = None
for metric in self.final_config_dict['metrics']:
if metric.lower() in individual_metrics:
if eval_type is not None and eval_type == EvaluatorType.RANKING:
raise RuntimeError('Ranking metrics and other metrics can not be used at the same time.')
else:
eval_type = EvaluatorType.INDIVIDUAL
if metric.lower() in group_metrics:
if eval_type is not None and eval_type == EvaluatorType.INDIVIDUAL:
raise RuntimeError('Ranking metrics and other metrics can not be used at the same time.')
else:
eval_type = EvaluatorType.RANKING
self.final_config_dict['eval_type'] = eval_type
smaller_metric = ['rmse', 'mae', 'logloss']
valid_metric = self.final_config_dict['valid_metric'].split('@')[0]
self.final_config_dict['valid_metric_bigger'] = False if valid_metric in smaller_metric else True
if 'additional_feat_suffix' in self.final_config_dict:
ad_suf = self.final_config_dict['additional_feat_suffix']
if isinstance(ad_suf, str):
self.final_config_dict['additional_feat_suffix'] = [ad_suf]
def _init_device(self):
use_gpu = self.final_config_dict['use_gpu']
if use_gpu:
os.environ["CUDA_VISIBLE_DEVICES"] = str(self.final_config_dict['gpu_id'])
self.final_config_dict['device'] = torch.device("cuda" if torch.cuda.is_available() and use_gpu else "cpu")
def __setitem__(self, key, value):
if not isinstance(key, str):
raise TypeError("index must be a str.")
self.final_config_dict[key] = value
def __getitem__(self, item):
if item in self.final_config_dict:
return self.final_config_dict[item]
else:
return None
def __contains__(self, key):
if not isinstance(key, str):
raise TypeError("index must be a str.")
return key in self.final_config_dict
def __str__(self):
args_info = ''
for category in self.parameters:
args_info += category + ' Hyper Parameters: \n'
args_info += '\n'.join(
["{}={}".format(arg, value)
for arg, value in self.final_config_dict.items()
if arg in self.parameters[category]])
args_info += '\n\n'
return args_info
def __repr__(self):
return self.__str__()
|
the-stack_106_28528 |
import os
import sys
os.environ['MLFLOW_TRACKING_URI'] = 'http://127.0.0.1:5000'
# Imports
import glob
import numpy as np
import os.path as path
from scipy import misc
from keras.models import Sequential
from keras.layers import Activation, Dropout, Flatten, Dense, Conv2D, MaxPooling2D
from keras.callbacks import EarlyStopping, TensorBoard
from sklearn.metrics import accuracy_score, f1_score
from datetime import datetime
from keras import optimizers
import matplotlib.pyplot as plt  # used by visualize_data below
import mlflow
import mlflow.keras
import mlflow.sklearn
image_path = os.environ['AUTOMLIB_DATA_PATH']
def setImagePath(path):
print("setting path :" + path)
global image_path
image_path = path
def preprocess(image_format='*.png', train_test_split=0.9):
file_paths = glob.glob(path.join(image_path, image_format))
# Load the images
images = [misc.imread(path) for path in file_paths]
images = np.asarray(images)
# Get image size
image_size = np.asarray([images.shape[1], images.shape[2], images.shape[3]])
print(image_size)
# Scale
images = images / 255
# Read the labels from the filenames
n_images = images.shape[0]
labels = np.zeros(n_images)
for i in range(n_images):
        # the class label is encoded as the first character of the file name (e.g. "1_xxx.png")
        filename = path.basename(file_paths[i])
        labels[i] = int(filename[0])
# Split into test and training sets
TRAIN_TEST_SPLIT = train_test_split
# Split at the given index
split_index = int(TRAIN_TEST_SPLIT * n_images)
shuffled_indices = np.random.permutation(n_images)
train_indices = shuffled_indices[0:split_index]
test_indices = shuffled_indices[split_index:]
# Split the images and the labels
x_train = images[train_indices, :, :]
y_train = labels[train_indices]
x_test = images[test_indices, :, :]
y_test = labels[test_indices]
return x_train, y_train, x_test, y_test,image_size
def visualize_data(positive_images, negative_images):
# INPUTS
# positive_images - Images where the label = 1 (True)
# negative_images - Images where the label = 0 (False)
figure = plt.figure()
count = 0
for i in range(positive_images.shape[0]):
count += 1
figure.add_subplot(2, positive_images.shape[0], count)
plt.imshow(positive_images[i, :, :])
plt.axis('off')
plt.title("1")
figure.add_subplot(1, negative_images.shape[0], count)
plt.imshow(negative_images[i, :, :])
plt.axis('off')
plt.title("0")
plt.show()
def cnn(size, n_layers, learning_rate):
print(size)
# INPUTS
# size - size of the input images
# n_layers - number of layers
# OUTPUTS
# model - compiled CNN
# Define hyperparamters
MIN_NEURONS = 20
MAX_NEURONS = 120
KERNEL = (3, 3)
    # Determine the number of neurons in each convolutional layer
    steps = np.floor(MAX_NEURONS / (n_layers + 1))
    neurons = np.arange(MIN_NEURONS, MAX_NEURONS, steps)
    neurons = neurons.astype(np.int32)
    print(neurons)
    # Define a model
    model = Sequential()
    # Add convolutional layers
    for i in range(0, int(n_layers)):
        print(i)
        print(neurons[i])
        if i == 0:
            shape = (size[0], size[1], size[2])
            model.add(Conv2D(neurons[i], KERNEL, input_shape=shape, data_format='channels_last'))
        else:
            print(neurons[i])
            model.add(Conv2D(neurons[i], KERNEL))
model.add(Activation('relu'))
# Add max pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(MAX_NEURONS))
model.add(Activation('relu'))
# Add output layer
model.add(Dense(1))
model.add(Activation('sigmoid'))
# Compile the model
model.compile(loss='binary_crossentropy',
optimizer=optimizers.adam(lr=learning_rate),
metrics=['accuracy'])
# Print a summary of the model
model.summary()
return model
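# Usage sketch for cnn() on its own (the image size, layer count and training data
# below are placeholders):
#
#   model = cnn(size=np.array([64, 64, 3]), n_layers=3, learning_rate=1e-3)
#   model.fit(x_train, y_train, epochs=5, batch_size=200)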
def train(hyperparameters, image_format='*.png', train_test_split=0.9, epochs=5,batch_size=200):
print(hyperparameters)
print(image_path)
x_train,y_train,x_test,y_test,image_size = preprocess(image_format, train_test_split)
# Hyperparamater
N_LAYERS = hyperparameters.get("num_layers")
LEARNING_RATE = hyperparameters.get("learning_rate")
# Instantiate the model
model = cnn(size=image_size, n_layers=N_LAYERS, learning_rate=LEARNING_RATE)
# Training hyperparamters
EPOCHS = epochs
BATCH_SIZE = batch_size
# Early stopping callback
PATIENCE = 10
early_stopping = EarlyStopping(monitor='loss', min_delta=0, patience=PATIENCE, verbose=0, mode='auto')
# TensorBoard callback
LOG_DIRECTORY_ROOT = '.'
now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
log_dir = "{}/run-{}/".format(LOG_DIRECTORY_ROOT, now)
tensorboard = TensorBoard(log_dir=log_dir, write_graph=True, write_images=True)
# Place the callbacks in a list
callbacks = [early_stopping, tensorboard]
with mlflow.start_run():
hist = model.fit(x_train, y_train, epochs=EPOCHS, batch_size=BATCH_SIZE, callbacks=callbacks)
mlflow.log_param("hidden_layers", N_LAYERS)
mlflow.log_param("learning_rate", LEARNING_RATE)
for val in hist.history['acc']:
mlflow.log_metric("accuracy", val)
for val in hist.history['loss']:
mlflow.log_metric("loss", val)
return val
#mlflow.log_metric("accuracy", hist.history['acc'][-1])
#mlflow.log_metric("loss", hist.history['loss'][-1])
#mlflow.keras.log_model(model, "./models")
#model.save('./models/mnist_model.h5')
#mlflow.log_artifacts(log_dir)
#mlflow.sklearn.log_model(model, "cnn")
# Train the model
if __name__ == "__main__":
    # train() expects a hyperparameter dict; the learning rate used here is an assumed default.
    train({"num_layers": int(sys.argv[1]), "learning_rate": 0.001})
|
the-stack_106_28529 | import os
import multiprocessing
import tensorflow as tf
from functools import partial
from riptide.utils.datasets import imagerecord_dataset
from riptide.utils.thread_helper import setup_gpu_threadpool
from riptide.anneal.anneal_config import Config
from riptide.anneal.models import get_model, get_optimizer
from riptide.utils.preprocessing.inception_preprocessing import preprocess_image
from absl import app
from absl import flags
from absl import logging
FLAGS = flags.FLAGS
flags.DEFINE_string('model_dir', '/data/jwfromm/anneal_models',
'Directory to save models in.')
flags.DEFINE_string('model', '', 'Name of model to train, must be set.')
flags.DEFINE_string(
'experiment', '',
'Suffix to add to model name, should describe purpose of run.')
flags.DEFINE_string('data_path', '/data/imagenet/tfrecords',
'Directory containing tfrecords to load.')
flags.DEFINE_string('gpus', '', 'Comma seperated list of GPUS to run on.')
flags.DEFINE_integer('epochs', 100, 'Number of epochs to train.')
flags.DEFINE_integer('batch_size', 64, 'Size of each minibatch.')
flags.DEFINE_integer('image_size', 224,
'Height and Width of processed images.')
flags.DEFINE_float('learning_rate', .0128, 'Starting learning rate.')
flags.DEFINE_float('momentum', 0.9, 'Momentum used for optimizer.')
flags.DEFINE_boolean('quantize', 0, 'Use a quantized network.')
flags.DEFINE_float('a_bits', 2.0,
'Number of activation bits to use for binary model.')
flags.DEFINE_float('w_bits', 2.0,
                   'Number of weight bits to use for binary model.')
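# Example invocation (a sketch -- the script filename and model name are placeholders,
# not taken from this repository):
#
#   python train_anneal.py --model=resnet18 --experiment=w2a2 --gpus=0,1 \
#       --data_path=/data/imagenet/tfrecords --epochs=100 --batch_size=64 \
#       --quantize=True --a_bits=2 --w_bits=2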
def main(argv):
# Set visible GPUS appropriately.
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpus
# Get thread confirguration.
op_threads, num_workers = setup_gpu_threadpool(len(FLAGS.gpus.split(',')))
num_gpus = len(FLAGS.gpus.split(','))
# Set up the data input functions.
train_preprocess = partial(
preprocess_image,
height=FLAGS.image_size,
width=FLAGS.image_size,
is_training=True)
eval_preprocess = partial(
preprocess_image,
height=FLAGS.image_size,
width=FLAGS.image_size,
is_training=False)
def train_input_fn():
ds = imagerecord_dataset(
FLAGS.data_path,
FLAGS.batch_size,
is_training=True,
preprocess=train_preprocess,
num_workers=num_workers)
return ds.repeat(FLAGS.epochs)
def eval_input_fn():
ds = imagerecord_dataset(
FLAGS.data_path,
FLAGS.batch_size,
is_training=False,
preprocess=eval_preprocess,
num_workers=num_workers)
return ds.repeat(1)
# Set up estimaor model function.
def model_fn(features, labels, mode):
# Generate summary for input images.
tf.compat.v1.summary.image('images', features, max_outputs=4)
if FLAGS.quantize:
a_bits = FLAGS.a_bits
w_bits = FLAGS.w_bits
quantize = True
            fixed = False  # do standard DoReFa quantization
else:
a_bits = None
w_bits = None
quantize = False
fixed = False
config = Config(quantize=quantize, a_bits=a_bits, w_bits=w_bits, fixed=fixed)
with config:
model = get_model(FLAGS.model)
global_step = tf.compat.v1.train.get_or_create_global_step()
optimizer, learning_rate = get_optimizer(FLAGS.model, global_step,
FLAGS.batch_size, num_gpus)
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(reduction=tf.keras.losses.Reduction.NONE)
def loss_fn(labels, predictions):
per_example_loss = loss_object(labels, predictions)
return tf.nn.compute_average_loss(per_example_loss, global_batch_size=FLAGS.batch_size)
# Track learning rate.
tf.compat.v1.summary.scalar('learning_rate', learning_rate)
# Get proper mode for batchnorm and dropout, must be python bool.
training = (mode == tf.estimator.ModeKeys.TRAIN)
predictions = model(features, training=training)
total_loss = loss_fn(labels, predictions)
reg_losses = model.get_losses_for(None) + model.get_losses_for(features)
if reg_losses:
total_loss += tf.math.add_n(reg_losses)
# Compute training metrics.
accuracy = tf.compat.v1.metrics.accuracy(
labels=labels,
predictions=tf.math.argmax(predictions, axis=-1),
name='acc_op')
accuracy_top_5 = tf.compat.v1.metrics.mean(
tf.math.in_top_k(
predictions=predictions,
targets=tf.reshape(labels, [-1]),
k=5,
name='top_5_op'))
metrics = {'accuracy': accuracy, 'accuracy_top_5': accuracy_top_5}
update_ops = model.get_updates_for(features) + model.get_updates_for(
None)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(
total_loss,
var_list=model.trainable_variables,
global_step=global_step)
# Keep track of training accuracy.
if mode == tf.estimator.ModeKeys.TRAIN:
tf.compat.v1.summary.scalar('train_accuracy', accuracy[1])
tf.compat.v1.summary.scalar('train_accuracy_top_5',
accuracy_top_5[1])
return tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
eval_metric_ops=metrics)
# Now we're ready to configure our estimator and train.
# Determine proper name for this model.
full_model_path = os.path.join(FLAGS.model_dir,
"%s_%s" % (FLAGS.model, FLAGS.experiment))
# Figure out which GPUS to run on.
if num_gpus > 1:
strategy = tf.distribute.MirroredStrategy()
else:
strategy = None
session_config = tf.compat.v1.ConfigProto(
inter_op_parallelism_threads=op_threads,
intra_op_parallelism_threads=op_threads,
allow_soft_placement=True)
session_config.gpu_options.allow_growth = True
run_config = tf.estimator.RunConfig(
save_summary_steps=500,
log_step_count_steps=500,
save_checkpoints_secs=3600,
train_distribute=strategy,
session_config=session_config)
classifier = tf.estimator.Estimator(
model_fn=model_fn, model_dir=full_model_path, config=run_config)
train_spec = tf.estimator.TrainSpec(
input_fn=train_input_fn, max_steps=None)
eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn)
tf.estimator.train_and_evaluate(classifier, train_spec, eval_spec)
if __name__ == '__main__':
app.run(main)
|
the-stack_106_28530 | import argparse
import unittest
from unittest import mock
import train_model
from shared import utils
class TestMain(unittest.TestCase):
@mock.patch('train_model.setup_args')
@mock.patch('train_model.get_model')
@mock.patch('train_model.train_model')
def test_main_code_is_behaving_as_expected(self, m_train_model, m_get_model, m_setup_args):
train_model.main()
m_setup_args.assert_called_once()
m_get_model.assert_called_once_with(m_setup_args.return_value, utils.INPUT_SHAPE)
m_train_model.assert_called_once_with(m_get_model.return_value, m_setup_args.return_value)
class TestGetModel(unittest.TestCase):
@classmethod
def setUpClass(cls):
args = argparse.Namespace()
args.model_name = 'mock_model'
args.model_type = utils.INCEPTIONV3_ARCHITECTURE
args.model_mode = utils.CUSTOM
args.nb_classes = 3
args.batch_size = 12
args.epochs = 10
args.augment = False
args.layers_to_freeze = 2
cls.args = args
cls.unimplemented_exceptions = {
utils.VGG_ARCHITECTURE: {utils.BASELINE},
utils.RESNET_ARCHITECTURE: {utils.BASELINE, utils.FINETUNE},
}
def setUp(self):
self.args.model_type = utils.INCEPTIONV3_ARCHITECTURE
self.args.model_mode = utils.CUSTOM
def get_unimplemented_modes_for_model_type(self, model_type):
"""Helper method to get unimplemented mode exceptions for a model type"""
model_type_unimplemented_exceptions = self.unimplemented_exceptions.get(model_type)
return model_type_unimplemented_exceptions if model_type_unimplemented_exceptions is not None else {}
def test_raises_valueError_when_unsupported_model_type_is_given(self):
unsupported_model_type = 'xx'
self.args.model_type = unsupported_model_type
if unsupported_model_type in utils.SUPPORTED_MODEL_TYPES:
self.fail('Test condition not right: Write the test again with proper unsupported_model_type string')
with self.assertRaises(ValueError) as ve:
train_model.get_model(self.args, utils.INPUT_SHAPE)
def test_valueError_not_raised_when_supported_model_type_is_given(self):
try:
for model_type in utils.SUPPORTED_MODEL_TYPES:
self.args.model_type = model_type
if self.args.model_mode not in self.get_unimplemented_modes_for_model_type(self.args.model_type):
train_model.get_model(self.args, utils.INPUT_SHAPE)
except ValueError:
self.fail(
'{} model architecture should be supported and should not raise error'.format(self.args.model_type))
def test_raises_valueError_when_unsupported_model_mode_is_given(self):
unsupported_model_mode = 'xx'
self.args.model_mode = unsupported_model_mode
for supported_mode_type in utils.SUPPORTED_MODEL_TYPES:
self.args.model_type = supported_mode_type
if unsupported_model_mode in utils.SUPPORTED_MODEL_MODES:
self.fail('Test condition not right: Write the test again with proper unsupported_model_mode string')
with self.assertRaises(ValueError) as ve:
train_model.get_model(self.args, utils.INPUT_SHAPE)
def test_valueError_not_raised_when_supported_model_mode_is_given(self):
try:
for supported_mode_type in utils.SUPPORTED_MODEL_TYPES:
self.args.model_type = supported_mode_type
for model_mode in utils.SUPPORTED_MODEL_MODES:
self.args.model_mode = model_mode
model_type_unimplemented_exceptions = self.unimplemented_exceptions.get(self.args.model_type)
if model_type_unimplemented_exceptions is not None \
and self.args.model_mode not in model_type_unimplemented_exceptions:
train_model.get_model(self.args, utils.INPUT_SHAPE)
except ValueError:
self.fail(
'{} model architecture should be supported and should not raise error'.format(self.args.model_type))
@mock.patch('train_model.Inception_V3.build_baseline_model')
@mock.patch('train_model.Inception_V3.build_finetuned_model')
@mock.patch('train_model.Inception_V3.build_custom_model')
def test_inceptionv3_architecture_module_is_used_for_inceptionv3(self, m_custom, m_finetune, m_baseline):
self.args.model_type = utils.INCEPTIONV3_ARCHITECTURE
for supported_mode_type in utils.SUPPORTED_MODEL_MODES:
self.args.model_mode = supported_mode_type
if self.args.model_mode not in self.get_unimplemented_modes_for_model_type(self.args.model_type):
train_model.get_model(self.args, utils.INPUT_SHAPE)
self.assertTrue(any([m_custom.called, m_baseline.called, m_finetune.called]))
@mock.patch('train_model.VGG.build_finetuned_model')
@mock.patch('train_model.VGG.build_custom_model')
def test_vgg_architecture_module_is_used_for_vgg(self, m_custom, m_finetune):
self.args.model_type = utils.VGG_ARCHITECTURE
for supported_mode_type in utils.SUPPORTED_MODEL_MODES:
self.args.model_mode = supported_mode_type
if self.args.model_mode not in self.get_unimplemented_modes_for_model_type(self.args.model_type):
train_model.get_model(self.args, utils.INPUT_SHAPE)
self.assertTrue(any([m_custom.called, m_finetune.called]))
@mock.patch('train_model.ResNet.build_custom_model')
def test_resnet_architecture_module_is_used_for_resnet(self, m_custom):
self.args.model_type = utils.RESNET_ARCHITECTURE
for supported_mode_type in utils.SUPPORTED_MODEL_MODES:
self.args.model_mode = supported_mode_type
if self.args.model_mode not in self.get_unimplemented_modes_for_model_type(self.args.model_type):
train_model.get_model(self.args, utils.INPUT_SHAPE)
self.assertTrue(any([m_custom.called]))
if __name__ == '__main__':
unittest.main()
|
the-stack_106_28531 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Metadata gRPC Interceptor."""
from unittest import TestCase
import mock
from google.ads.google_ads.interceptors import MetadataInterceptor
class MetadataInterceptorTest(TestCase):
def setUp(self):
self.mock_developer_token = "1234567890"
self.mock_login_customer_id = "0987654321"
super(MetadataInterceptorTest, self).setUp()
def test_init(self):
interceptor = MetadataInterceptor(
self.mock_developer_token, self.mock_login_customer_id
)
self.assertEqual(
interceptor.developer_token_meta,
("developer-token", self.mock_developer_token),
)
self.assertEqual(
interceptor.login_customer_id_meta,
("login-customer-id", self.mock_login_customer_id),
)
def test_init_no_login_customer_id(self):
interceptor = MetadataInterceptor(self.mock_developer_token, None)
self.assertEqual(
interceptor.developer_token_meta,
("developer-token", self.mock_developer_token),
)
self.assertEqual(interceptor.login_customer_id_meta, None)
def test_update_client_call_details_metadata(self):
interceptor = MetadataInterceptor(
self.mock_developer_token, self.mock_login_customer_id
)
mock_metadata = list([("test-key", "test-value")])
mock_client_call_details = mock.Mock()
client_call_details = interceptor._update_client_call_details_metadata(
mock_client_call_details, mock_metadata
)
self.assertEqual(client_call_details.metadata, mock_metadata)
def test_intercept_unary_unary(self):
interceptor = MetadataInterceptor(
self.mock_developer_token, self.mock_login_customer_id
)
mock_continuation = mock.Mock(return_value=None)
mock_client_call_details = mock.Mock()
mock_client_call_details.method = "test/method"
mock_client_call_details.timeout = 5
mock_client_call_details.metadata = [("apples", "oranges")]
mock_request = mock.Mock()
with mock.patch.object(
interceptor,
"_update_client_call_details_metadata",
wraps=interceptor._update_client_call_details_metadata,
) as mock_updater:
interceptor.intercept_unary_unary(
mock_continuation, mock_client_call_details, mock_request
)
mock_updater.assert_called_once_with(
mock_client_call_details,
[
mock_client_call_details.metadata[0],
interceptor.developer_token_meta,
interceptor.login_customer_id_meta,
],
)
mock_continuation.assert_called_once()
def test_intercept_unary_stream(self):
interceptor = MetadataInterceptor(
self.mock_developer_token, self.mock_login_customer_id
)
mock_continuation = mock.Mock(return_value=None)
mock_client_call_details = mock.Mock()
mock_client_call_details.method = "test/method"
mock_client_call_details.timeout = 5
mock_client_call_details.metadata = [("apples", "oranges")]
mock_request = mock.Mock()
with mock.patch.object(
interceptor,
"_update_client_call_details_metadata",
wraps=interceptor._update_client_call_details_metadata,
) as mock_updater:
interceptor.intercept_unary_stream(
mock_continuation, mock_client_call_details, mock_request
)
mock_updater.assert_called_once_with(
mock_client_call_details,
[
mock_client_call_details.metadata[0],
interceptor.developer_token_meta,
interceptor.login_customer_id_meta,
],
)
mock_continuation.assert_called_once()
|
the-stack_106_28532 | #!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
import os
import shutil
import tempfile
import unittest
from mkdocs import config
from mkdocs import utils
from mkdocs.config import config_options
from mkdocs.exceptions import ConfigurationError
from mkdocs.tests.base import dedent
def ensure_utf(string):
return string.encode('utf-8') if not utils.PY3 else string
class ConfigTests(unittest.TestCase):
def test_missing_config_file(self):
def load_missing_config():
config.load_config(config_file='bad_filename.yaml')
self.assertRaises(ConfigurationError, load_missing_config)
def test_missing_site_name(self):
c = config.Config(schema=config.DEFAULT_SCHEMA)
c.load_dict({})
errors, warnings = c.validate()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0][0], 'site_name')
self.assertEqual(str(errors[0][1]), 'Required configuration not provided.')
self.assertEqual(len(warnings), 0)
def test_empty_config(self):
def load_empty_config():
config.load_config(config_file='/dev/null')
self.assertRaises(ConfigurationError, load_empty_config)
def test_nonexistant_config(self):
def load_empty_config():
config.load_config(config_file='/path/that/is/not/real')
self.assertRaises(ConfigurationError, load_empty_config)
def test_invalid_config(self):
file_contents = dedent("""
- ['index.md', 'Introduction']
- ['index.md', 'Introduction']
- ['index.md', 'Introduction']
""")
config_file = tempfile.NamedTemporaryFile('w', delete=False)
try:
config_file.write(ensure_utf(file_contents))
config_file.flush()
config_file.close()
self.assertRaises(
ConfigurationError,
config.load_config, config_file=open(config_file.name, 'rb')
)
finally:
os.remove(config_file.name)
def test_config_option(self):
"""
Users can explicitly set the config file using the '--config' option.
Allows users to specify a config other than the default `mkdocs.yml`.
"""
expected_result = {
'site_name': 'Example',
'pages': [
{'Introduction': 'index.md'}
],
}
file_contents = dedent("""
site_name: Example
pages:
- ['index.md', 'Introduction']
""")
config_file = tempfile.NamedTemporaryFile('w', delete=False)
try:
config_file.write(ensure_utf(file_contents))
config_file.flush()
config_file.close()
result = config.load_config(config_file=config_file.name)
self.assertEqual(result['site_name'], expected_result['site_name'])
self.assertEqual(result['pages'], expected_result['pages'])
finally:
os.remove(config_file.name)
def test_theme(self):
mytheme = tempfile.mkdtemp()
custom = tempfile.mkdtemp()
configs = [
dict(), # default theme
{"theme": "readthedocs"}, # builtin theme
{"theme_dir": mytheme}, # custom only
{"theme": "readthedocs", "theme_dir": custom}, # builtin and custom
]
abs_path = os.path.abspath(os.path.dirname(__file__))
mkdocs_dir = os.path.abspath(os.path.join(abs_path, '..', '..'))
theme_dir = os.path.abspath(os.path.join(mkdocs_dir, 'themes'))
search_asset_dir = os.path.abspath(os.path.join(
mkdocs_dir, 'assets', 'search'))
results = (
[os.path.join(theme_dir, 'mkdocs'), search_asset_dir],
[os.path.join(theme_dir, 'readthedocs'), search_asset_dir],
[mytheme, search_asset_dir],
[custom, os.path.join(theme_dir, 'readthedocs'), search_asset_dir],
)
for config_contents, result in zip(configs, results):
c = config.Config(schema=(
('theme', config_options.Theme(default='mkdocs')),
('theme_dir', config_options.ThemeDir(exists=True)),
))
c.load_dict(config_contents)
c.validate()
self.assertEqual(c['theme_dir'], result)
def test_default_pages(self):
tmp_dir = tempfile.mkdtemp()
try:
open(os.path.join(tmp_dir, 'index.md'), 'w').close()
open(os.path.join(tmp_dir, 'about.md'), 'w').close()
conf = config.Config(schema=config.DEFAULT_SCHEMA)
conf.load_dict({
'site_name': 'Example',
'docs_dir': tmp_dir
})
conf.validate()
self.assertEqual(['index.md', 'about.md'], conf['pages'])
finally:
shutil.rmtree(tmp_dir)
def test_default_pages_nested(self):
tmp_dir = tempfile.mkdtemp()
try:
open(os.path.join(tmp_dir, 'index.md'), 'w').close()
open(os.path.join(tmp_dir, 'getting-started.md'), 'w').close()
open(os.path.join(tmp_dir, 'about.md'), 'w').close()
os.makedirs(os.path.join(tmp_dir, 'subA'))
open(os.path.join(tmp_dir, 'subA', 'index.md'), 'w').close()
os.makedirs(os.path.join(tmp_dir, 'subA', 'subA1'))
open(os.path.join(tmp_dir, 'subA', 'subA1', 'index.md'), 'w').close()
os.makedirs(os.path.join(tmp_dir, 'subC'))
open(os.path.join(tmp_dir, 'subC', 'index.md'), 'w').close()
os.makedirs(os.path.join(tmp_dir, 'subB'))
open(os.path.join(tmp_dir, 'subB', 'index.md'), 'w').close()
conf = config.Config(schema=config.DEFAULT_SCHEMA)
conf.load_dict({
'site_name': 'Example',
'docs_dir': tmp_dir
})
conf.validate()
self.assertEqual([
'index.md',
'about.md',
'getting-started.md',
{'subA': [
os.path.join('subA', 'index.md'),
{'subA1': [
os.path.join('subA', 'subA1', 'index.md')
]}
]},
{'subB': [
os.path.join('subB', 'index.md')
]},
{'subC': [
os.path.join('subC', 'index.md')
]}
], conf['pages'])
finally:
shutil.rmtree(tmp_dir)
def test_doc_dir_in_site_dir(self):
j = os.path.join
test_configs = (
{'docs_dir': j('site', 'docs'), 'site_dir': 'site'},
{'docs_dir': 'docs', 'site_dir': '.'},
{'docs_dir': '.', 'site_dir': '.'},
{'docs_dir': 'docs', 'site_dir': ''},
{'docs_dir': '', 'site_dir': ''},
{'docs_dir': j('..', 'mkdocs', 'docs'), 'site_dir': 'docs'},
)
conf = {
'site_name': 'Example',
}
for test_config in test_configs:
patch = conf.copy()
patch.update(test_config)
# Same as the default schema, but don't verify the docs_dir exists.
c = config.Config(schema=(
('docs_dir', config_options.Dir(default='docs')),
('site_dir', config_options.SiteDir(default='site')),
))
c.load_dict(patch)
self.assertRaises(config_options.ValidationError, c.validate)
|
the-stack_106_28533 | from typing import KeysView
SERVICES_FOR_GROUP = {
"all": "chia_harvester chia_timelord_launcher chia_timelord chia_farmer chia_full_node chia_wallet".split(),
"node": "chia_full_node".split(),
"harvester": "chia_harvester".split(),
"farmer": "chia_harvester chia_farmer chia_full_node chia_wallet".split(),
"farmer-no-wallet": "chia_harvester chia_farmer chia_full_node".split(),
"farmer-only": "chia_farmer".split(),
"timelord": "chia_timelord_launcher chia_timelord chia_full_node".split(),
"timelord-only": "chia_timelord".split(),
"timelord-launcher-only": "chia_timelord_launcher".split(),
"wallet": "chia_wallet chia_full_node".split(),
"wallet-only": "chia_wallet".split(),
"introducer": "chia_introducer".split(),
"simulator": "chia_full_node_simulator".split(),
}
def all_groups() -> KeysView[str]:
return SERVICES_FOR_GROUP.keys()
def services_for_groups(groups):
for group in groups:
for service in SERVICES_FOR_GROUP[group]:
yield service
def validate_service(service) -> bool:
return any(service in _ for _ in SERVICES_FOR_GROUP.values())
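# Usage sketch:
#
#   list(services_for_groups(["farmer"]))
#   # -> ['chia_harvester', 'chia_farmer', 'chia_full_node', 'chia_wallet']
#   validate_service("chia_wallet")   # -> True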
|
the-stack_106_28534 | import json
import os
import sys
import re
import copy
import numpy as np
from .base_model import BaseModel
from .base_model import AutoSubRegistrationMeta
from .. import dp_logging
logger = dp_logging.get_child_logger(__name__)
class RegexModel(BaseModel, metaclass=AutoSubRegistrationMeta):
def __init__(self, label_mapping=None, parameters=None):
"""
Regex Model Initializer.
Example regex_patterns:
regex_patterns = {
"LABEL_1": [
"LABEL_1_pattern_1",
"LABEL_1_pattern_2",
...
],
"LABEL_2": [
"LABEL_2_pattern_1",
"LABEL_2_pattern_2",
...
],
...
}
Example encapsulators:
encapsulators = {
'start': r'(?<![\w.\$\%\-])',
'end': r'(?:(?=(\b|[ ]))|(?=[^\w\%\$]([^\w]|$))|$)',
}
:param label_mapping: maps labels to their encoded integers
:type label_mapping: dict
:param parameters: Contains all the appropriate parameters for the model.
Possible parameters are:
max_length, max_num_chars, dim_embed
:type parameters: dict
:return: None
"""
# parameter initialization
if not parameters:
parameters = {}
parameters.setdefault('regex_patterns', {})
parameters.setdefault('encapsulators', {'start': '', 'end': ''})
parameters.setdefault('ignore_case', True)
parameters.setdefault('default_label', 'UNKNOWN')
self._epoch_id = 0
# initialize class
self.set_label_mapping(label_mapping)
self._validate_parameters(parameters)
self._parameters = parameters
def _validate_parameters(self, parameters):
"""
Validate the parameters sent in. Raise error if invalid parameters are
present.
:param parameters: parameter dict containing the following parameters:
regex_patterns: patterns associated with each label_mapping
Example regex_patterns:
regex_patterns = {
"LABEL_1": [
"LABEL_1_pattern_1",
"LABEL_1_pattern_2",
...
],
"LABEL_2": [
"LABEL_2_pattern_1",
"LABEL_2_pattern_2",
...
],
...
}
encapsulators: regex to add to start and end of each regex
(used to capture entities inside of text).
Example encapsulators:
encapsulators = {
'start': r'(?<![\w.\$\%\-])',
'end': r'(?:(?=(\b|[ ]))|(?=[^\w\%\$]([^\w]|$))|$)',
}
ignore_case: whether or not to set the regex ignore case flag
default_label: default label to assign when no regex found
:type parameters: dict
:return: None
"""
_retype = type(re.compile('pattern for py 3.6 & 3.7'))
errors = []
list_of_necessary_params = ['encapsulators', 'regex_patterns',
'ignore_case', 'default_label']
# Make sure the necessary parameters are present and valid.
for param in parameters:
value = parameters[param]
if param == 'encapsulators' and (
not isinstance(value, dict)
or 'start' not in value
or 'end' not in value):
errors.append(
"`{}` must be a dict with keys 'start' and 'end'".format(
param
))
elif param == 'regex_patterns':
if not isinstance(value, dict):
errors.append('`{}` must be a dict of regex pattern lists.'.
format(param))
continue
for key in value:
if key not in self.label_mapping:
errors.append(
"`{}` was a regex pattern not found in the "
"label_mapping".format(key))
elif not isinstance(value[key], list):
errors.append(
"`{}` must be a list of regex patterns, i.e."
"[pattern_1, pattern_2, ...]".format(key))
else:
for i in range(len(value[key])):
if not isinstance(value[key][i], (_retype, str)):
errors.append(
"`{}`, pattern `{}' was not a valid regex "
"pattern (re.Pattern, str)".format(key, i))
elif isinstance(value[key][i], str):
try:
re.compile(value[key][i])
except re.error as e:
errors.append(
"`{}`, pattern {} was not a valid regex"
" pattern: {}".format(key, i, str(e)))
elif param == 'ignore_case' \
and not isinstance(parameters[param], bool):
errors.append("`{}` must be a bool.".format(param))
elif param == 'default_label' \
and not isinstance(parameters[param], str):
errors.append("`{}` must be a string.".format(param))
elif param not in list_of_necessary_params:
errors.append("`{}` is not an accepted parameter.".format(
param))
if errors:
raise ValueError('\n'.join(errors))
def _construct_model(self):
pass
def _reconstruct_model(self):
pass
def _need_to_reconstruct_model(self):
pass
def reset_weights(self):
pass
def predict(self, data, batch_size=None, show_confidences=False,
verbose=True):
"""
Applies the regex patterns (within regex_model) to the input_string,
create predictions for all matching patterns. Each pattern has an
associated entity and the predictions of each character within the
string are given a True or False identification for each entity. All
characters not identified by ANY of the regex patterns in the
pattern_dict are considered background characters, and are replaced with
the default_label value.
:param data: list of strings to predict upon
:type data: iterator
:param batch_size: does not impact this model and should be fixed to not
be required.
:type batch_size: N/A
        :param show_confidences: whether user wants prediction confidences
        :type show_confidences: bool
:param verbose: Flag to determine whether to print status or not
:type verbose: bool
:return: char level predictions and confidences
:rtype: dict
"""
start_pattern = ''
end_pattern = ''
regex_patterns = self._parameters['regex_patterns']
default_ind = self.label_mapping[self._parameters['default_label']]
encapsulators = self._parameters['encapsulators']
re_flags = re.IGNORECASE if self._parameters['ignore_case'] else 0
if encapsulators:
start_pattern = encapsulators['start']
end_pattern = encapsulators['end']
pre_compiled_patterns = copy.deepcopy(regex_patterns)
for entity_label, entity_patterns in pre_compiled_patterns.items():
for i in range(len(entity_patterns)):
pattern = (start_pattern
+ pre_compiled_patterns[entity_label][i]
+ end_pattern)
pre_compiled_patterns[entity_label][i] = re.compile(
pattern, flags=re_flags)
# Construct array initial regex predictions where background is
# predicted.
predictions = [np.empty((0,))] * 100
i = 0
for i, input_string in enumerate(data):
# Double array size
if len(predictions) <= i:
predictions.extend([np.empty((0,))] * len(predictions))
pred = np.zeros((len(input_string), self.num_labels), dtype=int)
pred[:, default_ind] = 1
for entity_label, entity_patterns in pre_compiled_patterns.items():
entity_id = self.label_mapping[entity_label]
for re_pattern in entity_patterns:
for each_find in re_pattern.finditer(input_string):
indices = each_find.span(0)
pred[indices[0]:indices[1], default_ind] = 0
pred[indices[0]:indices[1], entity_id] = 1
if verbose:
sys.stdout.flush()
sys.stdout.write("\rData Samples Processed: {:d} ".format(
i + 1))
predictions[i] = pred
if verbose:
logger.info("\rData Samples Processed: {:d} ".format(i + 1))
# Trim array size to number of samples
if len(predictions) > i+1:
del predictions[i+1:]
if show_confidences:
conf = copy.deepcopy(predictions)
for i in range(len(conf)):
conf[i] = conf[i] / \
np.linalg.norm(conf[i], axis=1, ord=1, keepdims=True)
return {"pred": predictions, 'conf': conf}
return {"pred": predictions}
@classmethod
def load_from_disk(cls, dirpath):
"""
Loads whole model from disk with weights
:param dirpath: directory path where you want to load the model from
:type dirpath: str
:return: None
"""
# load parameters
model_param_dirpath = os.path.join(dirpath, "model_parameters.json")
with open(model_param_dirpath, 'r') as fp:
parameters = json.load(fp)
# load label_mapping
labels_dirpath = os.path.join(dirpath, "label_mapping.json")
with open(labels_dirpath, 'r') as fp:
label_mapping = json.load(fp)
loaded_model = cls(label_mapping, parameters)
return loaded_model
def save_to_disk(self, dirpath):
"""
Saves whole model to disk with weights.
:param dirpath: directory path where you want to save the model to
:type dirpath: str
:return: None
"""
if not os.path.isdir(dirpath):
os.makedirs(dirpath)
model_param_dirpath = os.path.join(dirpath, "model_parameters.json")
with open(model_param_dirpath, 'w') as fp:
json.dump(self._parameters, fp)
labels_dirpath = os.path.join(dirpath, "label_mapping.json")
with open(labels_dirpath, 'w') as fp:
json.dump(self.label_mapping, fp)
|
the-stack_106_28536 | # -*- coding: utf-8 -*-
first_names = [
'Adelheid',
'Agnes',
'Albert',
'Anna',
'Arnold',
'Barbara',
'Bernhard',
'Berthold',
'Christine',
'Dietrich',
'Eberhard',
'Elisabeth',
'Fenne',
'Friedrich',
'Georg',
'Gerhard',
'Gerhaus',
'Gertrud',
'Hedwig',
'Heinrich',
'Hermann',
'Hilde',
'Irmel',
'Johannes',
'Jutta',
'Katharina',
'Konrad',
'Kunigunde',
'Lambert',
'Margarete',
'Mechtild',
'Michael',
'Nikolaus',
'Otto',
'Peter',
'Ulrich',
'Walther',
]
|
the-stack_106_28538 |
import librosa
import numpy as np
# function to extract all the features needed for the classification
def extract_features(audio_samples, sample_rate):
print(" Extracting features ..... ")
extracted_features = np.empty((0, 41,))
if not isinstance(audio_samples, list):
audio_samples = [audio_samples]
for sample in audio_samples:
# calculate the zero-crossing feature
zero_cross_feat = librosa.feature.zero_crossing_rate(sample).mean()
# calculate the mfccs features
mfccs = librosa.feature.mfcc(y=sample, sr=sample_rate, n_mfcc=40)
mfccsscaled = np.mean(mfccs.T, axis=0)
# add zero crossing feature to the feature list
mfccsscaled = np.append(mfccsscaled, zero_cross_feat)
mfccsscaled = mfccsscaled.reshape(1, 41, )
extracted_features = np.vstack((extracted_features, mfccsscaled))
# return the extracted features
return extracted_features
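# Usage sketch (the wav path is a placeholder):
#
#   samples, sample_rate = librosa.load("speech.wav", sr=22050)
#   features = extract_features(samples, sample_rate)   # shape: (1, 41)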
|
the-stack_106_28540 | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015, 2016, 2017 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import collections
import json
from backtrader import BrokerBase, OrderBase, Order
from backtrader.position import Position
from backtrader.utils.py3 import queue, with_metaclass
from .ccxtstore import CCXTStore
class CCXTOrder(OrderBase):
def __init__(self, owner, data, ccxt_order):
self.owner = owner
self.data = data
self.ccxt_order = ccxt_order
self.executed_fills = []
self.ordtype = self.Buy if ccxt_order['side'] == 'buy' else self.Sell
self.size = float(ccxt_order['amount'])
super(CCXTOrder, self).__init__()
class MetaCCXTBroker(BrokerBase.__class__):
def __init__(cls, name, bases, dct):
'''Class has already been created ... register'''
# Initialize the class
super(MetaCCXTBroker, cls).__init__(name, bases, dct)
CCXTStore.BrokerCls = cls
class CCXTBroker(with_metaclass(MetaCCXTBroker, BrokerBase)):
'''Broker implementation for CCXT cryptocurrency trading library.
This class maps the orders/positions from CCXT to the
internal API of ``backtrader``.
    Broker mapping added as I noticed that there are differences between the expected
    order_types and the returned statuses from canceling an order.
    Added a new mappings parameter to the script with defaults.
    Added a get_balance function. Manually check the account balance and update the broker's
    self.cash and self.value. This helps alleviate rate limit issues.
    Added a new get_wallet_balance method. This allows manual checking of the balance of any coin.
    The method accepts extra parameters, which is useful for dealing with multiple assets.
    Modified getcash() and getvalue():
    Backtrader will call getcash and getvalue before and after next, slowing things down
    with REST calls. As such, both now return the values cached in the store, which are
    refreshed explicitly via get_balance().
The broker mapping should contain a new dict for order_types and mappings like below:
broker_mapping = {
'order_types': {
bt.Order.Market: 'market',
bt.Order.Limit: 'limit',
bt.Order.Stop: 'stop-loss', #stop-loss for kraken, stop for bitmex
bt.Order.StopLimit: 'stop limit'
},
'mappings':{
'closed_order':{
'key': 'status',
'value':'closed'
},
'canceled_order':{
'key': 'result',
'value':1}
}
}
Added new private_end_point method to allow using any private non-unified end point
'''
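    # Usage sketch (exchange name and credentials are placeholders; the keyword
    # arguments besides broker_mapping are forwarded to CCXTStore, so their exact
    # names depend on your CCXT store configuration):
    #
    #   import backtrader as bt
    #   broker_mapping = {
    #       'order_types': {bt.Order.Market: 'market', bt.Order.Limit: 'limit',
    #                       bt.Order.Stop: 'stop', bt.Order.StopLimit: 'stop limit'},
    #       'mappings': {'closed_order': {'key': 'status', 'value': 'closed'},
    #                    'canceled_order': {'key': 'status', 'value': 'canceled'}},
    #   }
    #   broker = CCXTBroker(broker_mapping=broker_mapping, exchange='binance',
    #                       currency='USDT', config={'enableRateLimit': True})
    #   cerebro.setbroker(broker)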
order_types = {Order.Market: 'market',
Order.Limit: 'limit',
Order.Stop: 'stop', # stop-loss for kraken, stop for bitmex
Order.StopLimit: 'stop limit'}
mappings = {
'closed_order': {
'key': 'status',
'value': 'closed'
},
'canceled_order': {
'key': 'status',
'value': 'canceled'}
}
def __init__(self, broker_mapping=None, debug=False, **kwargs):
super(CCXTBroker, self).__init__()
if broker_mapping is not None:
try:
self.order_types = broker_mapping['order_types']
except KeyError: # Might not want to change the order types
pass
try:
self.mappings = broker_mapping['mappings']
except KeyError: # might not want to change the mappings
pass
self.store = CCXTStore(**kwargs)
self.currency = self.store.currency
self.positions = collections.defaultdict(Position)
self.debug = debug
self.indent = 4 # For pretty printing dictionaries
self.notifs = queue.Queue() # holds orders which are notified
self.open_orders = list()
self.startingcash = self.store._cash
self.startingvalue = self.store._value
def get_balance(self):
self.store.get_balance()
self.cash = self.store._cash
self.value = self.store._value
return self.cash, self.value
def get_wallet_balance(self, currency, params={}):
balance = self.store.get_wallet_balance(currency, params=params)
cash = balance['free'][currency] if balance['free'][currency] else 0
value = balance['total'][currency] if balance['total'][currency] else 0
return cash, value
def getcash(self):
# Get cash seems to always be called before get value
# Therefore it makes sense to add getbalance here.
# return self.store.getcash(self.currency)
self.cash = self.store._cash
return self.cash
def getvalue(self, datas=None):
# return self.store.getvalue(self.currency)
self.value = self.store._value
return self.value
def get_notification(self):
try:
return self.notifs.get(False)
except queue.Empty:
return None
def notify(self, order):
self.notifs.put(order)
def getposition(self, data, clone=True):
# return self.o.getposition(data._dataname, clone=clone)
pos = self.positions[data._dataname]
if clone:
pos = pos.clone()
return pos
def next(self):
if self.debug:
print('Broker next() called')
for o_order in list(self.open_orders):
oID = o_order.ccxt_order['id']
# Print debug before fetching so we know which order is giving an
# issue if it crashes
if self.debug:
print('Fetching Order ID: {}'.format(oID))
# Get the order
ccxt_order = self.store.fetch_order(oID, o_order.data.p.dataname)
# Check for new fills
if 'trades' in ccxt_order and ccxt_order['trades'] is not None:
for fill in ccxt_order['trades']:
if fill not in o_order.executed_fills:
o_order.execute(fill['datetime'], fill['amount'], fill['price'],
0, 0.0, 0.0,
0, 0.0, 0.0,
0.0, 0.0,
0, 0.0)
o_order.executed_fills.append(fill['id'])
if self.debug:
print(json.dumps(ccxt_order, indent=self.indent))
# Check if the order is closed
if ccxt_order[self.mappings['closed_order']['key']] == self.mappings['closed_order']['value']:
pos = self.getposition(o_order.data, clone=False)
pos.update(o_order.size, o_order.price)
o_order.completed()
self.notify(o_order)
self.open_orders.remove(o_order)
self.get_balance()
def _submit(self, owner, data, exectype, side, amount, price, params):
order_type = self.order_types.get(exectype) if exectype else 'market'
created = int(data.datetime.datetime(0).timestamp()*1000)
# Extract CCXT specific params if passed to the order
params = params['params'] if 'params' in params else params
# params['created'] = created # Add timestamp of order creation for backtesting
ret_ord = self.store.create_order(symbol=data.p.dataname, order_type=order_type, side=side,
amount=amount, price=price, params=params)
_order = self.store.fetch_order(ret_ord['id'], data.p.dataname)
order = CCXTOrder(owner, data, _order)
order.price = ret_ord['price']
self.open_orders.append(order)
self.notify(order)
return order
def buy(self, owner, data, size, price=None, plimit=None,
exectype=None, valid=None, tradeid=0, oco=None,
trailamount=None, trailpercent=None,
**kwargs):
del kwargs['parent']
del kwargs['transmit']
return self._submit(owner, data, exectype, 'buy', size, price, kwargs)
def sell(self, owner, data, size, price=None, plimit=None,
exectype=None, valid=None, tradeid=0, oco=None,
trailamount=None, trailpercent=None,
**kwargs):
del kwargs['parent']
del kwargs['transmit']
return self._submit(owner, data, exectype, 'sell', size, price, kwargs)
def cancel(self, order):
oID = order.ccxt_order['id']
if self.debug:
print('Broker cancel() called')
print('Fetching Order ID: {}'.format(oID))
# check first if the order has already been filled otherwise an error
# might be raised if we try to cancel an order that is not open.
ccxt_order = self.store.fetch_order(oID, order.data.p.dataname)
if self.debug:
print(json.dumps(ccxt_order, indent=self.indent))
if ccxt_order[self.mappings['closed_order']['key']] == self.mappings['closed_order']['value']:
return order
ccxt_order = self.store.cancel_order(oID, order.data.p.dataname)
if self.debug:
print(json.dumps(ccxt_order, indent=self.indent))
print('Value Received: {}'.format(ccxt_order[self.mappings['canceled_order']['key']]))
print('Value Expected: {}'.format(self.mappings['canceled_order']['value']))
if ccxt_order[self.mappings['canceled_order']['key']] == self.mappings['canceled_order']['value']:
self.open_orders.remove(order)
order.cancel()
self.notify(order)
return order
def get_orders_open(self, safe=False):
return self.store.fetch_open_orders()
def private_end_point(self, type, endpoint, params):
'''
Open method to allow calls to be made to any private end point.
See here: https://github.com/ccxt/ccxt/wiki/Manual#implicit-api-methods
        - type: String, 'Get', 'Post', 'Put' or 'Delete'.
        - endpoint: String containing the endpoint address, e.g. 'order/{id}/cancel'
        - params: Dict. An implicit method takes a dictionary of parameters, sends
the request to the exchange and returns an exchange-specific JSON
result from the API as is, unparsed.
To get a list of all available methods with an exchange instance,
including implicit methods and unified methods you can simply do the
following:
print(dir(ccxt.hitbtc()))
'''
endpoint_str = endpoint.replace('/', '_')
endpoint_str = endpoint_str.replace('{', '')
endpoint_str = endpoint_str.replace('}', '')
method_str = 'private_' + type.lower() + endpoint_str.lower()
return self.store.private_end_point(type=type, endpoint=method_str, params=params)
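    # Illustrative usage (a sketch, not from the original source): with a broker
    # instance `broker`, an endpoint such as '/order/{id}/cancel' is rewritten by
    # the string replacements above into the implicit CCXT method name
    # 'private_post_order_id_cancel' before being dispatched to the store:
    #
    #   broker.private_end_point(type='Post',
    #                            endpoint='/order/{id}/cancel',
    #                            params={'id': '1234'})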
|
the-stack_106_28541 | # Copyright (c) 2010-2020 openpyxlzip
from openpyxlzip.descriptors.serialisable import Serialisable
from openpyxlzip.descriptors import (
Sequence,
Typed,
Alias,
)
from openpyxlzip.descriptors.excel import ExtensionList
from openpyxlzip.descriptors.nested import (
NestedBool,
NestedInteger,
NestedSet
)
from ._chart import ChartBase
from .axis import TextAxis, NumericAxis
from .series import Series
from .label import DataLabelList
class RadarChart(ChartBase):
tagname = "radarChart"
radarStyle = NestedSet(values=(['standard', 'marker', 'filled']))
type = Alias("radarStyle")
varyColors = NestedBool(nested=True, allow_none=True)
ser = Sequence(expected_type=Series, allow_none=True)
dLbls = Typed(expected_type=DataLabelList, allow_none=True)
dataLabels = Alias("dLbls")
extLst = Typed(expected_type=ExtensionList, allow_none=True)
_series_type = "radar"
x_axis = Typed(expected_type=TextAxis)
y_axis = Typed(expected_type=NumericAxis)
__elements__ = ('radarStyle', 'varyColors', 'ser', 'dLbls', 'axId', 'extLst',)
def __init__(self,
radarStyle="standard",
varyColors=None,
ser=(),
dLbls=None,
extLst=None,
**kw
):
self.radarStyle = radarStyle
self.varyColors = varyColors
self.ser = ser
self.dLbls = dLbls
self.x_axis = TextAxis()
self.y_axis = NumericAxis()
self.extLst = extLst
super(RadarChart, self).__init__(**kw)
|
the-stack_106_28542 | """
Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.
(i.e., [0,1,2,4,5,6,7] might become [4,5,6,7,0,1,2]).
You are given a target value to search. If found in the array return its index, otherwise return -1.
You may assume no duplicate exists in the array.
Your algorithm's runtime complexity must be in the order of O(log n).
"""
# def main(nums, target):
# if not nums:
# return -1
# l, r = two_sorted(nums)
# ind = binary_search(nums, 0, l, target)
# if ind == -1:
# ind = binary_search(nums, r + 1, len(nums) - 1, target)
# if ind == -1:
# return -1
# return ind
#
# def two_sorted(nums):
# l = 0
# r = len(nums) - 1
# while l < r:
# mid = (l + r) // 2
# if nums[mid] > nums[l]:
# l = mid
# else:
# r = mid
# return (l, r)
#
#
# def binary_search(nums, l, r, target):
# while l <= r:
# mid = (l + r) // 2
# if nums[mid] == target:
# return mid
# elif nums[mid] > target:
# r = mid - 1
# else:
# l = mid + 1
# return -1
def main(nums, target):
# left pointer
l = 0
# right pointer
r = len(nums) - 1
while l <= r:
mid = (l + r) // 2
if nums[mid] == target:
return mid
# if the first half of the array is sorted
if nums[l] <= nums[mid]:
# check if the target point is in that half
if nums[l] <= target < nums[mid]:
# move the right pointer
r = mid - 1
# else move the left pointer
else:
l = mid + 1
else:
if nums[mid] < target <= nums[r]:
l = mid + 1
else:
r = mid - 1
return -1
print(main([1, 3], 3))
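# Additional illustrative checks (added; not part of the original):
print(main([4, 5, 6, 7, 0, 1, 2], 0))  # expected 4 (index of target 0)
print(main([4, 5, 6, 7, 0, 1, 2], 3))  # expected -1 (target absent)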
|
the-stack_106_28544 | import geopandas as gpd
import pandas as pd
from gis_utils import gis_main_dir
def main():
dfd = pd.read_csv('{}census/aff_download1/ACS_15_5YR_B19013_with_ann.csv'.format(gis_main_dir))
dfs = gpd.read_file('{}census/va_block_groups/tl_2015_51_bg.shp'.format(gis_main_dir))
    dfd = dfd[dfd['marg_err'].str.contains(r'\d')]
dfd = dfd.convert_objects(convert_numeric=True)
dfs = dfs.convert_objects(convert_numeric=True)
dfj = dfs.merge(dfd, left_on='GEOID', right_on='GEO_idd')
dfj.to_file('{}LocalityData/Norfolk/nor_med_income'.format(gis_main_dir))
if __name__ == "__main__":
main()
|
the-stack_106_28545 | from django.shortcuts import render
from django.http import HttpResponse, Http404
from .models import Category, Location, Image
# Create your views here.
def welcome(request):
try:
category = Category.objects.all()
location = Location.objects.all()
images = Image.objects.all()
except:
raise Http404()
return render(request, 'index.html', {"location": location,"category":category,"images":images})
def search_results(request):
if 'name' in request.GET and request.GET["name"]:
search_term = request.GET.get("name")
searched_name = Image.search_by_name(search_term)
message = f"{search_term}"
print(searched_name)
return render(request, 'search.html', {"message": message, "image": searched_name})
else:
message = "You haven't searched for any term"
return render(request, 'search.html', {'message': message})
|
the-stack_106_28546 | #!/usr/bin/env python
# Copyright (c) 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import monasca_analytics.banana.deadpathck.config as deadpathck
import monasca_analytics.banana.emitter as emit
import monasca_analytics.banana.grammar.config as grammar
import monasca_analytics.banana.typeck.config as typeck
from test.util_for_testing import MonanasTestCase
logger = logging.getLogger(__name__)
class DeadPathTestCase(MonanasTestCase):
def setUp(self):
super(DeadPathTestCase, self).setUp()
def tearDown(self):
super(DeadPathTestCase, self).tearDown()
def test_banana_should_remove_everything(self):
banana_str = "" +\
"a = CloudMarkovChainSource()\n" +\
"b = StdoutSink()\n" +\
"c = CloudIngestor()\n" +\
"d = LiNGAM()\n" +\
"a -> c -> d"
emitter = CustomEmitter()
# Convert the grammar into an AST
parser = grammar.banana_grammar(emitter)
ast = parser.parse(banana_str)
# Compute the type table for the given AST
type_table = typeck.typeck(ast)
# Remove from the tree path that are "dead"
deadpathck.deadpathck(ast, type_table, emitter)
self.assertEqual(emitter.nb_errors, 0)
self.assertEqual(emitter.nb_warnings, 4)
self.assertEqual(len(ast.components), 0)
self.assertEqual(len(list(ast.connections.connections)), 0)
def test_banana_should_remove_one(self):
banana_str = "" +\
"a = CloudMarkovChainSource()\n" +\
"b = StdoutSink()\n" +\
"c = CloudIngestor()\n" +\
"d = LiNGAM()\n" +\
"a -> c -> [d, b]"
emitter = CustomEmitter()
# Convert the grammar into an AST
parser = grammar.banana_grammar(emitter)
ast = parser.parse(banana_str)
# Compute the type table for the given AST
type_table = typeck.typeck(ast)
# Remove from the tree path that are "dead"
deadpathck.deadpathck(ast, type_table, emitter)
self.assertEqual(emitter.nb_errors, 0)
self.assertEqual(emitter.nb_warnings, 1)
self.assertEqual(len(ast.components), 3)
self.assertEqual(len(list(ast.connections.connections)), 2)
def test_banana_should_not_remove_anything(self):
banana_str = "" +\
"a = CloudMarkovChainSource()\n" +\
"b = StdoutSink()\n" +\
"c = CloudIngestor()\n" +\
"d = LiNGAM()\n" +\
"a -> c -> d -> b"
emitter = CustomEmitter()
# Convert the grammar into an AST
parser = grammar.banana_grammar(emitter)
ast = parser.parse(banana_str)
# Compute the type table for the given AST
type_table = typeck.typeck(ast)
# Remove from the tree path that are "dead"
deadpathck.deadpathck(ast, type_table, emitter)
self.assertEqual(emitter.nb_errors, 0)
self.assertEqual(emitter.nb_warnings, 0)
self.assertEqual(len(ast.components), 4)
self.assertEqual(len(list(ast.connections.connections)), 3)
class CustomEmitter(emit.Emitter):
def __init__(self):
super(CustomEmitter, self).__init__()
self.nb_warnings = 0
self.nb_errors = 0
def emit_warning(self, span, message):
print(span.get_line(), str(span), message)
self.nb_warnings += 1
def emit_error(self, span, message):
print(span.get_line(), str(span), message)
self.nb_errors += 1
|
the-stack_106_28547 | from mako.template import Template
from dtest.syslog import log
import json
import traceback
import random
import string
def selectParameters(**kwargs):
"""็ปๅๅๆฐ"""
value_len = []
params_list = []
for k in kwargs.keys():
value_len.append(len(kwargs[k]))
value_len.sort()
for v in range(value_len[0]):
once = {}
for k in kwargs.keys():
if isinstance(kwargs[k], list):
once[k] = kwargs[k][v]
params_list.append(once)
return params_list
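# Illustrative example (added for clarity):
#   selectParameters(a=[1, 2, 3], b=['x', 'y'])
#   -> [{'a': 1, 'b': 'x'}, {'a': 2, 'b': 'y'}]   (values paired up to the shortest list)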
def getParame(step=None, globalvar=None):
parame = {}
for k in step.get("parameters"):
if isinstance(step.get("parameters")[k], str):
t = Template(step.get("parameters")[k]).render(**globalvar)
try:
t = json.loads(t)
except Exception as e:
traceback.format_exc(e)
if isinstance(t, dict):
for i in k.split(","):
parame[i] = t[i]
            elif isinstance(t, (str, list)):
                parame[k] = t
else:
parame[k] = step.get("parameters")[k]
return parame
def getRandStr(str_len):
""" generate random string with specified length
"""
return "".join(
random.choice(string.ascii_letters + string.digits) for _ in range(str_len)
) |
the-stack_106_28548 | from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, antecedents, out_attributes, user_options, num_cores,
out_path):
import os
from genomicode import parallel
from genomicode import filelib
from genomicode import alignlib
from Betsy import module_utils as mlib
fastq_node, sai_node, orient_node, sample_node, reference_node = \
antecedents
fastq_files = mlib.find_merged_fastq_files(
sample_node.identifier, fastq_node.identifier)
sai_path = sai_node.identifier
assert filelib.dir_exists(sai_path)
orient = mlib.read_orientation(orient_node.identifier)
ref = alignlib.create_reference_genome(reference_node.identifier)
filelib.safe_mkdir(out_path)
metadata = {}
metadata["tool"] = "bwa %s" % alignlib.get_bwa_version()
# Technically, doesn't need the SampleGroupFile, since that's
# already reflected in the sai data. But better, because the
# sai data might not always be generated by BETSY.
# Find the merged fastq files.
# Find the sai files.
sai_filenames = filelib.list_files_in_path(
sai_path, endswith=".sai", case_insensitive=True)
assert sai_filenames, "No .sai files."
bwa = mlib.findbin("bwa")
# bwa samse -f <output.sam> <reference.fa> <input.sai> <input.fq>
# bwa sampe -f <output.sam> <reference.fa> <input_1.sai> <input_2.sai>
# <input_1.fq> <input_2.fq> >
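        # e.g. (illustrative; file names are hypothetical):
        #   bwa samse -f out/s1.sam ref.fa s1.sai s1.fq
        #   bwa sampe -f out/s2.sam ref.fa s2_1.sai s2_2.sai s2_1.fq s2_2.fq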
# list of (pair1.fq, pair1.sai, pair2.fq, pair2.sai, output.sam)
# all full paths
jobs = []
for x in fastq_files:
sample, pair1_fq, pair2_fq = x
# The sai file should be in the format:
# <sai_path>/<sample>.sai Single end read
# <sai_path>/<sample>_1.sai Paired end read
# <sai_path>/<sample>_2.sai Paired end read
# Look for pair1_sai and pair2_sai.
pair1_sai = pair2_sai = None
for sai_filename in sai_filenames:
p, s, e = mlib.splitpath(sai_filename)
assert e == ".sai"
if s == sample:
assert not pair1_sai
pair1_sai = sai_filename
elif s == "%s_1" % (sample):
assert not pair1_sai
pair1_sai = sai_filename
elif s == "%s_2" % (sample):
assert not pair2_sai
pair2_sai = sai_filename
assert pair1_sai, "Missing .sai file: %s" % sample
if pair2_fq:
assert pair2_sai, "Missing .sai file 2: %s" % sample
if pair2_sai:
assert pair2_fq, "Missing .fq file 2: %s" % sample
sam_filename = os.path.join(out_path, "%s.sam" % sample)
log_filename = os.path.join(out_path, "%s.log" % sample)
x = sample, pair1_fq, pair1_sai, pair2_fq, pair2_sai, \
sam_filename, log_filename
jobs.append(x)
orientation = orient.orientation
#orientation = sample_node.data.attributes["orientation"]
assert orientation in ["single", "paired_fr", "paired_rf"]
# Make a list of bwa commands.
sq = mlib.sq
commands = []
for x in jobs:
sample, pair1_fq, pair1_sai, pair2_fq, pair2_sai, \
sam_filename, log_filename = x
if orientation == "single":
assert not pair2_fq
assert not pair2_sai
samse = "samse"
if orientation.startswith("paired"):
samse = "sampe"
x = [
sq(bwa),
samse,
"-f", sq(sam_filename),
sq(ref.fasta_file_full),
]
if orientation == "single":
x += [
sq(pair1_sai),
sq(pair1_fq),
]
else:
y = [
sq(pair1_sai),
sq(pair2_sai),
sq(pair1_fq),
sq(pair2_fq),
]
if orientation == "paired_rf":
y = [
sq(pair2_sai),
sq(pair1_sai),
sq(pair2_fq),
sq(pair1_fq),
]
x += y
x += [
">&", sq(log_filename),
]
x = " ".join(x)
commands.append(x)
metadata["commands"] = commands
metadata["num_cores"] = num_cores
parallel.pshell(commands, max_procs=num_cores)
# Make sure the analysis completed successfully.
x = [x[-2] for x in jobs]
filelib.assert_exists_nz_many(x)
return metadata
def name_outfile(self, antecedents, user_options):
return "bwa.sam"
|
the-stack_106_28550 | """
Ported using Python-Future from the Python 3.3 standard library.
Parse (absolute and relative) URLs.
urlparse module is based upon the following RFC specifications.
RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding
and L. Masinter, January 2005.
RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter
and L.Masinter, December 1999.
RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T.
Berners-Lee, R. Fielding, and L. Masinter, August 1998.
RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zawinski, July 1998.
RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June
1995.
RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M.
McCahill, December 1994
RFC 3986 is considered the current standard and any future changes to
urlparse module should conform with it. The urlparse module is
currently not entirely compliant with this RFC due to defacto
scenarios for parsing, and for backward compatibility purposes, some
parsing quirks from older RFCs are retained. The testcases in
test_urlparse.py provides a good indicator of parsing behavior.
"""
from __future__ import absolute_import, division, unicode_literals
from future.builtins import bytes, chr, dict, int, range, str
from future.utils import raise_with_traceback
import re
import sys
import collections
__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
"urlsplit", "urlunsplit", "urlencode", "parse_qs",
"parse_qsl", "quote", "quote_plus", "quote_from_bytes",
"unquote", "unquote_plus", "unquote_to_bytes"]
# A classification of schemes ('' means apply by default)
uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap',
'wais', 'file', 'https', 'shttp', 'mms',
'prospero', 'rtsp', 'rtspu', '', 'sftp',
'svn', 'svn+ssh']
uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet',
'imap', 'wais', 'file', 'mms', 'https', 'shttp',
'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '',
'svn', 'svn+ssh', 'sftp', 'nfs', 'git', 'git+ssh']
uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap',
'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
'mms', '', 'sftp', 'tel']
# These are not actually used anymore, but should stay for backwards
# compatibility. (They are undocumented, but have a public-looking name.)
non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']
uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms',
'gopher', 'rtsp', 'rtspu', 'sip', 'sips', '']
uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news',
'nntp', 'wais', 'https', 'shttp', 'snews',
'file', 'prospero', '']
# Characters valid in scheme names
scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789'
'+-.')
# XXX: Consider replacing with functools.lru_cache
MAX_CACHE_SIZE = 20
_parse_cache = {}
def clear_cache():
"""Clear the parse cache and the quoters cache."""
_parse_cache.clear()
_safe_quoters.clear()
# Helpers for bytes handling
# For 3.2, we deliberately require applications that
# handle improperly quoted URLs to do their own
# decoding and encoding. If valid use cases are
# presented, we may relax this by using latin-1
# decoding internally for 3.3
_implicit_encoding = 'ascii'
_implicit_errors = 'strict'
def _noop(obj):
return obj
def _encode_result(obj, encoding=_implicit_encoding,
errors=_implicit_errors):
return obj.encode(encoding, errors)
def _decode_args(args, encoding=_implicit_encoding,
errors=_implicit_errors):
return tuple(x.decode(encoding, errors) if x else '' for x in args)
def _coerce_args(*args):
# Invokes decode if necessary to create str args
# and returns the coerced inputs along with
# an appropriate result coercion function
# - noop for str inputs
# - encoding function otherwise
str_input = isinstance(args[0], str)
for arg in args[1:]:
# We special-case the empty string to support the
# "scheme=''" default argument to some functions
if arg and isinstance(arg, str) != str_input:
raise TypeError("Cannot mix str and non-str arguments")
if str_input:
return args + (_noop,)
return _decode_args(args) + (_encode_result,)
# Result objects are more helpful than simple tuples
class _ResultMixinStr(object):
"""Standard approach to encoding parsed results from str to bytes"""
__slots__ = ()
def encode(self, encoding='ascii', errors='strict'):
return self._encoded_counterpart(*(x.encode(encoding, errors) for x in self))
class _ResultMixinBytes(object):
"""Standard approach to decoding parsed results from bytes to str"""
__slots__ = ()
def decode(self, encoding='ascii', errors='strict'):
return self._decoded_counterpart(*(x.decode(encoding, errors) for x in self))
class _NetlocResultMixinBase(object):
"""Shared methods for the parsed result objects containing a netloc element"""
__slots__ = ()
@property
def username(self):
return self._userinfo[0]
@property
def password(self):
return self._userinfo[1]
@property
def hostname(self):
hostname = self._hostinfo[0]
if not hostname:
hostname = None
elif hostname is not None:
hostname = hostname.lower()
return hostname
@property
def port(self):
port = self._hostinfo[1]
if port is not None:
port = int(port, 10)
# Return None on an illegal port
if not ( 0 <= port <= 65535):
return None
return port
class _NetlocResultMixinStr(_NetlocResultMixinBase, _ResultMixinStr):
__slots__ = ()
@property
def _userinfo(self):
netloc = self.netloc
userinfo, have_info, hostinfo = netloc.rpartition('@')
if have_info:
username, have_password, password = userinfo.partition(':')
if not have_password:
password = None
else:
username = password = None
return username, password
@property
def _hostinfo(self):
netloc = self.netloc
_, _, hostinfo = netloc.rpartition('@')
_, have_open_br, bracketed = hostinfo.partition('[')
if have_open_br:
hostname, _, port = bracketed.partition(']')
_, have_port, port = port.partition(':')
else:
hostname, have_port, port = hostinfo.partition(':')
if not have_port:
port = None
return hostname, port
class _NetlocResultMixinBytes(_NetlocResultMixinBase, _ResultMixinBytes):
__slots__ = ()
@property
def _userinfo(self):
netloc = self.netloc
userinfo, have_info, hostinfo = netloc.rpartition(b'@')
if have_info:
username, have_password, password = userinfo.partition(b':')
if not have_password:
password = None
else:
username = password = None
return username, password
@property
def _hostinfo(self):
netloc = self.netloc
_, _, hostinfo = netloc.rpartition(b'@')
_, have_open_br, bracketed = hostinfo.partition(b'[')
if have_open_br:
hostname, _, port = bracketed.partition(b']')
_, have_port, port = port.partition(b':')
else:
hostname, have_port, port = hostinfo.partition(b':')
if not have_port:
port = None
return hostname, port
from collections import namedtuple
_DefragResultBase = namedtuple('DefragResult', 'url fragment')
_SplitResultBase = namedtuple('SplitResult', 'scheme netloc path query fragment')
_ParseResultBase = namedtuple('ParseResult', 'scheme netloc path params query fragment')
# For backwards compatibility, alias _NetlocResultMixinStr
# ResultBase is no longer part of the documented API, but it is
# retained since deprecating it isn't worth the hassle
ResultBase = _NetlocResultMixinStr
# Structured result objects for string data
class DefragResult(_DefragResultBase, _ResultMixinStr):
__slots__ = ()
def geturl(self):
if self.fragment:
return self.url + '#' + self.fragment
else:
return self.url
class SplitResult(_SplitResultBase, _NetlocResultMixinStr):
__slots__ = ()
def geturl(self):
return urlunsplit(self)
class ParseResult(_ParseResultBase, _NetlocResultMixinStr):
__slots__ = ()
def geturl(self):
return urlunparse(self)
# Structured result objects for bytes data
class DefragResultBytes(_DefragResultBase, _ResultMixinBytes):
__slots__ = ()
def geturl(self):
if self.fragment:
return self.url + b'#' + self.fragment
else:
return self.url
class SplitResultBytes(_SplitResultBase, _NetlocResultMixinBytes):
__slots__ = ()
def geturl(self):
return urlunsplit(self)
class ParseResultBytes(_ParseResultBase, _NetlocResultMixinBytes):
__slots__ = ()
def geturl(self):
return urlunparse(self)
# Set up the encode/decode result pairs
def _fix_result_transcoding():
_result_pairs = (
(DefragResult, DefragResultBytes),
(SplitResult, SplitResultBytes),
(ParseResult, ParseResultBytes),
)
for _decoded, _encoded in _result_pairs:
_decoded._encoded_counterpart = _encoded
_encoded._decoded_counterpart = _decoded
_fix_result_transcoding()
del _fix_result_transcoding
def urlparse(url, scheme='', allow_fragments=True):
"""Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
splitresult = urlsplit(url, scheme, allow_fragments)
scheme, netloc, url, query, fragment = splitresult
if scheme in uses_params and ';' in url:
url, params = _splitparams(url)
else:
params = ''
result = ParseResult(scheme, netloc, url, params, query, fragment)
return _coerce_result(result)
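# For example (illustrative):
#   urlparse('http://user@example.com:80/path;params?q=1#frag')
#   -> ParseResult(scheme='http', netloc='user@example.com:80', path='/path',
#                  params='params', query='q=1', fragment='frag')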
def _splitparams(url):
if '/' in url:
i = url.find(';', url.rfind('/'))
if i < 0:
return url, ''
else:
i = url.find(';')
return url[:i], url[i+1:]
def _splitnetloc(url, start=0):
delim = len(url) # position of end of domain part of url, default is end
for c in '/?#': # look for delimiters; the order is NOT important
wdelim = url.find(c, start) # find first of this delim
if wdelim >= 0: # if found
delim = min(delim, wdelim) # use earliest delim position
return url[start:delim], url[delim:] # return (domain, rest)
def urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
Return a 5-tuple: (scheme, netloc, path, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
allow_fragments = bool(allow_fragments)
key = url, scheme, allow_fragments, type(url), type(scheme)
cached = _parse_cache.get(key, None)
if cached:
return _coerce_result(cached)
if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
clear_cache()
netloc = query = fragment = ''
i = url.find(':')
if i > 0:
if url[:i] == 'http': # optimize the common case
scheme = url[:i].lower()
url = url[i+1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
return _coerce_result(v)
for c in url[:i]:
if c not in scheme_chars:
break
else:
# make sure "url" is not actually a port number (in which case
# "scheme" is really part of the path)
rest = url[i+1:]
if not rest or any(c not in '0123456789' for c in rest):
# not a port number
scheme, url = url[:i].lower(), rest
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
return _coerce_result(v)
def urlunparse(components):
"""Put a parsed URL back together again. This may result in a
slightly different, but equivalent URL, if the URL that was parsed
originally had redundant delimiters, e.g. a ? with an empty query
(the draft states that these are equivalent)."""
scheme, netloc, url, params, query, fragment, _coerce_result = (
_coerce_args(*components))
if params:
url = "%s;%s" % (url, params)
return _coerce_result(urlunsplit((scheme, netloc, url, query, fragment)))
def urlunsplit(components):
"""Combine the elements of a tuple as returned by urlsplit() into a
complete URL as a string. The data argument can be any five-item iterable.
This may result in a slightly different, but equivalent URL, if the URL that
was parsed originally had unnecessary delimiters (for example, a ? with an
empty query; the RFC states that these are equivalent)."""
scheme, netloc, url, query, fragment, _coerce_result = (
_coerce_args(*components))
if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'):
if url and url[:1] != '/': url = '/' + url
url = '//' + (netloc or '') + url
if scheme:
url = scheme + ':' + url
if query:
url = url + '?' + query
if fragment:
url = url + '#' + fragment
return _coerce_result(url)
def urljoin(base, url, allow_fragments=True):
"""Join a base URL and a possibly relative URL to form an absolute
interpretation of the latter."""
if not base:
return url
if not url:
return base
base, url, _coerce_result = _coerce_args(base, url)
bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
urlparse(base, '', allow_fragments)
scheme, netloc, path, params, query, fragment = \
urlparse(url, bscheme, allow_fragments)
if scheme != bscheme or scheme not in uses_relative:
return _coerce_result(url)
if scheme in uses_netloc:
if netloc:
return _coerce_result(urlunparse((scheme, netloc, path,
params, query, fragment)))
netloc = bnetloc
if path[:1] == '/':
return _coerce_result(urlunparse((scheme, netloc, path,
params, query, fragment)))
if not path and not params:
path = bpath
params = bparams
if not query:
query = bquery
return _coerce_result(urlunparse((scheme, netloc, path,
params, query, fragment)))
segments = bpath.split('/')[:-1] + path.split('/')
# XXX The stuff below is bogus in various ways...
if segments[-1] == '.':
segments[-1] = ''
while '.' in segments:
segments.remove('.')
while 1:
i = 1
n = len(segments) - 1
while i < n:
if (segments[i] == '..'
and segments[i-1] not in ('', '..')):
del segments[i-1:i+1]
break
i = i+1
else:
break
if segments == ['', '..']:
segments[-1] = ''
elif len(segments) >= 2 and segments[-1] == '..':
segments[-2:] = ['']
return _coerce_result(urlunparse((scheme, netloc, '/'.join(segments),
params, query, fragment)))
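# For example (illustrative):
#   urljoin('http://example.com/a/b.html', 'c.html')  -> 'http://example.com/a/c.html'
#   urljoin('http://example.com/a/b.html', '../d')    -> 'http://example.com/d'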
def urldefrag(url):
"""Removes any existing fragment from URL.
Returns a tuple of the defragmented URL and the fragment. If
the URL contained no fragments, the second element is the
empty string.
"""
url, _coerce_result = _coerce_args(url)
if '#' in url:
s, n, p, a, q, frag = urlparse(url)
defrag = urlunparse((s, n, p, a, q, ''))
else:
frag = ''
defrag = url
return _coerce_result(DefragResult(defrag, frag))
_hexdig = '0123456789ABCDEFabcdef'
_hextobyte = dict(((a + b).encode(), bytes([int(a + b, 16)]))
for a in _hexdig for b in _hexdig)
def unquote_to_bytes(string):
"""unquote_to_bytes('abc%20def') -> b'abc def'."""
# Note: strings are encoded as UTF-8. This is only an issue if it contains
# unescaped non-ASCII characters, which URIs should not.
if not string:
# Is it a string-like object?
string.split
return bytes(b'')
if isinstance(string, str):
string = string.encode('utf-8')
### For Python-Future:
# It is already a byte-string object, but force it to be newbytes here on
# Py2:
string = bytes(string)
###
bits = string.split(b'%')
if len(bits) == 1:
return string
res = [bits[0]]
append = res.append
for item in bits[1:]:
try:
append(_hextobyte[item[:2]])
append(item[2:])
except KeyError:
append(b'%')
append(item)
return bytes(b'').join(res)
_asciire = re.compile('([\x00-\x7f]+)')
def unquote(string, encoding='utf-8', errors='replace'):
"""Replace %xx escapes by their single-character equivalent. The optional
encoding and errors parameters specify how to decode percent-encoded
sequences into Unicode characters, as accepted by the bytes.decode()
method.
By default, percent-encoded sequences are decoded with UTF-8, and invalid
sequences are replaced by a placeholder character.
unquote('abc%20def') -> 'abc def'.
"""
if '%' not in string:
string.split
return string
if encoding is None:
encoding = 'utf-8'
if errors is None:
errors = 'replace'
bits = _asciire.split(string)
res = [bits[0]]
append = res.append
for i in range(1, len(bits), 2):
append(unquote_to_bytes(bits[i]).decode(encoding, errors))
append(bits[i + 1])
return ''.join(res)
def parse_qs(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
"""Parse a query given as a string argument.
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
encoding and errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
"""
parsed_result = {}
pairs = parse_qsl(qs, keep_blank_values, strict_parsing,
encoding=encoding, errors=errors)
for name, value in pairs:
if name in parsed_result:
parsed_result[name].append(value)
else:
parsed_result[name] = [value]
return parsed_result
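# For example (illustrative):
#   parse_qs('a=1&b=2&b=3') -> {'a': ['1'], 'b': ['2', '3']}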
def parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
"""Parse a query given as a string argument.
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
strict_parsing: flag indicating what to do with parsing errors. If
false (the default), errors are silently ignored. If true,
errors raise a ValueError exception.
encoding and errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
Returns a list, as G-d intended.
"""
qs, _coerce_result = _coerce_args(qs)
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError("bad query field: %r" % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
name = unquote(name, encoding=encoding, errors=errors)
name = _coerce_result(name)
value = nv[1].replace('+', ' ')
value = unquote(value, encoding=encoding, errors=errors)
value = _coerce_result(value)
r.append((name, value))
return r
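# For example (illustrative):
#   parse_qsl('a=1&b=2&b=3') -> [('a', '1'), ('b', '2'), ('b', '3')]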
def unquote_plus(string, encoding='utf-8', errors='replace'):
"""Like unquote(), but also replace plus signs by spaces, as required for
unquoting HTML form values.
unquote_plus('%7e/abc+def') -> '~/abc def'
"""
string = string.replace('+', ' ')
return unquote(string, encoding, errors)
_ALWAYS_SAFE = frozenset(bytes(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
b'abcdefghijklmnopqrstuvwxyz'
b'0123456789'
b'_.-'))
_ALWAYS_SAFE_BYTES = bytes(_ALWAYS_SAFE)
_safe_quoters = {}
class Quoter(collections.defaultdict):
"""A mapping from bytes (in range(0,256)) to strings.
String values are percent-encoded byte values, unless the key < 128, and
in the "safe" set (either the specified safe set, or default set).
"""
# Keeps a cache internally, using defaultdict, for efficiency (lookups
# of cached keys don't call Python code at all).
def __init__(self, safe):
"""safe: bytes object."""
self.safe = _ALWAYS_SAFE.union(bytes(safe))
def __repr__(self):
# Without this, will just display as a defaultdict
return "<Quoter %r>" % dict(self)
def __missing__(self, b):
# Handle a cache miss. Store quoted string in cache and return.
res = chr(b) if b in self.safe else '%{0:02X}'.format(b)
self[b] = res
return res
def quote(string, safe='/', encoding=None, errors=None):
"""quote('abc def') -> 'abc%20def'
Each part of a URL, e.g. the path info, the query, etc., has a
different set of reserved characters that must be quoted.
RFC 2396 Uniform Resource Identifiers (URI): Generic Syntax lists
the following reserved characters.
reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" |
"$" | ","
Each of these characters is reserved in some component of a URL,
but not necessarily in all of them.
By default, the quote function is intended for quoting the path
section of a URL. Thus, it will not encode '/'. This character
is reserved, but in typical usage the quote function is being
called on a path where the existing slash characters are used as
reserved characters.
string and safe may be either str or bytes objects. encoding must
not be specified if string is a str.
The optional encoding and errors parameters specify how to deal with
non-ASCII characters, as accepted by the str.encode method.
By default, encoding='utf-8' (characters are encoded with UTF-8), and
errors='strict' (unsupported characters raise a UnicodeEncodeError).
"""
if isinstance(string, str):
if not string:
return string
if encoding is None:
encoding = 'utf-8'
if errors is None:
errors = 'strict'
string = string.encode(encoding, errors)
else:
if encoding is not None:
raise TypeError("quote() doesn't support 'encoding' for bytes")
if errors is not None:
raise TypeError("quote() doesn't support 'errors' for bytes")
return quote_from_bytes(string, safe)
def quote_plus(string, safe='', encoding=None, errors=None):
"""Like quote(), but also replace ' ' with '+', as required for quoting
HTML form values. Plus signs in the original string are escaped unless
they are included in safe. It also does not have safe default to '/'.
"""
# Check if ' ' in string, where string may either be a str or bytes. If
# there are no spaces, the regular quote will produce the right answer.
if ((isinstance(string, str) and ' ' not in string) or
(isinstance(string, bytes) and b' ' not in string)):
return quote(string, safe, encoding, errors)
if isinstance(safe, str):
space = str(' ')
else:
space = bytes(b' ')
string = quote(string, safe + space, encoding, errors)
return string.replace(' ', '+')
def quote_from_bytes(bs, safe='/'):
"""Like quote(), but accepts a bytes object rather than a str, and does
not perform string-to-bytes encoding. It always returns an ASCII string.
quote_from_bytes(b'abc def\x3f') -> 'abc%20def%3f'
"""
if not isinstance(bs, (bytes, bytearray)):
raise TypeError("quote_from_bytes() expected bytes")
if not bs:
return str('')
### For Python-Future:
bs = bytes(bs)
###
if isinstance(safe, str):
# Normalize 'safe' by converting to bytes and removing non-ASCII chars
safe = str(safe).encode('ascii', 'ignore')
else:
### For Python-Future:
safe = bytes(safe)
###
safe = bytes([c for c in safe if c < 128])
if not bs.rstrip(_ALWAYS_SAFE_BYTES + safe):
return bs.decode()
try:
quoter = _safe_quoters[safe]
except KeyError:
_safe_quoters[safe] = quoter = Quoter(safe).__getitem__
return str('').join([quoter(char) for char in bs])
def urlencode(query, doseq=False, safe='', encoding=None, errors=None):
"""Encode a sequence of two-element tuples or dictionary into a URL query string.
If any values in the query arg are sequences and doseq is true, each
sequence element is converted to a separate parameter.
If the query arg is a sequence of two-element tuples, the order of the
parameters in the output will match the order of parameters in the
input.
The query arg may be either a string or a bytes type. When query arg is a
string, the safe, encoding and error parameters are sent the quote_plus for
encoding.
"""
if hasattr(query, "items"):
query = query.items()
else:
# It's a bother at times that strings and string-like objects are
# sequences.
try:
# non-sequence items should not work with len()
# non-empty strings will fail this
if len(query) and not isinstance(query[0], tuple):
raise TypeError
# Zero-length sequences of all types will get here and succeed,
# but that's a minor nit. Since the original implementation
# allowed empty dicts that type of behavior probably should be
# preserved for consistency
except TypeError:
ty, va, tb = sys.exc_info()
raise_with_traceback(TypeError("not a valid non-string sequence "
"or mapping object"), tb)
l = []
if not doseq:
for k, v in query:
if isinstance(k, bytes):
k = quote_plus(k, safe)
else:
k = quote_plus(str(k), safe, encoding, errors)
if isinstance(v, bytes):
v = quote_plus(v, safe)
else:
v = quote_plus(str(v), safe, encoding, errors)
l.append(k + '=' + v)
else:
for k, v in query:
if isinstance(k, bytes):
k = quote_plus(k, safe)
else:
k = quote_plus(str(k), safe, encoding, errors)
if isinstance(v, bytes):
v = quote_plus(v, safe)
l.append(k + '=' + v)
elif isinstance(v, str):
v = quote_plus(v, safe, encoding, errors)
l.append(k + '=' + v)
else:
try:
# Is this a sufficient test for sequence-ness?
x = len(v)
except TypeError:
# not a sequence
v = quote_plus(str(v), safe, encoding, errors)
l.append(k + '=' + v)
else:
# loop over the sequence
for elt in v:
if isinstance(elt, bytes):
elt = quote_plus(elt, safe)
else:
elt = quote_plus(str(elt), safe, encoding, errors)
l.append(k + '=' + elt)
return str('&').join(l)
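# For example (illustrative):
#   urlencode({'a': 1, 'b': 'x y'})      -> 'a=1&b=x+y'
#   urlencode({'a': [1, 2]}, doseq=True) -> 'a=1&a=2'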
# Utilities to parse URLs (most of these return None for missing parts):
# unwrap('<URL:type://host/path>') --> 'type://host/path'
# splittype('type:opaquestring') --> 'type', 'opaquestring'
# splithost('//host[:port]/path') --> 'host[:port]', '/path'
# splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'
# splitpasswd('user:passwd') -> 'user', 'passwd'
# splitport('host:port') --> 'host', 'port'
# splitquery('/path?query') --> '/path', 'query'
# splittag('/path#tag') --> '/path', 'tag'
# splitattr('/path;attr1=value1;attr2=value2;...') ->
# '/path', ['attr1=value1', 'attr2=value2', ...]
# splitvalue('attr=value') --> 'attr', 'value'
# urllib.parse.unquote('abc%20def') -> 'abc def'
# quote('abc def') -> 'abc%20def')
def to_bytes(url):
"""to_bytes(u"URL") --> 'URL'."""
# Most URL schemes require ASCII. If that changes, the conversion
# can be relaxed.
# XXX get rid of to_bytes()
if isinstance(url, str):
try:
url = url.encode("ASCII").decode()
except UnicodeError:
raise UnicodeError("URL " + repr(url) +
" contains non-ASCII characters")
return url
def unwrap(url):
"""unwrap('<URL:type://host/path>') --> 'type://host/path'."""
url = str(url).strip()
if url[:1] == '<' and url[-1:] == '>':
url = url[1:-1].strip()
if url[:4] == 'URL:': url = url[4:].strip()
return url
_typeprog = None
def splittype(url):
"""splittype('type:opaquestring') --> 'type', 'opaquestring'."""
global _typeprog
if _typeprog is None:
import re
_typeprog = re.compile('^([^/:]+):')
match = _typeprog.match(url)
if match:
scheme = match.group(1)
return scheme.lower(), url[len(scheme) + 1:]
return None, url
_hostprog = None
def splithost(url):
"""splithost('//host[:port]/path') --> 'host[:port]', '/path'."""
global _hostprog
if _hostprog is None:
import re
_hostprog = re.compile('^//([^/?]*)(.*)$')
match = _hostprog.match(url)
if match:
host_port = match.group(1)
path = match.group(2)
if path and not path.startswith('/'):
path = '/' + path
return host_port, path
return None, url
_userprog = None
def splituser(host):
"""splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
global _userprog
if _userprog is None:
import re
_userprog = re.compile('^(.*)@(.*)$')
match = _userprog.match(host)
if match: return match.group(1, 2)
return None, host
_passwdprog = None
def splitpasswd(user):
"""splitpasswd('user:passwd') -> 'user', 'passwd'."""
global _passwdprog
if _passwdprog is None:
import re
_passwdprog = re.compile('^([^:]*):(.*)$',re.S)
match = _passwdprog.match(user)
if match: return match.group(1, 2)
return user, None
# splittag('/path#tag') --> '/path', 'tag'
_portprog = None
def splitport(host):
"""splitport('host:port') --> 'host', 'port'."""
global _portprog
if _portprog is None:
import re
_portprog = re.compile('^(.*):([0-9]+)$')
match = _portprog.match(host)
if match: return match.group(1, 2)
return host, None
_nportprog = None
def splitnport(host, defport=-1):
"""Split host and port, returning numeric port.
Return given default port if no ':' found; defaults to -1.
Return numerical port if a valid number are found after ':'.
Return None if ':' but not a valid number."""
global _nportprog
if _nportprog is None:
import re
_nportprog = re.compile('^(.*):(.*)$')
match = _nportprog.match(host)
if match:
host, port = match.group(1, 2)
try:
if not port: raise ValueError("no digits")
nport = int(port)
except ValueError:
nport = None
return host, nport
return host, defport
_queryprog = None
def splitquery(url):
"""splitquery('/path?query') --> '/path', 'query'."""
global _queryprog
if _queryprog is None:
import re
        _queryprog = re.compile(r'^(.*)\?([^?]*)$')
match = _queryprog.match(url)
if match: return match.group(1, 2)
return url, None
_tagprog = None
def splittag(url):
"""splittag('/path#tag') --> '/path', 'tag'."""
global _tagprog
if _tagprog is None:
import re
_tagprog = re.compile('^(.*)#([^#]*)$')
match = _tagprog.match(url)
if match: return match.group(1, 2)
return url, None
def splitattr(url):
"""splitattr('/path;attr1=value1;attr2=value2;...') ->
'/path', ['attr1=value1', 'attr2=value2', ...]."""
words = url.split(';')
return words[0], words[1:]
_valueprog = None
def splitvalue(attr):
"""splitvalue('attr=value') --> 'attr', 'value'."""
global _valueprog
if _valueprog is None:
import re
_valueprog = re.compile('^([^=]*)=(.*)$')
match = _valueprog.match(attr)
if match: return match.group(1, 2)
return attr, None
|
the-stack_106_28551 | import pyaztro
from scape.events import register
ASTRO = ""
@register(outgoing=True, disable_errors=True, pattern=r"^\.hc (.*)")
async def astro(e):
await e.edit("Fetching data...")
if not e.pattern_match.group(1):
x = ASTRO
if not x:
await e.edit("Not Found.")
return
else:
x = e.pattern_match.group(1)
horoscope = pyaztro.Aztro(sign=x)
mood = horoscope.mood
lt = horoscope.lucky_time
desc = horoscope.description
col = horoscope.color
com = horoscope.compatibility
ln = horoscope.lucky_number
result = (
f"**Horoscope for `{x}`**:\n"
f"**Mood :** `{mood}`\n"
f"**Lucky Time :** `{lt}`\n"
f"**Lucky Color :** `{col}`\n"
f"**Lucky Number :** `{ln}`\n"
f"**Compatibility :** `{com}`\n"
f"**Description :** `{desc}`\n"
)
await e.edit(result) |
the-stack_106_28552 |
VALOR_MAXIMO = 100
for numero in range(1, VALOR_MAXIMO + 1):
saida = ""
if numero % 2 == 0:
saida += "Fizz"
if numero % 3 == 0:
saida += "Buzz"
if numero % 5 == 0:
saida += "Zyzz"
if saida == "":
saida = numero
print(saida)
|
the-stack_106_28555 | #!/usr/bin/env python
# coding: utf-8
#### Optimized by Eduardo Coronado and Andrew Carr, Duke University
import numpy as np
def get_perplexity(x_ji, doc_arrays, topic_idx, n_kv, m_k, beta, alpha, gamma, V):
    '''Computes the model's perplexity given the inferences at epoch i, i.e. a metric
    related to the likelihood of each word pertaining to a topic'''
phi = word_topic_dist(n_kv, topic_idx, beta, V)
theta = doc_topic_dist(doc_arrays, topic_idx, m_k, alpha, gamma)
log_lik = 0
N = 0
for x_i, p_jk in zip(x_ji, theta):
for word in x_i:
            p_w = sum(p_k * p_kv for p_k, p_kv in zip(p_jk, phi[word, :]))
log_lik -= np.log(p_w)
N += len(x_i)
perplex = np.exp(log_lik/N)
return perplex
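# Note (added for clarity): the value returned above is
#   perplexity = exp( -(1/N) * sum_i log p(w_i) ),
# where p(w_i) = sum_k theta_jk * phi[w_i, k] for the document j containing word w_i.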
def doc_topic_dist(doc_arrays, topic_idx, m_k, alpha, gamma):
'''Computes effects of each doc and topic into a probability distribution'''
topic_tbl_effect = np.array(m_k, dtype=float)
topic_tbl_effect[0] = gamma
topic_tbl_effect *= alpha / topic_tbl_effect[topic_idx].sum()
theta = []
for j, doc_j in enumerate(doc_arrays):
n_jt = doc_j['n_jt']
p_jk = topic_tbl_effect.copy()
for tbl in doc_j['t_j']:
if tbl == 0: continue
k = doc_j['k_jt'][tbl]
p_jk[k] += n_jt[tbl]
p_jk = p_jk[topic_idx]
p_jk /= p_jk.sum()
theta.append(p_jk)
return np.array(theta)
def word_topic_dist(n_kv, topic_idx, beta, V):
'''Compute word probability distribution per topic'''
phi = n_kv/n_kv.sum(axis=0)[None, :]
phi = phi[:, topic_idx]
phi[:,0] = beta / V
return phi |
the-stack_106_28556 | #!/usr/bin/env python
from .util import create_url
class PartialDeleteAPI:
"""Create a job to partially delete the contents of the table with the given
time range.
This class is inherited by :class:`tdclient.api.API`.
"""
def partial_delete(self, db, table, to, _from, params=None):
"""Create a job to partially delete the contents of the table with the given
time range.
Args:
db (str): Target database name.
table (str): Target table name.
to (int): Time in Unix Epoch format indicating the End date and time of the
data to be deleted. Should be set only by the hour. Minutes and seconds
values will not be accepted.
_from (int): Time in Unix Epoch format indicating the Start date and time of
the data to be deleted. Should be set only by the hour. Minutes and
seconds values will not be accepted.
params (dict, optional): Extra parameters.
- pool_name (str, optional):
Indicates the resource pool to execute this
job. If not provided, the account's default resource pool would be
used.
- domain_key (str, optional):
Domain key that will be assigned to the
partial delete job to be created
Returns:
str: Job ID.
"""
params = {} if params is None else params
params["to"] = str(to)
params["from"] = str(_from)
with self.post(
create_url("/v3/table/partialdelete/{db}/{table}", db=db, table=table),
params,
) as res:
code, body = res.status, res.read()
if code != 200:
self.raise_error("Partial delete failed", res, body)
js = self.checked_json(body, ["job_id"])
return str(js["job_id"])
|
the-stack_106_28557 | import datetime
import importlib
import pathlib
author = "dynamicslab"
project = "pysindy" # package name
# no need to edit below this line
copyright = f"{datetime.datetime.now().year}, {author}"
module = importlib.import_module(project)
version = release = getattr(module, "__version__")
master_doc = "index"
extensions = [
"sphinxcontrib.apidoc",
"sphinx.ext.autodoc",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinx.ext.autosummary",
"sphinx.ext.napoleon",
"sphinx.ext.mathjax",
"sphinx_nbexamples",
]
apidoc_module_dir = f"../{project}"
apidoc_excluded_paths = ["tests"]
apidoc_toc_file = False
autodoc_default_options = {"members": True}
autodoc_member_order = "bysource"
autoclass_content = "init"
language = None
here = pathlib.Path(__file__).parent
if (here / "static/custom.css").exists():
html_static_path = ["static"]
def setup(app):
app.add_stylesheet("custom.css")
exclude_patterns = ["build", "_build"]
# pygments_style = "sphinx"
add_module_names = True
add_function_parentheses = False
todo_include_todos = True
html_theme = "sphinx_rtd_theme"
html_show_sourcelink = False
html_show_sphinx = False
html_show_copyright = True
default_role = "any"
html_sourcelink_suffix = ""
example_gallery_config = dict(
dont_preprocess=True,
examples_dirs=["../examples"],
gallery_dirs=["examples"],
pattern=".+.ipynb",
)
# -- Extensions to the Napoleon GoogleDocstring class ---------------------
# michaelgoerz.net/notes/extending-sphinx-napoleon-docstring-sections.html
from sphinx.ext.napoleon.docstring import GoogleDocstring # noqa: E402
def parse_keys_section(self, section):
return self._format_fields("Keys", self._consume_fields())
GoogleDocstring._parse_keys_section = parse_keys_section
def parse_attributes_section(self, section):
return self._format_fields("Attributes", self._consume_fields())
GoogleDocstring._parse_attributes_section = parse_attributes_section
def parse_class_attributes_section(self, section):
return self._format_fields("Class Attributes", self._consume_fields())
GoogleDocstring._parse_class_attributes_section = parse_class_attributes_section
def patched_parse(self):
"""
we now patch the parse method to guarantee that the the above methods are
assigned to the _section dict
"""
self._sections["keys"] = self._parse_keys_section
self._sections["class attributes"] = self._parse_class_attributes_section
self._unpatched_parse()
GoogleDocstring._unpatched_parse = GoogleDocstring._parse
GoogleDocstring._parse = patched_parse
|
the-stack_106_28558 | """empty message
Revision ID: 8f8001e98e65
Revises: 2721189b0c8f
Create Date: 2020-02-07 12:42:57.248894
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8f8001e98e65'
down_revision = '2721189b0c8f'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('proposal', sa.Column('changes_requested_discussion', sa.Boolean(), nullable=True))
op.add_column('proposal', sa.Column('changes_requested_discussion_reason', sa.String(length=255), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('proposal', 'changes_requested_discussion_reason')
op.drop_column('proposal', 'changes_requested_discussion')
# ### end Alembic commands ###
|
the-stack_106_28559 | from keras.layers import *
from keras.models import *
from keras.optimizers import *
from config import KERAS_FILTERS as FILTERS, KERAS_KERNEL_SIZE as KERNEL_SIZE, \
KERAS_LEARNING_RATE as LEARNING_RATE, KERAS_DROPOUT as DROPOUT
def build_model(board_shape: tuple, action_size: int):
board_x, board_y = board_shape
# s: BATCH_SIZE x board_x x board_y
input_boards = Input(shape=board_shape)
# BATCH_SIZE x board_x x board_y x 1
x_image = Reshape((board_x, board_y, 1))(input_boards)
# BATCH_SIZE x board_x x board_y x FILTERS
h_conv1 = conv_layer(x_image, padding='same')
# BATCH_SIZE x board_x x board_y x FILTERS
h_conv2 = conv_layer(h_conv1, padding='same')
# BATCH_SIZE x (board_x - 2) x (board_y - 2) x FILTERS
h_conv3 = conv_layer(h_conv2, padding='valid')
# BATCH_SIZE x (board_x - 4) x (board_y - 4) x FILTERS
h_conv4 = conv_layer(h_conv3, padding='valid')
h_conv4_flat = Flatten()(h_conv4)
# BATCH_SIZE x 1024
s_fc1 = dropout_layer(h_conv4_flat, units=1024)
# BATCH_SIZE x 512
s_fc2 = dropout_layer(s_fc1, units=512)
# BATCH_SIZE x ACTION_SIZE
pi = Dense(action_size, activation='softmax', name='pi')(s_fc2)
# BATCH_SIZE x 1
v = Dense(1, activation='tanh', name='v')(s_fc2)
model = Model(inputs=input_boards, outputs=[pi, v])
model.compile(loss={'pi': 'categorical_crossentropy', 'v': 'mean_squared_error'},
optimizer=Adam(LEARNING_RATE))
return model
def conv_layer(x, padding):
x = Conv2D(FILTERS, KERNEL_SIZE, padding=padding)(x)
x = BatchNormalization(axis=3)(x)
x = Activation('relu')(x)
return x
def dropout_layer(x, units):
x = Dense(units=units)(x)
x = BatchNormalization(axis=1)(x)
x = Activation('relu')(x)
x = Dropout(DROPOUT)(x)
return x
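# Illustrative usage (a sketch; the board shape and action size are assumptions,
# and the output shapes assume KERNEL_SIZE=3 as implied by the inline comments):
#
#   import numpy as np
#   model = build_model(board_shape=(6, 7), action_size=7)
#   pi, v = model.predict(np.zeros((1, 6, 7)))   # pi: (1, 7), v: (1, 1)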
|
the-stack_106_28562 | import os, sys
sys.path.append(os.getcwd())
import pickle as pkl
from urllib import request
import pandas as pd
import numpy as np
from zipfile import ZipFile
dataset_list = []
def load_datasets(datasets=None):
"""
Args:
datasets (list of str): Lists of datasets to load by name. If None, all available datasets are loaded iteratively.
"""
if datasets is None:
datasets = [d.name for d in dataset_list]
for dataset in dataset_list:
if dataset.name in datasets:
yield dataset.load()
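# Illustrative usage (a sketch): iterate over selected datasets by name.
#
#   for dataset in load_datasets(['iris']):
#       X, y = dataset.data, dataset.target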
def classproperty(method):
class ClassPropertyDescriptor:
def __init__(self, method):
self.method = method
def __get__(self, obj, objtype=None):
return self.method(objtype)
return ClassPropertyDescriptor(method)
class Dataset:
def __init__(self, dataframe):
self.dataframe = dataframe
self.n_examples, self.n_features = self.data.shape
self.n_classes = len(set(self.target))
@property
def data(self):
return self.dataframe.loc[:, self.dataframe.columns != 'class'].to_numpy(dtype=float)
@property
def target(self):
return self.dataframe.loc[:, 'class'].to_numpy()
def __repr__(self):
return f'Dataset f{type(self).name} with {self.n_examples} examples and {self.n_features} features'
@classproperty
def path_to_raw_file(cls):
return os.path.dirname(__file__) + '/raw/' + cls.name + '.raw'
@classmethod
def load(cls):
if not os.path.exists(cls.path_to_raw_file):
cls.download_dataset()
return cls(cls.create_dataframe())
@classmethod
def download_dataset(cls):
content = request.urlopen(cls.url)
os.makedirs(os.path.dirname(__file__) + '/raw/', exist_ok=True)
with open(cls.path_to_raw_file, 'wb') as file:
for line in content:
file.write(line)
@classmethod
def create_dataframe(cls):
raise NotImplementedError
class BreastCancerWisconsinDiagnostic(Dataset):
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data"
name = "breast_cancer_wisconsin_diagnostic"
bibtex_label = "street1993nuclear"
@classmethod
def create_dataframe(cls):
with open(cls.path_to_raw_file, 'r') as file:
col_names = ['id', 'class'] + [f'attr {i}' for i in range(30)]
df = pd.read_csv(file, names=col_names, header=None)
df.drop(columns=col_names[0], inplace=True)
return df
dataset_list.append(BreastCancerWisconsinDiagnostic)
class Cardiotocography10(Dataset):
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00193/CTG.xls"
name = "cardiotocography_10"
bibtex_label = "ayres2000sisporto"
@classmethod
def create_dataframe(cls):
with pd.ExcelFile(cls.path_to_raw_file) as file:
df = pd.read_excel(file, sheet_name=file.sheet_names[1], header=0, skiprows=[0] + [i for i in range(2128, 2131)])
cols = list(df)
cols_to_drop = cols[:10] + cols[31:43] + cols[-2:]
df.drop(columns=cols_to_drop, inplace=True)
df.rename(columns={'CLASS':'class'}, inplace=True)
return df
dataset_list.append(Cardiotocography10)
class ClimateModelSimulationCrashes(Dataset):
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00252/pop_failures.dat"
name = "climate_model_simulation_crashes"
bibtex_label = "lucas2013failure"
@classmethod
def create_dataframe(cls):
with open(cls.path_to_raw_file, 'r') as file:
df = pd.read_csv(file, header=0, delim_whitespace=True)
df.drop(columns=list(df)[:2], inplace=True)
df.rename(columns={list(df)[-1]:'class'}, inplace=True)
return df
dataset_list.append(ClimateModelSimulationCrashes)
class ConnectionistBenchSonar(Dataset):
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/undocumented/connectionist-bench/sonar/sonar.all-data"
name = "connectionist_bench_sonar"
bibtex_label = "gorman1988analysis"
@classmethod
def create_dataframe(cls):
with open(cls.path_to_raw_file, 'r') as file:
df = pd.read_csv(file, header=None)
df.rename(columns={list(df)[-1]:'class'}, inplace=True)
return df
dataset_list.append(ConnectionistBenchSonar)
class DiabeticRetinopathyDebrecen(Dataset):
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00329/messidor_features.arff"
name = "diabetic_retinopathy_debrecen"
bibtex_label = "antal2014ensemble"
@classmethod
def create_dataframe(cls):
with open(cls.path_to_raw_file, 'r') as file:
df = pd.read_csv(file, header=None, skiprows=list(range(24)))
df.rename(columns={list(df)[-1]:'class'}, inplace=True)
return df
dataset_list.append(DiabeticRetinopathyDebrecen)
class Fertility(Dataset):
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00244/fertility_Diagnosis.txt"
name = "fertility"
bibtex_label = "gil2012predicting"
@classmethod
def create_dataframe(cls):
with open(cls.path_to_raw_file, 'r') as file:
df = pd.read_csv(file, header=None)
df.rename(columns={list(df)[-1]:'class'}, inplace=True)
return df
dataset_list.append(Fertility)
class HabermansSurvival(Dataset):
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/haberman/haberman.data"
name = "habermans_survival"
bibtex_label = "haberman1976generalized"
@classmethod
def create_dataframe(cls):
with open(cls.path_to_raw_file, 'r') as file:
df = pd.read_csv(file, header=None)
df.rename(columns={list(df)[-1]:'class'}, inplace=True)
return df
dataset_list.append(HabermansSurvival)
class ImageSegmentation(Dataset):
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/image/segmentation.data"
name = "image_segmentation"
bibtex_label = None
@classmethod
def create_dataframe(cls):
with open(cls.path_to_raw_file, 'r') as file:
df = pd.read_csv(file, header=None, skiprows=list(range(4)))
df.rename(columns={list(df)[0]:'class'}, inplace=True)
return df
dataset_list.append(ImageSegmentation)
class Ionosphere(Dataset):
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/ionosphere/ionosphere.data"
name = "ionosphere"
bibtex_label = "sigillito1989classification"
@classmethod
def create_dataframe(cls):
with open(cls.path_to_raw_file, 'r') as file:
df = pd.read_csv(file, header=None)
df.rename(columns={list(df)[-1]:'class'}, inplace=True)
return df
dataset_list.append(Ionosphere)
class Iris(Dataset):
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"
name = "iris"
bibtex_label = "fisher1936use"
@classmethod
def create_dataframe(cls):
with open(cls.path_to_raw_file, 'r') as file:
df = pd.read_csv(file, header=None, names=['sepal length', 'sepal width', 'petal length', 'petal width', 'class'])
return df
dataset_list.append(Iris)
class Parkinson(Dataset):
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/parkinsons/parkinsons.data"
name = "parkinson"
bibtex_label = "little2007exploiting"
@classmethod
def create_dataframe(cls):
with open(cls.path_to_raw_file, 'r') as file:
df = pd.read_csv(file, header=0)
df.rename(columns={'status':'class'}, inplace=True)
df.drop(columns='name', inplace=True)
return df
dataset_list.append(Parkinson)
class PlanningRelax(Dataset):
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00230/plrx.txt"
name = "planning_relax"
bibtex_label = "bhatt2012planning"
@classmethod
def create_dataframe(cls):
with open(cls.path_to_raw_file, 'r') as file:
df = pd.read_csv(file, header=None, sep='\t')
df.drop(columns=list(df)[-1], inplace=True)
df.rename(columns={list(df)[-1]:'class'}, inplace=True)
return df
dataset_list.append(PlanningRelax)
class QSARBiodegradation(Dataset):
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00254/biodeg.csv"
name = "qsar_biodegradation"
bibtex_label = "mansouri2013quantitative"
@classmethod
def create_dataframe(cls):
with open(cls.path_to_raw_file, 'r') as file:
df = pd.read_csv(file, header=None, sep=';')
df.rename(columns={list(df)[-1]:'class'}, inplace=True)
return df
dataset_list.append(QSARBiodegradation)
class Seeds(Dataset):
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00236/seeds_dataset.txt"
name = "seeds"
bibtex_label = "charytanowicz2010complete"
@classmethod
def create_dataframe(cls):
with open(cls.path_to_raw_file, 'r') as file:
df = pd.read_csv(file, header=None, delim_whitespace=True)
df.rename(columns={list(df)[-1]:'class'}, inplace=True)
return df
dataset_list.append(Seeds)
class Spambase(Dataset):
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/spambase/spambase.data"
name = "spambase"
bibtex_label = None
@classmethod
def create_dataframe(cls):
with open(cls.path_to_raw_file, 'r') as file:
df = pd.read_csv(file, header=None)
df.rename(columns={list(df)[-1]:'class'}, inplace=True)
return df
dataset_list.append(Spambase)
class VertebralColumn3C(Dataset):
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00212/vertebral_column_data.zip"
name = "vertebral_column_3c"
bibtex_label = "berthonnaud2005analysis"
@classmethod
def create_dataframe(cls):
with ZipFile(cls.path_to_raw_file, 'r') as zipfile:
with zipfile.open('column_3C.dat') as file:
df = pd.read_csv(file, header=None, delim_whitespace=True)
df.rename(columns={list(df)[-1]:'class'}, inplace=True)
return df
dataset_list.append(VertebralColumn3C)
class WallFollowingRobot24(Dataset):
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00194/sensor_readings_24.data"
name = "wall_following_robot_24"
bibtex_label = "freire2009short"
@classmethod
def create_dataframe(cls):
with open(cls.path_to_raw_file, 'r') as file:
df = pd.read_csv(file, header=None)
df.rename(columns={list(df)[-1]:'class'}, inplace=True)
return df
dataset_list.append(WallFollowingRobot24)
class Wine(Dataset):
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data"
name = "wine"
bibtex_label = "aeberhard1994comparative"
@classmethod
def create_dataframe(cls):
with open(cls.path_to_raw_file, 'r') as file:
col_names = [
'class',
'Alcohol',
'Malic acid',
'Ash',
'Alcalinity of ash',
'Magnesium',
'Total phenols',
'Flavanoids',
'Nonflavanoid phenols',
'Proanthocyanins',
'Color intensity',
'Hue',
'OD280/OD315 of diluted wines',
'Proline'
]
df = pd.read_csv(file, names=col_names, header=None)
return df
dataset_list.append(Wine)
class Yeast(Dataset):
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/yeast/yeast.data"
name = "yeast"
bibtex_label = "horton1996probabilistic"
@classmethod
def create_dataframe(cls):
with open(cls.path_to_raw_file, 'r') as file:
df = pd.read_csv(file, header=None, delim_whitespace=True)
df.drop(columns=list(df)[0], inplace=True)
df.rename(columns={list(df)[-1]:'class'}, inplace=True)
return df
dataset_list.append(Yeast)
if __name__ == "__main__":
for i, d in enumerate(load_datasets()):
assert not np.isnan(d.data.sum())
print(i, d.name, d.n_examples, d.n_classes)
|
the-stack_106_28563 | import nbp
import numpy as np
import matplotlib.pyplot as plt
data = nbp.Parser('sodium-chloride-example.npz').parse()
for key, val in data.items():
if isinstance(val, np.ndarray):
print(key, val.shape)
print('flipped ', val[:, None].shape)
elif isinstance(val, dict):
for sk, sv in val.items():
print(sk, sv)
# units epsilon0 = 55.3e-4 eV/A
# 1 kJ/mol = 1.0364x10-2 eV
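# The `if 1:` switch below selects between two modes: the first branch builds a
# system from the parsed data and runs a fresh simulation, the second (set the
# condition to 0) reloads a saved trajectory for analysis. Note that the energy
# plot at the end uses `op_sys`, which is only defined by the simulation branch.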
if 1:
# for simulating
system = nbp.System(data['ch_length'],
data['sigma'][:, None],
data['epsilon'][:, None] * 1.0364e-2,
data['charge'][:, None],
data['pos'],
lj=True, ewald=False, use_neighbours=False,
epsilon0=55.3e-4)
op_sys = system.optimize(max_steps=500, cov=system.info().cutoff()/2**7, num_particles=0.05)
print('\n\n\noptimized\n\n\n')
op_sys.simulate(100, 300)
else:
# for analysis
traj = np.load('data/trajectory_300.npy')
lj = np.load('data/lj_300.npy')
system = nbp.System(data['ch_length'],
np.ones((traj.shape[1], 1)),
np.ones((traj.shape[1], 1)),
np.ones((traj.shape[1], 1)),
traj[0],
lj=True, ewald=False, use_neighbours=False,
epsilon0=1)
for i in traj[1:50]:
system.update_state(nbp.SystemState(i, system))
energies = [state.energy() for state in op_sys.states()]
plt.plot(energies)
plt.show()
# import pickle
#
# pickle.dump([simu_sys, energies], '')
print('ok')
|
the-stack_106_28564 | import FWCore.ParameterSet.Config as cms
def printGeomInfo(process):
process.load("SimGeneral.HepPDTESSource.pdt_cfi")
process.load("Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi")
process.load("Geometry.MuonNumbering.muonNumberingInitialization_cfi")
process.load("Geometry.HcalCommonData.hcalDDDSimConstants_cff")
process.load("IOMC.RandomEngine.IOMC_cff")
process.load('IOMC.EventVertexGenerators.VtxSmearedFlat_cfi')
process.load('GeneratorInterface.Core.generatorSmeared_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load("SimG4Core.Application.g4SimHits_cfi")
process.source = cms.Source("EmptySource")
process.generator = cms.EDProducer("FlatRandomEGunProducer",
PGunParameters = cms.PSet(
PartID = cms.vint32(14),
MinEta = cms.double(-3.5),
MaxEta = cms.double(3.5),
MinPhi = cms.double(-3.14159265359),
MaxPhi = cms.double(3.14159265359),
MinE = cms.double(9.99),
MaxE = cms.double(10.01)
),
AddAntiParticle = cms.bool(False),
Verbosity = cms.untracked.int32(0),
firstRun = cms.untracked.uint32(1)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.p1 = cms.Path(process.generator*process.VtxSmeared*process.generatorSmeared*process.g4SimHits)
process.g4SimHits.UseMagneticField = False
process.g4SimHits.Physics.type = 'SimG4Core/Physics/DummyPhysics'
process.g4SimHits.Physics.DummyEMPhysics = True
process.g4SimHits.Physics.DefaultCutValue = 10.
process.g4SimHits.Watchers = cms.VPSet(cms.PSet(
DumpSummary = cms.untracked.bool(True),
DumpLVTree = cms.untracked.bool(True),
DumpMaterial = cms.untracked.bool(False),
DumpLVList = cms.untracked.bool(True),
DumpLV = cms.untracked.bool(True),
DumpSolid = cms.untracked.bool(True),
DumpAttributes = cms.untracked.bool(False),
DumpPV = cms.untracked.bool(True),
DumpRotation = cms.untracked.bool(False),
DumpReplica = cms.untracked.bool(False),
DumpTouch = cms.untracked.bool(False),
DumpSense = cms.untracked.bool(False),
Name = cms.untracked.string('TotemT*'),
Names = cms.untracked.vstring(' '),
type = cms.string('PrintGeomInfoAction')
))
return(process)
|
the-stack_106_28569 | from simple_tensor.segmentation.deeplab import *
segmentation = DeepLab(num_classes=1,
model_path = "/home/model/resnet_v2_101/resnet_v2_101.ckpt",
is_training=True)
train_generator = segmentation.batch_generator(batch_size=4,
dataset_path='/home/dataset/part_segmentation/', message="TRAIN")
val_generator = segmentation.batch_generator(batch_size=4,
dataset_path='/home/dataset/part_segmentation/', message="VAL")
# train
segmentation.optimize(subdivisions=10,
iterations = 10000,
best_loss= 1000000,
train_batch=train_generator,
val_batch=val_generator,
save_path='/home/model/melon_segmentation/')
|
the-stack_106_28570 | from tests import PyResTests, Basic
from pyres import failure
from pyres.job import Job
class FailureTests(PyResTests):
def setUp(self):
PyResTests.setUp(self)
self.queue_name = 'basic'
self.job_class = Basic
def test_count(self):
self.resq.enqueue(self.job_class,"test1")
job = Job.reserve(self.queue_name,self.resq)
job.fail("problem")
assert failure.count(self.resq) == 1
assert self.redis.llen('resque:failed') == 1
def test_create(self):
self.resq.enqueue(self.job_class,"test1")
job = Job.reserve(self.queue_name,self.resq)
e = Exception('test')
fail = failure.create(e, self.queue_name, job._payload)
assert isinstance(fail._payload, dict)
fail.save(self.resq)
assert failure.count(self.resq) == 1
assert self.redis.llen('resque:failed') == 1
def test_all(self):
self.resq.enqueue(self.job_class,"test1")
job = Job.reserve(self.queue_name,self.resq)
e = Exception('problem')
job.fail(e)
assert len(failure.all(self.resq, 0, 20)) == 1
def test_clear(self):
self.resq.enqueue(self.job_class,"test1")
job = Job.reserve(self.queue_name,self.resq)
e = Exception('problem')
job.fail(e)
assert self.redis.llen('resque:failed') == 1
failure.clear(self.resq)
assert self.redis.llen('resque:failed') == 0
def test_requeue(self):
self.resq.enqueue(self.job_class,"test1")
job = Job.reserve(self.queue_name,self.resq)
e = Exception('problem')
fail_object = job.fail(e)
assert self.resq.size(self.queue_name) == 0
failure.requeue(self.resq, fail_object)
assert self.resq.size(self.queue_name) == 1
job = Job.reserve(self.queue_name,self.resq)
assert job._queue == self.queue_name
mod_with_class = '{module}.{klass}'.format(
module=self.job_class.__module__,
klass=self.job_class.__name__)
self.assertEqual(job._payload, {'class':mod_with_class,'args':['test1'],'enqueue_timestamp': job.enqueue_timestamp})
|
the-stack_106_28571 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'mp3chaps'
VERSION = '0.2'
KEYWORDS = 'mp3 chapters'
DESCRIPTION = 'tool for inserting chapter marks in mp3 files'
URL = 'https://github.com/dskrad/mp3chaps'
EMAIL = '[email protected]'
AUTHOR = 'David Karimeddini'
# What packages are required for this module to be executed?
REQUIRED = [
'docopt>=0.6.2', 'eyeD3>=0.8.4'
]
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.rst' is present in your MANIFEST.in file!
with io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = '\n' + f.read()
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPi via Twine…')
os.system('twine upload dist/*')
sys.exit()
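# With the `cmdclass` mapping passed to setup() below, this command is invoked as:
#   python setup.py upload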
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=long_description,
author=AUTHOR,
author_email=EMAIL,
url=URL,
#packages=find_packages(exclude=('tests',)),
# If your package is a single module, use this instead of 'packages':
py_modules=["mp3chaps"],
entry_points={
'console_scripts': [ 'mp3chaps=mp3chaps:main' ],
},
install_requires=REQUIRED,
include_package_data=True,
license='MIT',
keywords=KEYWORDS,
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand
},
)
|
the-stack_106_28572 | import os
from pathlib import Path
from my.bash import _parse_file
test_dir = os.path.dirname(os.path.abspath(__file__))
history_file = Path(os.path.join(test_dir, "bash", "history"))
def test_single_file() -> None:
history = list(_parse_file(history_file))
assert len(history) == 4
assert history[0].command == "ls"
assert history[1].command == "git status"
assert (
history[2].command
== '''echo "$(
date
uname
)"'''
)
assert history[3].command == "ls"
|
the-stack_106_28575 |
import os
import math
from kivy.app import App
from kivy.clock import Clock
from kivy3 import Scene, Renderer, PerspectiveCamera, Mesh, Material
from kivy3.extras.geometries import BoxGeometry
from kivy3.loaders import OBJLoader
from kivy.uix.floatlayout import FloatLayout
from urdf_parser_py.urdf import URDF
# Resources pathes
_this_path = os.path.dirname(os.path.realpath(__file__))
class ObjectTrackball(FloatLayout):
def __init__(self, camera, radius, *args, **kw):
super(ObjectTrackball, self).__init__(*args, **kw)
self.camera = camera
self.radius = radius
self.phi = 90
self.theta = 0
self._touches = []
self.camera.pos.z = radius
camera.look_at((0, 0, 0))
def define_rotate_angle(self, touch):
theta_angle = (touch.dx / self.width) * -360
phi_angle = -1 * (touch.dy / self.height) * 360
return phi_angle, theta_angle
def on_touch_down(self, touch):
touch.grab(self)
self._touches.append(touch)
def on_touch_up(self, touch):
touch.ungrab(self)
self._touches.remove(touch)
def on_touch_move(self, touch):
if touch in self._touches and touch.grab_current == self:
if len(self._touches) == 1:
self.do_rotate(touch)
elif len(self._touches) == 2:
pass
def do_rotate(self, touch):
d_phi, d_theta = self.define_rotate_angle(touch)
self.phi += d_phi
self.theta += d_theta
_phi = math.radians(self.phi)
_theta = math.radians(self.theta)
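# Standard spherical-to-Cartesian conversion: the camera orbits the origin on a
# sphere of fixed radius, with phi as the polar angle and theta as the azimuth.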
z = self.radius * math.cos(_theta) * math.sin(_phi)
x = self.radius * math.sin(_theta) * math.sin(_phi)
y = self.radius * math.cos(_phi)
self.camera.pos = x, y, z
self.camera.look_at((0, 0, 0))
class MainApp(App):
def build(self):
self.renderer = Renderer()
scene = Scene()
camera = PerspectiveCamera(90, 1, 1, 2500)
geo = BoxGeometry(20, 20, 20)
material = Material(color=(0., 1., 1.), diffuse=(0., 1., 1.),
specular=(.35, .35, .35))
obj = Mesh(geo, material)
self.camera = camera
root = ObjectTrackball(camera, 100)
scene.add(obj)
robot = URDF.from_xml_file('reach7.urdf')
print(robot)
self.renderer.render(scene, camera)
self.renderer.main_light.intensity = 500
root.add_widget(self.renderer)
self.renderer.bind(size=self._adjust_aspect)
return root
def _adjust_aspect(self, inst, val):
rsize = self.renderer.size
aspect = rsize[0] / float(rsize[1])
self.renderer.camera.aspect = aspect
if __name__ == '__main__':
MainApp().run()
|
the-stack_106_28576 | from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Implements parse_requirements as standalone functionality
with open("requirements.txt") as f:
reqs = [l.strip('\n') for l in f if l.strip('\n') and not l.startswith('#')]
setup(
name='blastradius',
version='0.1.25',
description='Interactive Terraform graph visualizations',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
author='Patrick McMurchie',
author_email='[email protected]',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
scripts=['bin/blast-radius'],
install_requires=reqs,
)
|
the-stack_106_28577 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Jonas Borgström <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <[email protected]>
import copy
import re
from genshi.builder import tag
from trac.cache import cached
from trac.config import (
BoolOption, ConfigSection, ListOption, Option, OrderedExtensionsOption
)
from trac.core import *
from trac.perm import IPermissionRequestor, PermissionCache, PermissionSystem
from trac.resource import IResourceManager
from trac.util import Ranges, as_int
from trac.util.text import shorten_line
from trac.util.translation import _, N_, gettext
from trac.wiki import IWikiSyntaxProvider, WikiParser
class TicketFieldList(list):
"""Improved ticket field list, allowing access by name."""
__slots__ = ['_map']
def __init__(self, *args):
super(TicketFieldList, self).__init__(*args)
self._map = dict((value['name'], value) for value in self)
def append(self, value):
super(TicketFieldList, self).append(value)
self._map[value['name']] = value
def by_name(self, name, default=None):
return self._map.get(name, default)
def __copy__(self):
return TicketFieldList(self)
def __deepcopy__(self, memo):
return TicketFieldList(copy.deepcopy(value, memo) for value in self)
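# Illustrative usage (not part of Trac itself):
#
#   fields = TicketFieldList([{'name': 'summary', 'type': 'text', 'label': 'Summary'}])
#   fields.by_name('summary')                 # -> the field dict
#   fields.by_name('missing', default=None)   # -> None, like dict.get()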
class ITicketActionController(Interface):
"""Extension point interface for components willing to participate
in the ticket workflow.
This is mainly about controlling the changes to the ticket ''status'',
though not restricted to it.
"""
def get_ticket_actions(req, ticket):
"""Return an iterable of `(weight, action)` tuples corresponding to
the actions that are contributed by this component. The list is
dependent on the current state of the ticket and the actual request
parameter.
`action` is a key used to identify that particular action.
(note that 'history' and 'diff' are reserved and should not be used
by plugins)
The actions will be presented on the page in descending order of the
integer weight. The first action in the list is used as the default
action.
When in doubt, use a weight of 0.
"""
def get_all_status():
"""Returns an iterable of all the possible values for the ''status''
field this action controller knows about.
This will be used to populate the query options and the like.
It is assumed that the initial status of a ticket is 'new' and
the terminal status of a ticket is 'closed'.
"""
def render_ticket_action_control(req, ticket, action):
"""Return a tuple in the form of `(label, control, hint)`
`label` is a short text that will be used when listing the action,
`control` is the markup for the action control and `hint` should
explain what will happen if this action is taken.
This method will only be called if the controller claimed to handle
the given `action` in the call to `get_ticket_actions`.
Note that the radio button for the action has an `id` of
`"action_%s" % action`. Any `id`s used in `control` need to be made
unique. The method used in the default ITicketActionController is to
use `"action_%s_something" % action`.
"""
def get_ticket_changes(req, ticket, action):
"""Return a dictionary of ticket field changes.
This method must not have any side-effects because it will also
be called in preview mode (`req.args['preview']` will be set, then).
See `apply_action_side_effects` for that. If the latter indeed triggers
some side-effects, it is advised to emit a warning
(`trac.web.chrome.add_warning(req, reason)`) when this method is called
in preview mode.
This method will only be called if the controller claimed to handle
the given `action` in the call to `get_ticket_actions`.
"""
def apply_action_side_effects(req, ticket, action):
"""Perform side effects once all changes have been made to the ticket.
Multiple controllers might be involved, so the apply side-effects
offers a chance to trigger a side-effect based on the given `action`
after the new state of the ticket has been saved.
This method will only be called if the controller claimed to handle
the given `action` in the call to `get_ticket_actions`.
"""
class ITicketChangeListener(Interface):
"""Extension point interface for components that require notification
when tickets are created, modified, or deleted."""
def ticket_created(ticket):
"""Called when a ticket is created."""
def ticket_changed(ticket, comment, author, old_values):
"""Called when a ticket is modified.
`old_values` is a dictionary containing the previous values of the
fields that have changed.
"""
def ticket_deleted(ticket):
"""Called when a ticket is deleted."""
def ticket_comment_modified(ticket, cdate, author, comment, old_comment):
"""Called when a ticket comment is modified."""
def ticket_change_deleted(ticket, cdate, changes):
"""Called when a ticket change is deleted.
`changes` is a dictionary of tuple `(oldvalue, newvalue)`
containing the ticket change of the fields that have changed."""
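# Illustrative sketch (not part of Trac): a listener component implements this
# interface and reacts to the events it cares about; the remaining methods can be
# provided as no-ops, e.g.
#
#   class TicketLogger(Component):
#       implements(ITicketChangeListener)
#
#       def ticket_created(self, ticket):
#           self.log.info('ticket #%s created', ticket.id)
#
#       def ticket_changed(self, ticket, comment, author, old_values):
#           self.log.info('ticket #%s changed by %s: %r', ticket.id, author, old_values)
#
#       def ticket_deleted(self, ticket):
#           pass
#
#       def ticket_comment_modified(self, ticket, cdate, author, comment, old_comment):
#           pass
#
#       def ticket_change_deleted(self, ticket, cdate, changes):
#           pass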
class ITicketManipulator(Interface):
"""Miscellaneous manipulation of ticket workflow features."""
def prepare_ticket(req, ticket, fields, actions):
"""Not currently called, but should be provided for future
compatibility."""
def validate_ticket(req, ticket):
"""Validate a ticket after it's been populated from user input.
Must return a list of `(field, message)` tuples, one for each problem
detected. `field` can be `None` to indicate an overall problem with the
ticket. Therefore, a return value of `[]` means everything is OK."""
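# For example, an implementation of validate_ticket() might return
#   [('summary', 'Summary must not be empty')]
# to flag a single field, or [] when the ticket passes validation.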
class IMilestoneChangeListener(Interface):
"""Extension point interface for components that require notification
when milestones are created, modified, or deleted."""
def milestone_created(milestone):
"""Called when a milestone is created."""
def milestone_changed(milestone, old_values):
"""Called when a milestone is modified.
`old_values` is a dictionary containing the previous values of the
milestone properties that changed. Currently those properties can be
'name', 'due', 'completed', or 'description'.
"""
def milestone_deleted(milestone):
"""Called when a milestone is deleted."""
class TicketSystem(Component):
implements(IPermissionRequestor, IWikiSyntaxProvider, IResourceManager)
change_listeners = ExtensionPoint(ITicketChangeListener)
milestone_change_listeners = ExtensionPoint(IMilestoneChangeListener)
realm = 'ticket'
ticket_custom_section = ConfigSection('ticket-custom',
"""In this section, you can define additional fields for tickets. See
TracTicketsCustomFields for more details.""")
action_controllers = OrderedExtensionsOption('ticket', 'workflow',
ITicketActionController, default='ConfigurableTicketWorkflow',
include_missing=False,
doc="""Ordered list of workflow controllers to use for ticket actions.
""")
restrict_owner = BoolOption('ticket', 'restrict_owner', 'false',
"""Make the owner field of tickets use a drop-down menu.
Be sure to understand the performance implications before activating
this option. See
[TracTickets#Assign-toasDrop-DownList Assign-to as Drop-Down List].
Please note that e-mail addresses are '''not''' obfuscated in the
resulting drop-down menu, so this option should not be used if
e-mail addresses must remain protected.
""")
default_version = Option('ticket', 'default_version', '',
"""Default version for newly created tickets.""")
default_type = Option('ticket', 'default_type', 'defect',
"""Default type for newly created tickets.""")
default_priority = Option('ticket', 'default_priority', 'major',
"""Default priority for newly created tickets.""")
default_milestone = Option('ticket', 'default_milestone', '',
"""Default milestone for newly created tickets.""")
default_component = Option('ticket', 'default_component', '',
"""Default component for newly created tickets.""")
default_severity = Option('ticket', 'default_severity', '',
"""Default severity for newly created tickets.""")
default_summary = Option('ticket', 'default_summary', '',
"""Default summary (title) for newly created tickets.""")
default_description = Option('ticket', 'default_description', '',
"""Default description for newly created tickets.""")
default_keywords = Option('ticket', 'default_keywords', '',
"""Default keywords for newly created tickets.""")
default_owner = Option('ticket', 'default_owner', '< default >',
"""Default owner for newly created tickets.""")
default_cc = Option('ticket', 'default_cc', '',
"""Default cc: list for newly created tickets.""")
default_resolution = Option('ticket', 'default_resolution', 'fixed',
"""Default resolution for resolving (closing) tickets.""")
optional_fields = ListOption('ticket', 'optional_fields',
'milestone, version', doc=
"""Comma-separated list of `select` fields that can have
an empty value. (//since 1.1.2//)""")
def __init__(self):
self.log.debug('action controllers for ticket workflow: %r',
[c.__class__.__name__ for c in self.action_controllers])
# Public API
def get_available_actions(self, req, ticket):
"""Returns a sorted list of available actions"""
# The list should not have duplicates.
actions = {}
for controller in self.action_controllers:
weighted_actions = controller.get_ticket_actions(req, ticket) or []
for weight, action in weighted_actions:
if action in actions:
actions[action] = max(actions[action], weight)
else:
actions[action] = weight
all_weighted_actions = [(weight, action) for action, weight in
actions.items()]
return [x[1] for x in sorted(all_weighted_actions, reverse=True)]
def get_all_status(self):
"""Returns a sorted list of all the states all of the action
controllers know about."""
valid_states = set()
for controller in self.action_controllers:
valid_states.update(controller.get_all_status() or [])
return sorted(valid_states)
def get_ticket_field_labels(self):
"""Produce a (name,label) mapping from `get_ticket_fields`."""
labels = dict((f['name'], f['label'])
for f in self.get_ticket_fields())
labels['attachment'] = _("Attachment")
return labels
def get_ticket_fields(self):
"""Returns list of fields available for tickets.
Each field is a dict with at least the 'name', 'label' (localized)
and 'type' keys.
It may in addition contain the 'custom' key, the 'optional' and the
'options' keys. When present 'custom' and 'optional' are always `True`.
"""
fields = copy.deepcopy(self.fields)
label = 'label' # workaround gettext extraction bug
for f in fields:
f[label] = gettext(f[label])
return fields
def reset_ticket_fields(self):
"""Invalidate ticket field cache."""
del self.fields
@cached
def fields(self):
"""Return the list of fields available for tickets."""
from trac.ticket import model
fields = TicketFieldList()
# Basic text fields
fields.append({'name': 'summary', 'type': 'text',
'label': N_('Summary')})
fields.append({'name': 'reporter', 'type': 'text',
'label': N_('Reporter')})
# Owner field, by default text but can be changed dynamically
# into a drop-down depending on configuration (restrict_owner=true)
fields.append({'name': 'owner', 'type': 'text',
'label': N_('Owner')})
# Description
fields.append({'name': 'description', 'type': 'textarea',
'format': 'wiki', 'label': N_('Description')})
# Default select and radio fields
selects = [('type', N_('Type'), model.Type),
('status', N_('Status'), model.Status),
('priority', N_('Priority'), model.Priority),
('milestone', N_('Milestone'), model.Milestone),
('component', N_('Component'), model.Component),
('version', N_('Version'), model.Version),
('severity', N_('Severity'), model.Severity),
('resolution', N_('Resolution'), model.Resolution)]
for name, label, cls in selects:
options = [val.name for val in cls.select(self.env)]
if not options:
# Fields without possible values are treated as if they didn't
# exist
continue
field = {'name': name, 'type': 'select', 'label': label,
'value': getattr(self, 'default_' + name, ''),
'options': options}
if name in ('status', 'resolution'):
field['type'] = 'radio'
field['optional'] = True
elif name in self.optional_fields:
field['optional'] = True
fields.append(field)
# Advanced text fields
fields.append({'name': 'keywords', 'type': 'text', 'format': 'list',
'label': N_('Keywords')})
fields.append({'name': 'cc', 'type': 'text', 'format': 'list',
'label': N_('Cc')})
# Date/time fields
fields.append({'name': 'time', 'type': 'time',
'format': 'relative', 'label': N_('Created')})
fields.append({'name': 'changetime', 'type': 'time',
'format': 'relative', 'label': N_('Modified')})
for field in self.custom_fields:
if field['name'] in [f['name'] for f in fields]:
self.log.warning('Duplicate field name "%s" (ignoring)',
field['name'])
continue
if field['name'] in self.reserved_field_names:
self.log.warning('Field name "%s" is a reserved name '
'(ignoring)', field['name'])
continue
if not re.match('^[a-zA-Z][a-zA-Z0-9_]+$', field['name']):
self.log.warning('Invalid name for custom field: "%s" '
'(ignoring)', field['name'])
continue
fields.append(field)
return fields
reserved_field_names = ['report', 'order', 'desc', 'group', 'groupdesc',
'col', 'row', 'format', 'max', 'page', 'verbose',
'comment', 'or']
def get_custom_fields(self):
return copy.deepcopy(self.custom_fields)
@cached
def custom_fields(self):
"""Return the list of custom ticket fields available for tickets."""
fields = TicketFieldList()
config = self.ticket_custom_section
for name in [option for option, value in config.options()
if '.' not in option]:
field = {
'name': name,
'custom': True,
'type': config.get(name),
'order': config.getint(name + '.order', 0),
'label': config.get(name + '.label') or
name.replace("_", " ").strip().capitalize(),
'value': config.get(name + '.value', '')
}
if field['type'] == 'select' or field['type'] == 'radio':
field['options'] = config.getlist(name + '.options', sep='|')
if '' in field['options'] or \
field['name'] in self.optional_fields:
field['optional'] = True
if '' in field['options']:
field['options'].remove('')
elif field['type'] == 'text':
field['format'] = config.get(name + '.format', 'plain')
elif field['type'] == 'textarea':
field['format'] = config.get(name + '.format', 'plain')
field['height'] = config.getint(name + '.rows')
elif field['type'] == 'time':
field['format'] = config.get(name + '.format', 'datetime')
fields.append(field)
fields.sort(lambda x, y: cmp((x['order'], x['name']),
(y['order'], y['name'])))
return fields
def get_field_synonyms(self):
"""Return a mapping from field name synonyms to field names.
The synonyms are supposed to be more intuitive for custom queries."""
# i18n TODO - translated keys
return {'created': 'time', 'modified': 'changetime'}
def eventually_restrict_owner(self, field, ticket=None):
"""Restrict given owner field to be a list of users having
the TICKET_MODIFY permission (for the given ticket)
"""
if self.restrict_owner:
field['type'] = 'select'
allowed_owners = self.get_allowed_owners(ticket)
allowed_owners.insert(0, '< default >')
field['options'] = allowed_owners
field['optional'] = 'owner' in self.optional_fields
def get_allowed_owners(self, ticket=None):
"""Returns a list of permitted ticket owners (those possessing the
TICKET_MODIFY permission). Returns `None` if the option `[ticket]`
`restrict_owner` is `False`.
If `ticket` is not `None`, fine-grained permission checks are used
to determine the allowed owners for the specified resource.
:since: 1.0.3
"""
if self.restrict_owner:
allowed_owners = []
for user in PermissionSystem(self.env) \
.get_users_with_permission('TICKET_MODIFY'):
if not ticket or \
'TICKET_MODIFY' in PermissionCache(self.env, user,
ticket.resource):
allowed_owners.append(user)
allowed_owners.sort()
return allowed_owners
# IPermissionRequestor methods
def get_permission_actions(self):
return ['TICKET_APPEND', 'TICKET_CREATE', 'TICKET_CHGPROP',
'TICKET_VIEW', 'TICKET_EDIT_CC', 'TICKET_EDIT_DESCRIPTION',
'TICKET_EDIT_COMMENT', 'TICKET_BATCH_MODIFY',
('TICKET_MODIFY', ['TICKET_APPEND', 'TICKET_CHGPROP']),
('TICKET_ADMIN', ['TICKET_CREATE', 'TICKET_MODIFY',
'TICKET_VIEW', 'TICKET_EDIT_CC',
'TICKET_EDIT_DESCRIPTION',
'TICKET_EDIT_COMMENT',
'TICKET_BATCH_MODIFY'])]
# IWikiSyntaxProvider methods
def get_link_resolvers(self):
return [('bug', self._format_link),
('ticket', self._format_link),
('comment', self._format_comment_link)]
def get_wiki_syntax(self):
yield (
# matches #... but not &#... (HTML entity)
r"!?(?<!&)#"
# optional intertrac shorthand #T... + digits
r"(?P<it_ticket>%s)%s" % (WikiParser.INTERTRAC_SCHEME,
Ranges.RE_STR),
lambda x, y, z: self._format_link(x, 'ticket', y[1:], y, z))
def _format_link(self, formatter, ns, target, label, fullmatch=None):
intertrac = formatter.shorthand_intertrac_helper(ns, target, label,
fullmatch)
if intertrac:
return intertrac
try:
link, params, fragment = formatter.split_link(target)
r = Ranges(link)
if len(r) == 1:
num = r.a
ticket = formatter.resource(self.realm, num)
from trac.ticket.model import Ticket
if Ticket.id_is_valid(num) and \
'TICKET_VIEW' in formatter.perm(ticket):
# TODO: attempt to retrieve ticket view directly,
# something like: t = Ticket.view(num)
for type, summary, status, resolution in \
self.env.db_query("""
SELECT type, summary, status, resolution
FROM ticket WHERE id=%s
""", (str(num),)):
title = self.format_summary(summary, status,
resolution, type)
href = formatter.href.ticket(num) + params + fragment
return tag.a(label, title=title, href=href,
class_='%s ticket' % status)
else:
ranges = str(r)
if params:
params = '&' + params[1:]
label_wrap = label.replace(',', u',\u200b')
ranges_wrap = ranges.replace(',', u', ')
return tag.a(label_wrap,
title=_("Tickets %(ranges)s", ranges=ranges_wrap),
href=formatter.href.query(id=ranges) + params)
except ValueError:
pass
return tag.a(label, class_='missing ticket')
def _format_comment_link(self, formatter, ns, target, label):
resource = None
if ':' in target:
elts = target.split(':')
if len(elts) == 3:
cnum, realm, id = elts
if cnum != 'description' and cnum and not cnum[0].isdigit():
realm, id, cnum = elts # support old comment: style
id = as_int(id, None)
resource = formatter.resource(realm, id)
else:
resource = formatter.resource
cnum = target
if resource and resource.id and resource.realm == self.realm and \
cnum and (all(c.isdigit() for c in cnum) or cnum == 'description'):
href = title = class_ = None
if self.resource_exists(resource):
from trac.ticket.model import Ticket
ticket = Ticket(self.env, resource.id)
if cnum != 'description' and not ticket.get_change(cnum):
title = _("ticket comment does not exist")
class_ = 'missing ticket'
elif 'TICKET_VIEW' in formatter.perm(resource):
href = formatter.href.ticket(resource.id) + \
"#comment:%s" % cnum
if resource.id != formatter.resource.id:
if cnum == 'description':
title = _("Description for Ticket #%(id)s",
id=resource.id)
else:
title = _("Comment %(cnum)s for Ticket #%(id)s",
cnum=cnum, id=resource.id)
class_ = ticket['status'] + ' ticket'
else:
title = _("Description") if cnum == 'description' \
else _("Comment %(cnum)s",
cnum=cnum)
class_ = 'ticket'
else:
title = _("no permission to view ticket")
class_ = 'forbidden ticket'
else:
title = _("ticket does not exist")
class_ = 'missing ticket'
return tag.a(label, class_=class_, href=href, title=title)
return label
# IResourceManager methods
def get_resource_realms(self):
yield self.realm
def get_resource_description(self, resource, format=None, context=None,
**kwargs):
if format == 'compact':
return '#%s' % resource.id
elif format == 'summary':
from trac.ticket.model import Ticket
ticket = Ticket(self.env, resource.id)
args = [ticket[f] for f in ('summary', 'status', 'resolution',
'type')]
return self.format_summary(*args)
return _("Ticket #%(shortname)s", shortname=resource.id)
def format_summary(self, summary, status=None, resolution=None, type=None):
summary = shorten_line(summary)
if type:
summary = type + ': ' + summary
if status:
if status == 'closed' and resolution:
status += ': ' + resolution
return "%s (%s)" % (summary, status)
else:
return summary
def resource_exists(self, resource):
"""
>>> from trac.test import EnvironmentStub
>>> from trac.resource import Resource, resource_exists
>>> env = EnvironmentStub()
>>> resource_exists(env, Resource('ticket', 123456))
False
>>> from trac.ticket.model import Ticket
>>> t = Ticket(env)
>>> int(t.insert())
1
>>> resource_exists(env, t.resource)
True
"""
if self.env.db_query("SELECT id FROM ticket WHERE id=%s",
(resource.id,)):
if resource.version is None:
return True
revcount = self.env.db_query("""
SELECT count(DISTINCT time) FROM ticket_change WHERE ticket=%s
""", (resource.id,))
return revcount[0][0] >= resource.version
else:
return False
|
the-stack_106_28578 | #
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# System Imports
import json
import os
import pytest
# waflib imports
from waflib import Errors
# lmbrwaflib imports
from lmbrwaflib import unit_test
from lmbrwaflib import lumberyard
BASIC_ENGINE_JSON = {
'FileVersion': 1,
'LumberyardVersion': '0.0.0.0',
'LumberyardCopyrightYear': 2019
}
@pytest.mark.parametrize(
"engine_json, expected_error", [
pytest.param(None, Errors.WafError, id="no_engine_json"),
pytest.param(BASIC_ENGINE_JSON, None, id="internal_engine_json"),
]
)
def test_get_engine_node_internal(tmpdir, engine_json, expected_error):
if engine_json:
engine_json_content = json.dumps(engine_json,
sort_keys=True,
separators=(',', ': '),
indent=4)
tmpdir.ensure('dev', dir=True)
tmpdir.join('dev/engine.json').write(engine_json_content)
try:
fake_context = unit_test.FakeContext(str(tmpdir.realpath()), False)
fake_context.path = fake_context.srcnode
lumberyard.get_engine_node(fake_context)
except expected_error:
pass
@pytest.mark.parametrize(
"external_engine_json, ext_engine_subpath, expected_error", [
pytest.param(None, '../external1', Errors.WafError, id="Invalid_External_engine_path"),
pytest.param(BASIC_ENGINE_JSON, '../external1', None, id="Valid_External_engine_rel_path"),
pytest.param(BASIC_ENGINE_JSON, 'external1', None, id="Valid_External_engine_abs_path")
]
)
def test_get_engine_node_external(tmpdir, external_engine_json, ext_engine_subpath, expected_error):
tmp_working_path = str(tmpdir.realpath())
joined_engine_path = os.path.join(tmp_working_path, os.path.normpath(ext_engine_subpath))
tmp_working_engine_path = os.path.realpath(os.path.normpath(joined_engine_path))
if ext_engine_subpath.startswith('..'):
json_external_engine_path = ext_engine_subpath
else:
json_external_engine_path = tmp_working_engine_path
engine_json = {
'ExternalEnginePath': json_external_engine_path,
'FileVersion': 1,
'LumberyardVersion': '0.0.0.0',
'LumberyardCopyrightYear': 2019
}
rel_path = os.path.relpath(tmp_working_engine_path, tmp_working_path)
tmpdir.ensure(rel_path, dir=True)
engine_json_content = json.dumps(engine_json,
sort_keys=True,
separators=(',', ': '),
indent=4)
tmpdir.ensure('dev', dir=True)
tmpdir.join('dev/engine.json').write(engine_json_content)
if external_engine_json:
external_engine_json_content = json.dumps(external_engine_json,
sort_keys=True,
separators=(',', ': '),
indent=4)
if rel_path.startswith('..'):
rel_path = rel_path[3:]
tmpdir.ensure(rel_path, dir=True)
tmpdir.join('{}/engine.json'.format(rel_path)).write(external_engine_json_content)
try:
fake_context = unit_test.FakeContext(tmp_working_path, False)
fake_context.path = fake_context.srcnode
lumberyard.get_engine_node(fake_context)
except expected_error:
pass
def test_get_all_eligible_use_keywords():
class MockPlatformSettings(object):
def __init__(self):
self.aliases = ['alias_foo']
class MockContext(object):
def get_all_platform_names(self):
return ['platform_foo']
def get_platform_settings(self,platform_name):
assert platform_name == 'platform_foo'
return MockPlatformSettings()
mockCtx = MockContext()
related_keywords = lumberyard.get_all_eligible_use_keywords(mockCtx)
expected_use_keywords = ['use', 'test_use', 'test_all_use', 'platform_foo_use', 'alias_foo_use']
assert len(related_keywords) == len(expected_use_keywords)
for expected_use in expected_use_keywords:
assert expected_use in related_keywords
@pytest.mark.parametrize(
"engine_root_version, experimental_string, expected", [
pytest.param('0.0.0.0', 'False', True),
pytest.param('0.0.0.0', 'True', True),
pytest.param('0.0.0.1', 'False', False),
pytest.param('0.0.0.1', 'True', True)
]
)
def test_should_build_experimental_targets(engine_root_version, experimental_string, expected):
class FakeOptions(object):
def __init__(self, experimental_string):
self.enable_experimental_features = experimental_string
class FakeExperimentContext(object):
def __init__(self, engine_root_version, experimental_string):
self.engine_root_version = engine_root_version
self.options = FakeOptions(experimental_string)
fake_context = FakeExperimentContext(engine_root_version, experimental_string)
result = lumberyard.should_build_experimental_targets(fake_context)
assert result == expected
|
the-stack_106_28580 | # Copyright 2020 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import tempfile
import unittest
import numpy as np
import tensorflow as tf
import torch
from tensorflow_addons.optimizers import SGDW
import fastestimator as fe
from fastestimator.test.unittest_util import is_equal
def get_model_weight_tf(model):
weight = []
for layer in model.layers:
weight.append(layer.get_weights())
return weight
def get_model_weight_lenet_torch(model):
if torch.cuda.device_count() > 1:
model = model.module
weight = []
weight.append(model.conv1.weight.data.numpy())
weight.append(model.conv2.weight.data.numpy())
weight.append(model.conv3.weight.data.numpy())
weight.append(model.fc1.weight.data.numpy())
weight.append(model.fc2.weight.data.numpy())
return weight
class TestLoadModelAndSaveModel(unittest.TestCase):
def test_save_model_and_load_model_tf(self):
m1 = fe.build(fe.architecture.tensorflow.LeNet, optimizer_fn="adam")
weight1 = get_model_weight_tf(m1)
temp_folder = tempfile.mkdtemp()
fe.backend.save_model(m1, save_dir=temp_folder, model_name="test")
m2 = fe.build(fe.architecture.tensorflow.LeNet, optimizer_fn="adam")
weight2 = get_model_weight_tf(m2)
self.assertFalse(is_equal(weight1, weight2))
fe.backend.load_model(m2, weights_path=os.path.join(temp_folder, "test.h5"))
weight3 = get_model_weight_tf(m2)
self.assertTrue(is_equal(weight1, weight3))
def test_save_model_and_load_model_tf_optimizer(self):
m1 = fe.build(fe.architecture.tensorflow.LeNet,
optimizer_fn=lambda: SGDW(weight_decay=2e-5, learning_rate=2e-4))
temp_folder = tempfile.mkdtemp()
fe.backend.save_model(m1, save_dir=temp_folder, model_name="test", save_optimizer=True)
m2 = fe.build(fe.architecture.tensorflow.LeNet,
optimizer_fn=lambda: SGDW(weight_decay=1e-5, learning_rate=1e-4))
fe.backend.load_model(m2, weights_path=os.path.join(temp_folder, "test.h5"), load_optimizer=True)
self.assertTrue(np.allclose(fe.backend.get_lr(model=m2), 2e-4))
self.assertTrue(np.allclose(tf.keras.backend.get_value(m2.current_optimizer.weight_decay), 2e-5))
def test_save_model_and_load_model_torch(self):
m1 = fe.build(fe.architecture.pytorch.LeNet, optimizer_fn="adam")
weight1 = get_model_weight_lenet_torch(m1)
temp_folder = tempfile.mkdtemp()
fe.backend.save_model(m1, save_dir=temp_folder, model_name="test")
m2 = fe.build(fe.architecture.pytorch.LeNet, optimizer_fn="adam")
weight2 = get_model_weight_lenet_torch(m2)
self.assertFalse(is_equal(weight1, weight2))
fe.backend.load_model(m2, weights_path=os.path.join(temp_folder, "test.pt"))
weight3 = get_model_weight_lenet_torch(m2)
self.assertTrue(is_equal(weight1, weight3))
|
the-stack_106_28582 | a=input('Enter the name of the file you want to open:\n')
if len(a)<1:
doc=open('Num.txt','r+')
print('Opening Num.txt...')
else:
doc=open(a,'r+')
Prevnum=''
for line in doc:
x=line.strip()
number=Prevnum+x
Prevnum=number
#print(x)
#for i in x:
#print(number)
#prod=1
i=0
j=13 #use j=4 for test case..
bigProd=0
bigSeq=''
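# Slide a window of 13 adjacent digits across the number, compute the product of
# each window, and keep the largest product together with its digit sequence.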
while (j<=len(number)):
prod=1
for m in number[i:j]:
prod=prod*int(m)
sequence=number[i:j]
if prod>bigProd:
bigProd=prod
bigSeq=sequence
i+=1
j+=1
print('The biggest product of 13 adjacent numbers is',bigProd,'the sequence is',bigSeq)
|
the-stack_106_28583 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PagedServicePartitionInfoList(Model):
"""The list of partition in the cluster for a service. The list is paged when
all of the results cannot fit in a single message. The next set of results
can be obtained by executing the same query with the continuation token
provided in this list.
:param continuation_token: The continuation token parameter is used to
obtain next set of results. The continuation token is included in the
response of the API when the results from the system do not fit in a
single response. When this value is passed to the next API call, the API
returns next set of results. If there are no further results, then the
continuation token is not included in the response.
:type continuation_token: str
:param items: List of service partition information.
:type items: list[~azure.servicefabric.models.ServicePartitionInfo]
"""
_attribute_map = {
'continuation_token': {'key': 'ContinuationToken', 'type': 'str'},
'items': {'key': 'Items', 'type': '[ServicePartitionInfo]'},
}
def __init__(self, continuation_token=None, items=None):
super(PagedServicePartitionInfoList, self).__init__()
self.continuation_token = continuation_token
self.items = items
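# Illustrative paging pattern (the client and method names are assumptions for the
# example, not defined in this file): keep requesting pages until no continuation
# token is returned.
#
#   partitions = []
#   token = None
#   while True:
#       page = client.get_partition_info_list(service_id, continuation_token=token)
#       partitions.extend(page.items or [])
#       token = page.continuation_token
#       if not token:
#           break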
|
the-stack_106_28584 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom version for quantized training and evaluation functions.
The main difference between this and the third_party graph_rewriter_builder.py
is that this version uses experimental_create_training_graph which allows the
customization of freeze_bn_delay.
"""
import re
import tensorflow as tf
from tensorflow.contrib.quantize.python import common
from tensorflow.contrib.quantize.python import input_to_ops
from tensorflow.contrib.quantize.python import quant_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
def build(graph_rewriter_config,
quant_overrides_config=None,
is_training=True,
is_export=False):
"""Returns a function that modifies default graph based on options.
Args:
graph_rewriter_config: graph_rewriter_pb2.GraphRewriter proto.
quant_overrides_config: quant_overrides_pb2.QuantOverrides proto.
is_training: whether in training or eval mode.
is_export: whether exporting the graph.
"""
def graph_rewrite_fn():
"""Function to quantize weights and activation of the default graph."""
if (graph_rewriter_config.quantization.weight_bits != 8 or
graph_rewriter_config.quantization.activation_bits != 8):
raise ValueError('Only 8bit quantization is supported')
graph = tf.get_default_graph()
# Insert custom quant ops.
if quant_overrides_config is not None:
input_to_ops_map = input_to_ops.InputToOps(graph)
for q in quant_overrides_config.quant_configs:
producer = graph.get_operation_by_name(q.op_name)
if producer is None:
raise ValueError('Op name does not exist in graph.')
context = _get_context_from_op(producer)
consumers = input_to_ops_map.ConsumerOperations(producer)
if q.fixed_range:
_insert_fixed_quant_op(
context,
q.quant_op_name,
producer,
consumers,
init_min=q.min,
init_max=q.max,
quant_delay=q.delay if is_training else 0)
else:
raise ValueError('Learned ranges are not yet supported.')
# Quantize the graph by inserting quantize ops for weights and activations
if is_training:
tf.contrib.quantize.experimental_create_training_graph(
input_graph=graph,
quant_delay=graph_rewriter_config.quantization.delay,
freeze_bn_delay=graph_rewriter_config.quantization.delay)
else:
tf.contrib.quantize.experimental_create_eval_graph(
input_graph=graph,
quant_delay=graph_rewriter_config.quantization.delay
if not is_export else 0)
tf.contrib.layers.summarize_collection('quant_vars')
return graph_rewrite_fn
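# Illustrative call site (configuration values are assumptions for the example):
#
#   graph_rewrite_fn = build(graph_rewriter_config,
#                            quant_overrides_config=None,
#                            is_training=True)
#   graph_rewrite_fn()  # inserts the quantization ops into the default graph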
def _get_context_from_op(op):
"""Gets the root context name from the op name."""
context_re = re.search(r'^(.*)/([^/]+)', op.name)
if context_re:
return context_re.group(1)
return ''
def _insert_fixed_quant_op(context,
name,
producer,
consumers,
init_min=-6.0,
init_max=6.0,
quant_delay=None):
"""Adds a fake quant op with fixed ranges.
Args:
context: The parent scope of the op to be quantized.
name: The name of the fake quant op.
producer: The producer op to be quantized.
consumers: The consumer ops to the producer op.
init_min: The minimum range for the fake quant op.
init_max: The maximum range for the fake quant op.
quant_delay: Number of steps to wait before activating the fake quant op.
Raises:
ValueError: When producer operation is not directly connected to the
consumer operation.
"""
name_prefix = name if not context else context + '/' + name
inputs = producer.outputs[0]
quant = quant_ops.FixedQuantize(
inputs, init_min=init_min, init_max=init_max, scope=name_prefix)
if quant_delay and quant_delay > 0:
activate_quant = math_ops.greater_equal(
common.CreateOrGetQuantizationStep(),
quant_delay,
name=name_prefix + '/activate_quant')
quant = control_flow_ops.cond(
activate_quant,
lambda: quant,
lambda: inputs,
name=name_prefix + '/delayed_quant')
if consumers:
tensors_modified_count = common.RerouteTensor(
quant, inputs, can_modify=consumers)
# Some operations can have multiple output tensors going to the same
# consumer. Since consumers is a set, we need to ensure that
# tensors_modified_count is greater than or equal to the length of the set
# of consumers.
if tensors_modified_count < len(consumers):
raise ValueError('No inputs quantized for ops: [%s]' % ', '.join(
[consumer.name for consumer in consumers]))
|
the-stack_106_28588 | """
Copyright (C) 2020 Dabble Lab - All Rights Reserved
You may use, distribute and modify this code under the
terms and conditions defined in file 'LICENSE.txt', which
is part of this source code package.
For additional copyright information please
visit : http://dabblelab.com/copyright
"""
from ask_sdk_core.utils import is_request_type, is_intent_name
from ask_sdk_core.dispatch_components import (AbstractRequestHandler, AbstractExceptionHandler, AbstractRequestInterceptor, AbstractResponseInterceptor)
from ask_sdk_core.skill_builder import SkillBuilder
import logging
import json
import requests
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
#Handlers
class GetRemoteDataIntentHandler(AbstractRequestHandler):
def can_handle(self, handler_input):
return( is_request_type("LaunchRequest")(handler_input) or
is_intent_name("GetRemoteDataIntent")(handler_input))
def handle(self,handler_input):
speech_output = "This is the default message without API call."
data = requests.get("http://api.open-notify.org/astros.json")
data = json.loads(data.text)
speech_output = "There are currently {} astronauts in space.".format(len(data["people"]))
i= 0
while(i<len(data["people"])):
if(i==0):
name = data["people"][i]['name']
speech_output = "{} Their names are: {}, ".format(speech_output,name)
i+=1
elif(i==len(data["people"])-1):
name = data["people"][i]['name']
speech_output = "{} and {}.".format(speech_output,name)
i+=1
else:
name = data["people"][i]['name']
speech_output = "{} {},".format(speech_output,name)
i+=1
return (
handler_input.response_builder
.speak(speech_output)
.response
)
class CancelOrStopIntentHandler(AbstractRequestHandler):
def can_handle(self, handler_input):
return (is_intent_name("AMAZON.CancelIntent")(handler_input) or
is_intent_name("AMAZON.StopIntent")(handler_input))
def handle(self, handler_input):
speech_output = "This is the cancel message."
return (
handler_input.response_builder
.speak(speech_output)
.set_should_end_session(True)
.response
)
class HelpIntentHandler(AbstractRequestHandler):
def can_handle(self, handler_input):
return is_intent_name("AMAZON.HelpIntent")(handler_input)
def handle(self, handler_input):
speech_output = "This is the help response."
reprompt = "This is the help reprompt."
return (
handler_input.response_builder
.speak(speech_output)
.ask(reprompt)
.response
)
# This handler catches utterances that can't be matched to any other intent handler.
class FallbackIntentHandler(AbstractRequestHandler):
def can_handle(self, handler_input):
return is_intent_name("AMAZON.FallbackIntent")(handler_input)
def handle(self, handler_input):
speech_output = "This is the fallback response."
reprompt = "This is the fallback reprompt."
return (
handler_input.response_builder
.speak(speech_output)
.ask(reprompt)
.response
)
class SessionEndedRequesthandler(AbstractRequestHandler):
def can_handle(self, handler_input):
return is_request_type("SessionEndedRequest")(handler_input)
def handle(self, handler_input):
logger.info("Session ended with the reason: {}".format(handler_input.request_envelope.request.reason))
return handler_input.response_builder.response
# This handler catches syntax or routing errors. If you receive an error stating that
# a request handler was not found, you have not implemented a handler for the intent
# or included it in the skill builder below.
class CatchAllExceptionHandler(AbstractExceptionHandler):
def can_handle(self, handler_input, exception):
return True
def handle(self, handler_input, exception):
logger.error(exception, exc_info=True)
speech_output = "Sorry, I couldn't do what you asked. Please try again."
reprompt = "What would you like to do?"
return (
handler_input.response_builder
.speak(speech_output)
.ask(reprompt)
.response
)
class RequestLogger(AbstractRequestInterceptor):
def process(self, handler_input):
logger.debug("Alexa Request: {}".format(
handler_input.request_envelope.request))
class ResponseLogger(AbstractResponseInterceptor):
def process(self, handler_input, response):
logger.debug("Alexa Response: {}".format(response))
sb = SkillBuilder()
sb.add_request_handler(GetRemoteDataIntentHandler())
sb.add_request_handler(CancelOrStopIntentHandler())
sb.add_request_handler(HelpIntentHandler())
sb.add_request_handler(FallbackIntentHandler())
sb.add_request_handler(SessionEndedRequesthandler())
sb.add_exception_handler(CatchAllExceptionHandler())
sb.add_global_request_interceptor(RequestLogger())
sb.add_global_response_interceptor(ResponseLogger())
lambda_handler = sb.lambda_handler()
|
the-stack_106_28590 | import logging
import logging.handlers
import os
import sys
def init():
logging.getLogger().setLevel(logging.NOTSET)
# Add stdout handler, with level INFO
console = logging.StreamHandler(sys.stdout)
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-13s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger().addHandler(console)
    # Add file rotating handler, with level ERROR
    os.makedirs('temp', exist_ok=True)  # ensure the log directory exists before attaching the handler
    rotating_handler = logging.handlers.RotatingFileHandler(filename='temp/rotating.log', maxBytes=10242880,
backupCount=5)
rotating_handler.setLevel(logging.ERROR)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
rotating_handler.setFormatter(formatter)
logging.getLogger().addHandler(rotating_handler)
# return logging
# log = logging.getLogger("app." + __name__)
#
# log.debug('Debug message, should only appear in the file.')
# log.info('Info message, should appear in file and stdout.')
# log.warning('Warning message, should appear in file and stdout. ')
# log.error('Error message, should appear in file and stdout.')
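# Minimal usage sketch (editorial addition, not part of the original module):
if __name__ == "__main__":
    init()
    demo_log = logging.getLogger("app.demo")
    demo_log.info('Info message, should appear in file and stdout.')
    demo_log.error('Error message, should appear in file and stdout.')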
|
the-stack_106_28591 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# 3rd party imports
import numpy as np
import xarray as xr
__author__ = "Louis Richard"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020-2021"
__license__ = "MIT"
__version__ = "2.3.7"
__status__ = "Prototype"
def _idx_closest(lst0, lst1):
return [(np.abs(np.asarray(lst0) - k)).argmin() for k in lst1]
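# Editorial illustration (not part of the original module): _idx_closest maps each
# value of lst1 to the index of its nearest neighbour in lst0.
assert _idx_closest([1.0, 2.0, 3.0], [1.1, 2.9]) == [0, 2]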
def eis_skymap_combine_sc(skymaps):
r"""Generate composite skymap from the EIS sensors across the MMS
spacecraft.
Parameters
----------
skymaps : list of xarray.DataArray
Skymap distribution for all spacecraft.
Returns
-------
out : xarray.Dataset
Composite skymap distribution
See Also
--------
pyrfu.mms.get_eis_allt, pyrfu.mms.eis_pad,
    pyrfu.mms.eis_spec_combine_sc
"""
# Determine spacecraft with smallest number of time steps to use as
# reference spacecraft
time_size = [len(probe.time.data) for probe in skymaps]
ref_sc_time_size, ref_sc_loc = [np.min(time_size), np.argmin(time_size)]
ref_probe = skymaps[ref_sc_loc]
# Define common energy grid across EIS instruments
n_en_chans = [probe.energy.shape[1] for probe in skymaps]
size_en, loc_ref_en = [np.min(n_en_chans), np.argmin(n_en_chans)]
ref_energy = skymaps[loc_ref_en].energy.data[0, :]
energy_data, e_plus, e_minu = [[], [], []]
for probe in skymaps:
idx = _idx_closest(probe.energy.data[0, :], ref_energy)
energy_data.append(probe.energy.data[0, idx])
e_minu.append(probe.attrs["energy_dminus"][idx])
e_plus.append(probe.attrs["energy_dplus"][idx])
energy_data = np.stack(energy_data)
common_energy = np.nanmean(energy_data, axis=0)
common_energy = np.tile(common_energy, (ref_sc_time_size, 1))
#
e_minu = np.stack(e_minu)
e_plus = np.stack(e_plus)
common_minu = np.nanmean(e_minu, axis=0)
common_plus = np.nanmean(e_plus, axis=0)
# Use azimuthal and elevation angle from reference spacecraft (in
# practice they are the same for all spacecraft)
phi = ref_probe.phi.data
theta = ref_probe.theta.data
allmms_skymap = np.zeros([ref_sc_time_size, size_en, phi.shape[1],
len(theta), len(skymaps)])
for p, skymap in enumerate(skymaps):
idx_en = _idx_closest(skymap.energy.data[0, :], common_energy[0, :])
allmms_skymap[..., p] = skymap.data[:ref_sc_time_size, idx_en, ...]
# Average the four spacecraft
allmms_skymap_avg = np.nanmean(allmms_skymap, axis=-1)
# Create combined skymap
out_dict = {"time": ref_probe.time.data,
"idx0": range(common_energy.shape[1]),
"idx1": range(phi.shape[1]), "idx2": range(len(theta)),
"data": (["time", "idx0", "idx1", "idx2"], allmms_skymap_avg),
"energy": (["time", "idx0"], common_energy),
"phi": (["time", "idx1"], phi), "theta": (["idx2"], theta)}
out = xr.Dataset(out_dict)
out.attrs["energy_dminus"] = common_minu
out.attrs["energy_dplus"] = common_plus
return out
|
the-stack_106_28593 | from typing import List
class Solution:
def maxUncrossedLines(self, A: List[int], B: List[int]) -> int:
m = len(A)
n = len(B)
dp = [[0] * (n + 1) for _ in range(m + 1)]
for i in range(1, m + 1):
for j in range(1, n + 1):
dp[i][j] = dp[i - 1][j - 1] + 1 if A[i - 1] == B[j - 1] \
else max(dp[i - 1][j], dp[i][j - 1])
return dp[m][n]
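# Example usage (editorial sketch, not part of the original solution): the result
# equals the length of the longest common subsequence of A and B.
if __name__ == "__main__":
    print(Solution().maxUncrossedLines([1, 4, 2], [1, 2, 4]))  # prints 2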
|
the-stack_106_28595 | import augment
import numpy as np
import torch
import torchaudio
import argparse
import yaml
import os
import random
from tqdm import tqdm
import threading
def aug_pitch(audio, sr=16000, low_pitch=-350, high_pitch=300):
random_pitch_shift = lambda: np.random.randint(low_pitch, high_pitch)
y = augment.EffectChain().pitch(random_pitch_shift).rate(sr).apply(audio, src_info={'rate': sr})
return y
def aug_reverb(audio, sr=16000, max_reverb=100):
y = augment.EffectChain().reverb(random.randint(1, max_reverb),
random.randint(1, max_reverb),
random.randint(1, max_reverb)) \
.channels(1) \
.apply(audio, src_info={'rate': sr})
return y
def aug_dropout(audio, sr=16000, max_time_drop=0.3, max_times=8):
effect = augment.EffectChain()
for _ in range(random.randint(1, max_times)):
effect = effect.time_dropout(max_seconds=max_time_drop)
y = effect.apply(audio, src_info={'rate': sr})
return y
def aug_tempo(audio, sr=16000, tempo=1, min_tempo=0.85, max_tempo=1.15):
if tempo == 1:
while abs(tempo - 1) < 0.01:
tempo = random.uniform(min_tempo, max_tempo)
y = augment.EffectChain().tempo(tempo).apply(audio, src_info={'rate': sr})
return y
def aug_noise(audio, sr=16000, low_noise=5, high_noise=13):
noise_generator = lambda: torch.zeros_like(audio).uniform_()
y = augment.EffectChain().additive_noise(noise_generator, snr=random.randint(low_noise, high_noise)) \
.apply(audio, src_info={'rate': sr})
return y
def aug_sinc(audio, sr=16000, min_sinc=40, max_sinc=180):
sinc = random.randint(min_sinc, max_sinc)
y = augment.EffectChain().sinc('-a', str(sinc), '500-100').apply(audio, src_info={'rate': sr})
return y
def aug_gain(audio, sr=16000, volume=25):
gain_volume = 0
while gain_volume == 0:
gain_volume = random.randint(-volume, volume)
y = augment.EffectChain().gain(gain_volume).apply(audio, src_info={'rate': sr})
return y
def aug_combination(audio, sr=16000, path=None):
# effect_list = [aug_pitch, aug_reverb, aug_dropout, aug_tempo, aug_noise, aug_sinc, aug_gain]
effect_list = [aug_pitch, aug_reverb, aug_dropout, aug_tempo, aug_sinc]
num_to_select = random.randint(3, len(effect_list))
effects = random.sample(effect_list, num_to_select)
for effect in effects:
audio = effect(audio, sr=sr)
torchaudio.save(path, audio, sr, format='mp3')
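# Minimal usage sketch (editorial; the file paths below are placeholders):
# audio, sr = torchaudio.load("clip.mp3")
# aug_combination(audio, sr=sr, path="clip_aug.mp3")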
def main(config):
random.seed(1234)
temp_dir = config["path"]["temp_dir"]
subs = ["hum", "song"]
for sub in subs:
sound_path = os.path.join(temp_dir, "train", sub)
tries = config["tries"]
aug_path = os.path.join(temp_dir, 'augment', 'train', sub)
os.makedirs(aug_path, exist_ok=True)
thds = []
for file in tqdm(os.listdir(sound_path)):
audio, sr = torchaudio.load(os.path.join(sound_path, file))
for i in range(tries):
filename = file[:-4] + "_aug" + str(i) + file[-4:]
t1 = threading.Thread(target=aug_combination, args=(audio, sr, os.path.join(aug_path, filename),))
thds.append(t1)
t1.start()
for t in thds:
t.join()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, required=False,
default="config/preprocess.yaml",
help="path to preprocess.yaml")
parser.add_argument("--tempdir", type=str, required=False, help="path to input/outdir")
parser.add_argument("--tries", type=int, default=5, required=False, help="number of tries")
args = parser.parse_args()
config = yaml.load(open(args.config, "r"), Loader=yaml.FullLoader)
config["tries"] = args.tries
if args.tempdir is not None:
config["path"]["temp_dir"] = args.tempdir
main(config) |
the-stack_106_28597 | from modules.ImageCreator import ImageCreator
from modules.BamHandler import BamHandler
from modules.FastaHandler import FastaHandler
import os
class Bed2ImageAPI:
"""
Works as a main class and handles user interaction with different modules.
"""
def __init__(self, bam_file_path, reference_file_path):
# --- initialize handlers ---
self.bam_handler = BamHandler(bam_file_path)
self.fasta_handler = FastaHandler(reference_file_path)
@staticmethod
def create_image(bam_handler, fasta_handler, bed_record, output_dir, file_name):
"""
Create an image from a bed record
:param bam_handler: Handles bam file
:param fasta_handler: Handles fasta file
:param bed_record: Bed record
:return: Imagearray, label
"""
chromosome_name, start_position, end_position, ref, alts, genotype, qual, g_filter, in_conf = \
tuple(bed_record.rstrip().split('\t'))
start_position = int(start_position)
genotype = int(genotype)
pileups = bam_handler.get_pileupcolumns_aligned_to_a_site(chromosome_name, start_position)
image_creator = ImageCreator(fasta_handler, pileups, chromosome_name, start_position, genotype, alts)
image_array, image_shape = image_creator.create_image(start_position, ref, alts)
image_creator.save_image_as_png(image_array, output_dir, file_name)
return image_array, genotype, image_shape |
the-stack_106_28598 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import os.path
import shutil
from glob import glob
import sys
import setuptools
from setuptools import Extension
from setuptools.command.build_ext import build_ext
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "pythia"))
with open("README.md", encoding="utf8") as f:
readme = f.read()
with open("LICENSE") as f:
license = f.read()
with open("requirements.txt") as f:
reqs = f.read()
DISTNAME = "pythia"
DESCRIPTION = "pythia: a modular framework for vision and language multimodal \
research."
LONG_DESCRIPTION = readme
AUTHOR = "Facebook AI Research"
LICENSE = license
REQUIREMENTS = (reqs.strip().split("\n"),)
ext_modules = [
Extension(
'cphoc',
sources=['pythia/utils/phoc/src/cphoc.c'],
language='c',
libraries=["pthread", "dl", "util", "rt", "m"],
extra_compile_args=["-O3"],
),
]
class BuildExt(build_ext):
def run(self):
build_ext.run(self)
cphoc_lib = glob('build/lib.*/cphoc.*.so')[0]
shutil.copy(cphoc_lib, 'pythia/utils/phoc/cphoc.so')
if __name__ == "__main__":
setuptools.setup(
name=DISTNAME,
install_requires=REQUIREMENTS,
packages=setuptools.find_packages(),
ext_modules=ext_modules,
cmdclass={'build_ext': BuildExt},
version="0.3",
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
license=LICENSE,
setup_requires=["pytest-runner"],
tests_require=["flake8", "pytest"],
)
|
the-stack_106_28599 | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2019 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
from math import pi
from numpy import sign, nan, append, zeros, array, sqrt, where
from numpy import max as max_
from pandas import Series, DataFrame, concat
from pandapower.pypower.idx_gen import GEN_BUS, PMIN, PMAX, QMIN, QMAX, GEN_STATUS
from pandapower.pypower.idx_cost import COST, NCOST
from pandapower.pypower.idx_bus import BUS_I, BASE_KV
import pandapower as pp
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
try:
from pypower import ppoption, runpf, runopf, rundcpf, rundcopf
ppopt = ppoption.ppoption(VERBOSE=0, OUT_ALL=0)
pypower_import = True
except ImportError:
pypower_import = False
ppc_elms = ["bus", "branch", "gen"]
def _create_costs(net, ppc, gen_lookup, type, idx):
if ppc['gencost'][idx, 0] == 1:
if not len(ppc['gencost'][idx, COST:]) == 2*ppc['gencost'][idx, NCOST]:
logger.error("In gencost line %s, the number n does not fit to the number of values" %
idx)
raise NotImplementedError
pp.create_pwl_cost(net, gen_lookup.element.at[idx],
gen_lookup.element_type.at[idx],
ppc['gencost'][idx, 4:], type)
elif ppc['gencost'][idx, 0] == 2:
ncost = ppc['gencost'][idx, NCOST]
if ncost == 2:
cp2 = 0
cp1 = ppc['gencost'][idx, COST]
cp0 = ppc['gencost'][idx, COST + 1]
elif ncost == 3:
cp2 = ppc['gencost'][idx, COST]
cp1 = ppc['gencost'][idx, COST + 1]
cp0 = ppc['gencost'][idx, COST + 2]
pp.create_poly_cost(net, gen_lookup.element.at[idx], gen_lookup.element_type.at[idx],
cp1_eur_per_mw=cp1, cp2_eur_per_mw2=cp2, cp0_eur=cp0)
else:
logger.info("Cost mode of gencost line %s is unknown." % idx)
def _gen_bus_info(ppc, idx_gen):
bus_name = int(ppc["gen"][idx_gen, GEN_BUS])
# assumption: there is only one bus with this bus_name:
idx_bus = int(where(ppc["bus"][:, BUS_I] == bus_name)[0][0])
current_bus_type = int(ppc["bus"][idx_bus, 1])
same_bus_gen_idx = where(ppc["gen"][:, GEN_BUS] == ppc["gen"][idx_gen, GEN_BUS])[0].astype(int)
same_bus_in_service_gen_idx = same_bus_gen_idx[where(ppc["gen"][same_bus_gen_idx, GEN_STATUS] > 0)]
first_same_bus_in_service_gen_idx = same_bus_in_service_gen_idx[0] if len(
same_bus_in_service_gen_idx) else None
last_same_bus_in_service_gen_idx = same_bus_in_service_gen_idx[-1] if len(
same_bus_in_service_gen_idx) else None
return current_bus_type, idx_bus, same_bus_gen_idx, first_same_bus_in_service_gen_idx, \
last_same_bus_in_service_gen_idx
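# Editorial note: _gen_bus_info returns, for generator row idx_gen, the bus type
# (1 = PQ, 2 = PV, 3 = slack), the matching bus row index and the indices of
# all / first / last in-service generators connected to the same bus.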
def from_ppc(ppc, f_hz=50, validate_conversion=False, **kwargs):
"""
This function converts pypower case files to pandapower net structure.
INPUT:
**ppc** : The pypower case file.
OPTIONAL:
**f_hz** (float, 50) - The frequency of the network.
**validate_conversion** (bool, False) - If True, validate_from_ppc is run after conversion.
For running the validation, the ppc must already contain the pypower
powerflow results or pypower must be importable.
****kwargs** keyword arguments for validate_from_ppc if validate_conversion is True
OUTPUT:
**net** : pandapower net.
EXAMPLE:
import pandapower.converter as pc
from pypower import case4gs
ppc_net = case4gs.case4gs()
net = pc.from_ppc(ppc_net, f_hz=60)
"""
# --- catch common failures
if Series(ppc['bus'][:, BASE_KV] <= 0).any():
        logger.info('There are invalid baseKV values given in the pypower case file.')
# --- general_parameters
baseMVA = ppc['baseMVA'] # MVA
omega = pi * f_hz # 1/s
MAX_VAL = 99999.
net = pp.create_empty_network(f_hz=f_hz, sn_mva=baseMVA)
# --- bus data -> create buses, sgen, load, shunt
for i in range(len(ppc['bus'])):
# create buses
pp.create_bus(net, name=int(ppc['bus'][i, 0]), vn_kv=ppc['bus'][i, 9], type="b",
zone=ppc['bus'][i, 6], in_service=bool(ppc['bus'][i, 1] != 4),
max_vm_pu=ppc['bus'][i, 11], min_vm_pu=ppc['bus'][i, 12])
# create sgen, load
if ppc['bus'][i, 2] > 0:
pp.create_load(net, i, p_mw=ppc['bus'][i, 2], q_mvar=ppc['bus'][i, 3],
controllable=False)
elif ppc['bus'][i, 2] < 0:
pp.create_sgen(net, i, p_mw=-ppc['bus'][i, 2], q_mvar=-ppc['bus'][i, 3],
type="", controllable=False)
elif ppc['bus'][i, 3] != 0:
pp.create_load(net, i, p_mw=ppc['bus'][i, 2], q_mvar=ppc['bus'][i, 3],
controllable=False)
# create shunt
if ppc['bus'][i, 4] != 0 or ppc['bus'][i, 5] != 0:
pp.create_shunt(net, i, p_mw=ppc['bus'][i, 4],
q_mvar=-ppc['bus'][i, 5])
# unused data of ppc: Vm, Va (partwise: in ext_grid), zone
# --- gen data -> create ext_grid, gen, sgen
gen_lookup = DataFrame(nan, columns=['element', 'element_type'],
index=range(len(ppc['gen'][:, 0])))
# if in ppc is only one gen -> numpy initially uses one dim array -> change to two dim array
if len(ppc["gen"].shape) == 1:
ppc["gen"] = array(ppc["gen"], ndmin=2)
for i in range(len(ppc['gen'][:, 0])):
current_bus_type, current_bus_idx, same_bus_gen_idx, first_same_bus_in_service_gen_idx, \
last_same_bus_in_service_gen_idx = _gen_bus_info(ppc, i)
# create ext_grid
if current_bus_type == 3:
if i == first_same_bus_in_service_gen_idx:
gen_lookup.element.loc[i] = pp.create_ext_grid(
net, bus=current_bus_idx, vm_pu=ppc['gen'][last_same_bus_in_service_gen_idx, 5],
va_degree=ppc['bus'][current_bus_idx, 8], in_service=bool(ppc['gen'][i, 7] > 0),
max_p_mw=ppc['gen'][i, PMAX], min_p_mw=ppc['gen'][i, PMIN],
max_q_mvar=ppc['gen'][i, QMAX], min_q_mvar=ppc['gen'][i, QMIN])
gen_lookup.element_type.loc[i] = 'ext_grid'
if ppc['gen'][i, 4] > ppc['gen'][i, 3]:
logger.info('min_q_mvar of gen %d must be less than max_q_mvar but is not.' % i)
if -ppc['gen'][i, 9] < -ppc['gen'][i, 8]:
logger.info('max_p_mw of gen %d must be less than min_p_mw but is not.' % i)
else:
current_bus_type = 1
# create gen
elif current_bus_type == 2:
if i == first_same_bus_in_service_gen_idx:
gen_lookup.element.loc[i] = pp.create_gen(
net, bus=current_bus_idx, vm_pu=ppc['gen'][last_same_bus_in_service_gen_idx, 5],
p_mw=ppc['gen'][i, 1],
in_service=bool(ppc['gen'][i, 7] > 0), controllable=True,
max_p_mw=ppc['gen'][i, PMAX], min_p_mw=ppc['gen'][i, PMIN],
max_q_mvar=ppc['gen'][i, QMAX], min_q_mvar=ppc['gen'][i, QMIN])
gen_lookup.element_type.loc[i] = 'gen'
if ppc['gen'][i, 1] < 0:
logger.info('p_mw of gen %d must be less than zero but is not.' % i)
if ppc['gen'][i, 4] > ppc['gen'][i, 3]:
logger.info('min_q_mvar of gen %d must be less than max_q_mvar but is not.' % i)
if -ppc['gen'][i, 9] < -ppc['gen'][i, 8]:
logger.info('max_p_mw of gen %d must be less than min_p_mw but is not.' % i)
else:
current_bus_type = 1
# create sgen
if current_bus_type == 1:
gen_lookup.element.loc[i] = pp.create_sgen(
net, bus=current_bus_idx, p_mw=ppc['gen'][i, 1],
q_mvar=ppc['gen'][i, 2], type="", in_service=bool(ppc['gen'][i, 7] > 0),
max_p_mw=ppc['gen'][i, PMAX], min_p_mw=ppc['gen'][i, PMIN],
max_q_mvar=ppc['gen'][i, QMAX], min_q_mvar=ppc['gen'][i, QMIN],
controllable=True)
gen_lookup.element_type.loc[i] = 'sgen'
if ppc['gen'][i, 1] < 0:
logger.info('p_mw of sgen %d must be less than zero but is not.' % i)
if ppc['gen'][i, 4] > ppc['gen'][i, 3]:
logger.info('min_q_mvar of gen %d must be less than max_q_mvar but is not.' % i)
if -ppc['gen'][i, 9] < -ppc['gen'][i, 8]:
logger.info('max_p_mw of gen %d must be less than min_p_mw but is not.' % i)
# unused data of ppc: Vg (partwise: in ext_grid and gen), mBase, Pc1, Pc2, Qc1min, Qc1max,
# Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30,ramp_q, apf
# --- branch data -> create line, trafo
for i in range(len(ppc['branch'])):
from_bus = pp.get_element_index(net, 'bus', name=int(ppc['branch'][i, 0]))
to_bus = pp.get_element_index(net, 'bus', name=int(ppc['branch'][i, 1]))
from_vn_kv = ppc['bus'][from_bus, 9]
to_vn_kv = ppc['bus'][to_bus, 9]
if (from_vn_kv == to_vn_kv) & ((ppc['branch'][i, 8] == 0) | (ppc['branch'][i, 8] == 1)) & \
(ppc['branch'][i, 9] == 0): # create line
Zni = ppc['bus'][to_bus, 9]**2/baseMVA # ohm
max_i_ka = ppc['branch'][i, 5]/ppc['bus'][to_bus, 9]/sqrt(3)
if max_i_ka == 0.0:
max_i_ka = MAX_VAL
logger.debug("ppc branch rateA is zero -> Using MAX_VAL instead to calculate " +
"maximum branch flow")
pp.create_line_from_parameters(
net, from_bus=from_bus, to_bus=to_bus, length_km=1,
r_ohm_per_km=ppc['branch'][i, 2]*Zni, x_ohm_per_km=ppc['branch'][i, 3]*Zni,
c_nf_per_km=ppc['branch'][i, 4]/Zni/omega*1e9/2,
max_i_ka=max_i_ka, type='ol', max_loading_percent=100,
in_service=bool(ppc['branch'][i, 10]))
else: # create transformer
if from_vn_kv >= to_vn_kv:
hv_bus = from_bus
vn_hv_kv = from_vn_kv
lv_bus = to_bus
vn_lv_kv = to_vn_kv
tap_side = 'hv'
else:
hv_bus = to_bus
vn_hv_kv = to_vn_kv
lv_bus = from_bus
vn_lv_kv = from_vn_kv
tap_side = 'lv'
if from_vn_kv == to_vn_kv:
logger.warning('The pypower branch %d (from_bus, to_bus)=(%d, %d) is considered'
' as a transformer because of a ratio != 0 | 1 but it connects '
'the same voltage level', i, ppc['branch'][i, 0],
ppc['branch'][i, 1])
rk = ppc['branch'][i, 2]
xk = ppc['branch'][i, 3]
zk = (rk ** 2 + xk ** 2) ** 0.5
sn = ppc['branch'][i, 5]
if sn == 0.0:
sn = MAX_VAL
logger.debug("ppc branch rateA is zero -> Using MAX_VAL instead to calculate " +
"apparent power")
ratio_1 = 0 if ppc['branch'][i, 8] == 0 else (ppc['branch'][i, 8] - 1) * 100
i0_percent = -ppc['branch'][i, 4] * 100 * baseMVA / sn
if i0_percent < 0:
                logger.info('A transformer always behaves inductively (consuming reactive power), '
                            'but the susceptance of pypower branch %d (from_bus, to_bus)=(%d, %d) is '
'positive.', i, ppc['branch'][i, 0], ppc['branch'][i, 1])
pp.create_transformer_from_parameters(
net, hv_bus=hv_bus, lv_bus=lv_bus, sn_mva=sn, vn_hv_kv=vn_hv_kv,
vn_lv_kv=vn_lv_kv, vk_percent=sign(xk) * zk * sn * 100 / baseMVA,
vkr_percent=rk * sn * 100 / baseMVA, max_loading_percent=100,
pfe_kw=0, i0_percent=i0_percent, shift_degree=ppc['branch'][i, 9],
tap_step_percent=abs(ratio_1) if ratio_1 else nan,
tap_pos=sign(ratio_1) if ratio_1 else nan,
tap_side=tap_side if ratio_1 else None, tap_neutral=0 if ratio_1 else nan)
# unused data of ppc: rateB, rateC
# --- gencost -> create polynomial_cost, piecewise_cost
if 'gencost' in ppc:
if len(ppc['gencost'].shape) == 1:
# reshape gencost if only one gencost is given -> no indexError
ppc['gencost'] = ppc['gencost'].reshape((1, -1))
if ppc['gencost'].shape[0] <= gen_lookup.shape[0]:
idx_p = range(ppc['gencost'].shape[0])
idx_q = []
elif ppc['gencost'].shape[0] > gen_lookup.shape[0]:
idx_p = range(gen_lookup.shape[0])
idx_q = range(gen_lookup.shape[0], ppc['gencost'].shape[0])
if ppc['gencost'].shape[0] >= 2*gen_lookup.shape[0]:
idx_p = range(gen_lookup.shape[0])
idx_q = range(gen_lookup.shape[0], 2*gen_lookup.shape[0])
for idx in idx_p:
_create_costs(net, ppc, gen_lookup, 'p', idx)
for idx in idx_q:
_create_costs(net, ppc, gen_lookup, 'q', idx)
# areas are unconverted
if validate_conversion:
logger.setLevel(logging.DEBUG)
if not validate_from_ppc(ppc, net, **kwargs):
logger.error("Validation failed.")
return net
def _validate_diff_res(diff_res, max_diff_values):
to_iterate = set(max_diff_values.keys()) & {'gen_q_mvar', 'branch_p_mw', 'branch_q_mvar',
'gen_p_mw', 'bus_va_degree', 'bus_vm_pu'}
if not len(to_iterate):
logger.warning("There are no keys to validate.")
val = True
for i in to_iterate:
elm = i.split("_")[0]
sought = ["p", "q"] if elm != "bus" else ["vm", "va"]
col = int(array([0, 1])[[j in i for j in sought]][0]) if elm != "branch" else \
list(array([[0, 2], [1, 3]])[[j in i for j in sought]][0])
val &= bool(max_(abs(diff_res[elm][:, col])) < max_diff_values[i])
return val
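# Editorial illustration (kept as a comment, not executed): with zero differences
# every threshold in max_diff_values is met, e.g.
# _validate_diff_res({"bus": zeros((1, 2)), "branch": zeros((1, 4)),
#                     "gen": zeros((1, 2))},
#                    {"bus_vm_pu": 1e-6, "bus_va_degree": 1e-5, "branch_p_mw": 1e-6,
#                     "branch_q_mvar": 1e-6, "gen_p_mw": 1e-6, "gen_q_mvar": 1e-6})
# returns True.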
def validate_from_ppc(ppc_net, net, pf_type="runpp", max_diff_values={
"bus_vm_pu": 1e-6, "bus_va_degree": 1e-5, "branch_p_mw": 1e-6, "branch_q_mvar": 1e-6,
"gen_p_mw": 1e-6, "gen_q_mvar": 1e-6}, run=True):
"""
    This function validates the conversion of pypower case files to the pandapower net structure via a \
comparison of loadflow calculation results. (Hence the opf cost conversion is not validated.)
INPUT:
**ppc_net** - The pypower case file, which must already contain the pypower powerflow
results or pypower must be importable.
**net** - The pandapower network.
OPTIONAL:
**pf_type** ("runpp", string) - Type of validated power flow. Possible are ("runpp",
"rundcpp", "runopp", "rundcopp")
**max_diff_values** - Dict of maximal allowed difference values. The keys must be
            'bus_vm_pu', 'bus_va_degree', 'branch_p_mw', 'branch_q_mvar', 'gen_p_mw' and 'gen_q_mvar' and
the values floats.
**run** (True, bool or list of two bools) - changing the value to False avoids trying to run
(optimal) loadflows. Giving a list of two bools addresses first pypower and second
pandapower.
OUTPUT:
**conversion_success** - conversion_success is returned as False if pypower or pandapower
cannot calculate a powerflow or if the maximum difference values (max_diff_values )
cannot be hold.
EXAMPLE:
import pandapower.converter as pc
        net = pc.from_ppc(ppc_net, f_hz=50)
        conversion_success = pc.validate_from_ppc(ppc_net, net)
NOTE:
The user has to take care that the loadflow results already are included in the provided \
ppc_net or pypower is importable.
"""
    # check in case of optimal powerflow comparison whether cost information exists
if "opp" in pf_type:
if not (len(net.polynomial_cost) | len(net.piecewise_linear_cost)):
if "gencost" in ppc_net:
if not len(ppc_net["gencost"]):
logger.debug('ppc and pandapower net do not include cost information.')
return True
else:
logger.error('The pandapower net does not include cost information.')
return False
else:
logger.debug('ppc and pandapower net do not include cost information.')
return True
# guarantee run parameter as list, for pypower and pandapower (optimal) powerflow run
run = [run, run] if isinstance(run, bool) else run
# --- check pypower powerflow success, if possible
if pypower_import and run[0]:
try:
if pf_type == "runpp":
ppc_net = runpf.runpf(ppc_net, ppopt)[0]
elif pf_type == "rundcpp":
ppc_net = rundcpf.rundcpf(ppc_net, ppopt)[0]
elif pf_type == "runopp":
ppc_net = runopf.runopf(ppc_net, ppopt)
elif pf_type == "rundcopp":
ppc_net = rundcopf.rundcopf(ppc_net, ppopt)
else:
raise ValueError("The pf_type %s is unknown" % pf_type)
except:
logger.debug("The pypower run did not work.")
ppc_success = True
if 'success' in ppc_net.keys():
if ppc_net['success'] != 1:
ppc_success = False
logger.error("The given ppc data indicates an unsuccessful pypower powerflow: " +
"'ppc_net['success'] != 1'")
if (ppc_net['branch'].shape[1] < 17):
ppc_success = False
logger.error("The shape of given ppc data indicates missing pypower powerflow results.")
# --- try to run a pandapower powerflow
if run[1]:
if pf_type == "runpp":
try:
pp.runpp(net, init="dc", calculate_voltage_angles=True, trafo_model="pi")
except pp.LoadflowNotConverged:
try:
pp.runpp(net, calculate_voltage_angles=True, init="flat", trafo_model="pi")
except pp.LoadflowNotConverged:
try:
pp.runpp(net, trafo_model="pi", calculate_voltage_angles=False)
if "bus_va_degree" in max_diff_values.keys():
max_diff_values["bus_va_degree"] = 1e2 if max_diff_values[
"bus_va_degree"] < 1e2 else max_diff_values["bus_va_degree"]
logger.info("voltage_angles could be calculated.")
except pp.LoadflowNotConverged:
logger.error('The pandapower powerflow does not converge.')
elif pf_type == "rundcpp":
try:
pp.rundcpp(net, trafo_model="pi")
except pp.LoadflowNotConverged:
logger.error('The pandapower dc powerflow does not converge.')
elif pf_type == "runopp":
try:
pp.runopp(net, init="flat", calculate_voltage_angles=True)
except pp.OPFNotConverged:
try:
pp.runopp(net, init="pf", calculate_voltage_angles=True)
except (pp.OPFNotConverged, pp.LoadflowNotConverged, KeyError):
try:
pp.runopp(net, init="flat", calculate_voltage_angles=False)
logger.info("voltage_angles could be calculated.")
if "bus_va_degree" in max_diff_values.keys():
max_diff_values["bus_va_degree"] = 1e2 if max_diff_values[
"bus_va_degree"] < 1e2 else max_diff_values["bus_va_degree"]
except pp.OPFNotConverged:
try:
pp.runopp(net, init="pf", calculate_voltage_angles=False)
if "bus_va_degree" in max_diff_values.keys():
max_diff_values["bus_va_degree"] = 1e2 if max_diff_values[
"bus_va_degree"] < 1e2 else max_diff_values["bus_va_degree"]
logger.info("voltage_angles could be calculated.")
except (pp.OPFNotConverged, pp.LoadflowNotConverged, KeyError):
logger.error('The pandapower optimal powerflow does not converge.')
elif pf_type == "rundcopp":
try:
pp.rundcopp(net)
except pp.LoadflowNotConverged:
logger.error('The pandapower dc optimal powerflow does not converge.')
else:
raise ValueError("The pf_type %s is unknown" % pf_type)
# --- prepare powerflow result comparison by reordering pp results as they are in ppc results
if not ppc_success:
return False
if "opp" in pf_type:
if not net.OPF_converged:
            return False
elif not net.converged:
return False
# --- store pypower powerflow results
ppc_res = dict.fromkeys(ppc_elms)
ppc_res["branch"] = ppc_net['branch'][:, 13:17]
ppc_res["bus"] = ppc_net['bus'][:, 7:9]
ppc_res["gen"] = ppc_net['gen'][:, 1:3]
# --- pandapower bus result table
pp_res = dict.fromkeys(ppc_elms)
pp_res["bus"] = array(net.res_bus.sort_index()[['vm_pu', 'va_degree']])
# --- pandapower gen result table
pp_res["gen"] = zeros([1, 2])
    # consideration of parallel generators via storing how many generators have already been
    # considered at each node
# if in ppc is only one gen -> numpy initially uses one dim array -> change to two dim array
if len(ppc_net["gen"].shape) == 1:
ppc_net["gen"] = array(ppc_net["gen"], ndmin=2)
GENS = DataFrame(ppc_net['gen'][:, [0]].astype(int))
GEN_uniq = GENS.drop_duplicates()
already_used_gen = Series(zeros(GEN_uniq.shape[0]).astype(int),
index=[int(v) for v in GEN_uniq.values])
change_q_compare = []
for i, j in GENS.iterrows():
current_bus_type, current_bus_idx, same_bus_gen_idx, first_same_bus_in_service_gen_idx, \
last_same_bus_in_service_gen_idx = _gen_bus_info(ppc_net, i)
if current_bus_type == 3 and i == first_same_bus_in_service_gen_idx:
pp_res["gen"] = append(pp_res["gen"], array(net.res_ext_grid[
net.ext_grid.bus == current_bus_idx][['p_mw', 'q_mvar']]).reshape((1, 2)), 0)
elif current_bus_type == 2 and i == first_same_bus_in_service_gen_idx:
pp_res["gen"] = append(pp_res["gen"], array(net.res_gen[
net.gen.bus == current_bus_idx][['p_mw', 'q_mvar']]).reshape((1, 2)), 0)
else:
pp_res["gen"] = append(pp_res["gen"], array(net.res_sgen[
net.sgen.bus == current_bus_idx][['p_mw', 'q_mvar']])[
already_used_gen.at[int(j)]].reshape((1, 2)), 0)
already_used_gen.at[int(j)] += 1
change_q_compare += [int(j)]
pp_res["gen"] = pp_res["gen"][1:, :] # delete initial zero row
# --- pandapower branch result table
pp_res["branch"] = zeros([1, 4])
    # consideration of parallel branches via storing how often branches were already considered
    # at each node-to-node connection
try:
init1 = concat([net.line.from_bus, net.line.to_bus], axis=1,
sort=True).drop_duplicates()
init2 = concat([net.trafo.hv_bus, net.trafo.lv_bus], axis=1,
sort=True).drop_duplicates()
except TypeError:
# legacy pandas < 0.21
init1 = concat([net.line.from_bus, net.line.to_bus], axis=1).drop_duplicates()
init2 = concat([net.trafo.hv_bus, net.trafo.lv_bus], axis=1).drop_duplicates()
init1['hv_bus'] = nan
init1['lv_bus'] = nan
init2['from_bus'] = nan
init2['to_bus'] = nan
try:
already_used_branches = concat([init1, init2], axis=0, sort=True)
except TypeError:
# pandas < 0.21 legacy
already_used_branches = concat([init1, init2], axis=0)
already_used_branches['number'] = zeros([already_used_branches.shape[0], 1]).astype(int)
BRANCHES = DataFrame(ppc_net['branch'][:, [0, 1, 8, 9]])
for i in BRANCHES.index:
from_bus = pp.get_element_index(net, 'bus', name=int(ppc_net['branch'][i, 0]))
to_bus = pp.get_element_index(net, 'bus', name=int(ppc_net['branch'][i, 1]))
from_vn_kv = ppc_net['bus'][from_bus, 9]
to_vn_kv = ppc_net['bus'][to_bus, 9]
ratio = BRANCHES[2].at[i]
angle = BRANCHES[3].at[i]
# from line results
if (from_vn_kv == to_vn_kv) & ((ratio == 0) | (ratio == 1)) & (angle == 0):
pp_res["branch"] = append(pp_res["branch"], array(net.res_line[
(net.line.from_bus == from_bus) &
(net.line.to_bus == to_bus)]
[['p_from_mw', 'q_from_mvar', 'p_to_mw', 'q_to_mvar']])[
int(already_used_branches.number.loc[
(already_used_branches.from_bus == from_bus) &
(already_used_branches.to_bus == to_bus)].values)].reshape(1, 4), 0)
already_used_branches.number.loc[(already_used_branches.from_bus == from_bus) &
(already_used_branches.to_bus == to_bus)] += 1
# from trafo results
else:
if from_vn_kv >= to_vn_kv:
pp_res["branch"] = append(pp_res["branch"], array(net.res_trafo[
(net.trafo.hv_bus == from_bus) &
(net.trafo.lv_bus == to_bus)]
[['p_hv_mw', 'q_hv_mvar', 'p_lv_mw', 'q_lv_mvar']])[
int(already_used_branches.number.loc[
(already_used_branches.hv_bus == from_bus) &
(already_used_branches.lv_bus == to_bus)].values)].reshape(1, 4), 0)
already_used_branches.number.loc[(already_used_branches.hv_bus == from_bus) &
(already_used_branches.lv_bus == to_bus)] += 1
else: # switch hv-lv-connection of pypower connection buses
pp_res["branch"] = append(pp_res["branch"], array(net.res_trafo[
(net.trafo.hv_bus == to_bus) &
(net.trafo.lv_bus == from_bus)]
[['p_lv_mw', 'q_lv_mvar', 'p_hv_mw', 'q_hv_mvar']])[
int(already_used_branches.number.loc[
(already_used_branches.hv_bus == to_bus) &
(already_used_branches.lv_bus == from_bus)].values)].reshape(1, 4), 0)
already_used_branches.number.loc[
(already_used_branches.hv_bus == to_bus) &
(already_used_branches.lv_bus == from_bus)] += 1
pp_res["branch"] = pp_res["branch"][1:, :] # delete initial zero row
# --- do the powerflow result comparison
diff_res = dict.fromkeys(ppc_elms)
diff_res["bus"] = ppc_res["bus"] - pp_res["bus"]
diff_res["bus"][:, 1] -= diff_res["bus"][0, 1] # remove va_degree offset
diff_res["branch"] = ppc_res["branch"] - pp_res["branch"]
diff_res["gen"] = ppc_res["gen"] - pp_res["gen"]
# comparison of buses with several generator units only as q sum
for i in GEN_uniq.loc[GEN_uniq[0].isin(change_q_compare)].index:
next_is = GEN_uniq.index[GEN_uniq.index > i]
if len(next_is) > 0:
next_i = next_is[0]
else:
next_i = GENS.index[-1] + 1
if (next_i - i) > 1:
diff_res["gen"][i:next_i, 1] = sum(diff_res["gen"][i:next_i, 1])
# logger info
logger.debug("Maximum voltage magnitude difference between pypower and pandapower: "
"%.2e pu" % max_(abs(diff_res["bus"][:, 0])))
logger.debug("Maximum voltage angle difference between pypower and pandapower: "
"%.2e degree" % max_(abs(diff_res["bus"][:, 1])))
logger.debug("Maximum branch flow active power difference between pypower and pandapower: "
"%.2e kW" % max_(abs(diff_res["branch"][:, [0, 2]] * 1e3)))
logger.debug("Maximum branch flow reactive power difference between pypower and "
"pandapower: %.2e MVAr" % max_(abs(diff_res["branch"][:, [1, 3]])))
logger.debug("Maximum active power generation difference between pypower and pandapower: "
"%.2e MW" % max_(abs(diff_res["gen"][:, 0])))
logger.debug("Maximum reactive power generation difference between pypower and pandapower: "
"%.2e kVAr" % max_(abs(diff_res["gen"][:, 1] * 1e3)))
if _validate_diff_res(diff_res, {"bus_vm_pu": 1e-3, "bus_va_degree": 1e-3, "branch_p_mw": 1e-6,
"branch_q_mvar": 1e-6}) and \
(max_(abs(diff_res["gen"])) > 1e-1).any():
logger.debug("The active/reactive power generation difference possibly results "
"because of a pypower error. Please validate "
"the results via pypower loadflow.") # this occurs e.g. at ppc case9
# give a return
if isinstance(max_diff_values, dict):
return _validate_diff_res(diff_res, max_diff_values)
else:
logger.debug("'max_diff_values' must be a dict.")
|
the-stack_106_28600 | # This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Codon tables based on those from the NCBI.
These tables are based on parsing the NCBI file
ftp://ftp.ncbi.nih.gov/entrez/misc/data/gc.prt
using Scripts/update_ncbi_codon_table.py
Last updated at Version 4.0
"""
from __future__ import print_function
from Bio import Alphabet
from Bio.Alphabet import IUPAC
from Bio.Data import IUPACData
unambiguous_dna_by_name = {}
unambiguous_dna_by_id = {}
unambiguous_rna_by_name = {}
unambiguous_rna_by_id = {}
generic_by_name = {} # unambiguous DNA or RNA
generic_by_id = {} # unambiguous DNA or RNA
ambiguous_dna_by_name = {}
ambiguous_dna_by_id = {}
ambiguous_rna_by_name = {}
ambiguous_rna_by_id = {}
ambiguous_generic_by_name = {} # ambiguous DNA or RNA
ambiguous_generic_by_id = {} # ambiguous DNA or RNA
# standard IUPAC unambiguous codons
standard_dna_table = None
standard_rna_table = None
# In the future, the back_table could return a statistically
# appropriate distribution of codons, so do not cache the results of
# back_table lookups!
class TranslationError(Exception):
pass
class CodonTable(object):
"""A codon-table, or genetic code."""
nucleotide_alphabet = Alphabet.generic_nucleotide
protein_alphabet = Alphabet.generic_protein
forward_table = {} # only includes codons which actually code
back_table = {} # for back translations
start_codons = []
stop_codons = []
# Not always called from derived classes!
def __init__(self, nucleotide_alphabet=nucleotide_alphabet,
protein_alphabet=protein_alphabet,
forward_table=forward_table, back_table=back_table,
start_codons=start_codons, stop_codons=stop_codons):
"""Initialize the class."""
self.nucleotide_alphabet = nucleotide_alphabet
self.protein_alphabet = protein_alphabet
self.forward_table = forward_table
self.back_table = back_table
self.start_codons = start_codons
self.stop_codons = stop_codons
def __str__(self):
"""Returns a simple text representation of the codon table.
e.g.
>>> import Bio.Data.CodonTable
>>> print(Bio.Data.CodonTable.standard_dna_table)
>>> print(Bio.Data.CodonTable.generic_by_id[1])
"""
if self.id:
answer = "Table %i" % self.id
else:
answer = "Table ID unknown"
if self.names:
answer += " " + ", ".join([x for x in self.names if x])
# Use the main four letters (and the conventional ordering)
# even for ambiguous tables
letters = self.nucleotide_alphabet.letters
if isinstance(self.nucleotide_alphabet, Alphabet.DNAAlphabet) \
or (letters is not None and "T" in letters):
letters = "TCAG"
else:
# Should be either RNA or generic nucleotides,
# e.g. Bio.Data.CodonTable.generic_by_id[1]
letters = "UCAG"
# Build the table...
answer += "\n\n |" + "|".join(" %s " % c2 for c2 in letters) + "|"
answer += "\n--+" + "+".join("---------" for c2 in letters) + "+--"
for c1 in letters:
for c3 in letters:
line = c1 + " |"
for c2 in letters:
codon = c1 + c2 + c3
line += " %s" % codon
if codon in self.stop_codons:
line += " Stop|"
else:
try:
amino = self.forward_table[codon]
except KeyError:
amino = "?"
except TranslationError:
amino = "?"
if codon in self.start_codons:
line += " %s(s)|" % amino
else:
line += " %s |" % amino
line += " " + c3
answer += "\n" + line
answer += "\n--+" + "+".join("---------" for c2 in letters) + "+--"
return answer
def make_back_table(table, default_stop_codon):
"""Back a back-table (naive single codon mapping).
ONLY RETURNS A SINGLE CODON, chosen from the possible alternatives
based on their sort order.
"""
# Do the sort so changes in the hash implementation won't affect
# the result when one amino acid is coded by more than one codon.
back_table = {}
for key in sorted(table):
back_table[table[key]] = key
back_table[None] = default_stop_codon
return back_table
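# Editorial example (not part of the original module): when several codons code for
# the same amino acid, the codon that sorts last wins, e.g.
# make_back_table({"TTT": "F", "TTC": "F"}, "TAA") == {"F": "TTT", None: "TAA"}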
class NCBICodonTable(CodonTable):
nucleotide_alphabet = Alphabet.generic_nucleotide
protein_alphabet = IUPAC.protein
def __init__(self, id, names, table, start_codons, stop_codons):
"""Initialize the class."""
self.id = id
self.names = names
self.forward_table = table
self.back_table = make_back_table(table, stop_codons[0])
self.start_codons = start_codons
self.stop_codons = stop_codons
class NCBICodonTableDNA(NCBICodonTable):
nucleotide_alphabet = IUPAC.unambiguous_dna
class NCBICodonTableRNA(NCBICodonTable):
nucleotide_alphabet = IUPAC.unambiguous_rna
# ######## Deal with ambiguous forward translations
class AmbiguousCodonTable(CodonTable):
def __init__(self, codon_table,
ambiguous_nucleotide_alphabet,
ambiguous_nucleotide_values,
ambiguous_protein_alphabet,
ambiguous_protein_values):
"""Initialize the class."""
CodonTable.__init__(self,
ambiguous_nucleotide_alphabet,
ambiguous_protein_alphabet,
AmbiguousForwardTable(codon_table.forward_table,
ambiguous_nucleotide_values,
ambiguous_protein_values),
codon_table.back_table,
# These two are WRONG! I need to get the
# list of ambiguous codons which code for
# the stop codons XXX
list_ambiguous_codons(codon_table.start_codons, ambiguous_nucleotide_values),
list_ambiguous_codons(codon_table.stop_codons, ambiguous_nucleotide_values)
)
self._codon_table = codon_table
# Be sneaky and forward attribute lookups to the original table.
# This lets us get the names, if the original table is an NCBI
# table.
def __getattr__(self, name):
return getattr(self._codon_table, name)
def list_possible_proteins(codon, forward_table, ambiguous_nucleotide_values):
c1, c2, c3 = codon
x1 = ambiguous_nucleotide_values[c1]
x2 = ambiguous_nucleotide_values[c2]
x3 = ambiguous_nucleotide_values[c3]
possible = {}
stops = []
for y1 in x1:
for y2 in x2:
for y3 in x3:
try:
possible[forward_table[y1 + y2 + y3]] = 1
except KeyError:
# If tripping over a stop codon
stops.append(y1 + y2 + y3)
if stops:
if possible:
raise TranslationError("ambiguous codon %r codes for both"
" proteins and stop codons" % codon)
# This is a true stop codon - tell the caller about it
raise KeyError(codon)
return list(possible)
def list_ambiguous_codons(codons, ambiguous_nucleotide_values):
"""Extends a codon list to include all possible ambigous codons.
e.g.::
['TAG', 'TAA'] -> ['TAG', 'TAA', 'TAR']
['UAG', 'UGA'] -> ['UAG', 'UGA', 'URA']
    Note that ['TAG', 'TGA'] -> ['TAG', 'TGA'] (i.e. 'TRR' is not added).
Thus only two more codons are added in the following:
e.g.::
['TGA', 'TAA', 'TAG'] -> ['TGA', 'TAA', 'TAG', 'TRA', 'TAR']
Returns a new (longer) list of codon strings.
"""
# Note ambiguous_nucleotide_values['R'] = 'AG' (etc)
# This will generate things like 'TRR' from ['TAG', 'TGA'], which
# we don't want to include:
c1_list = sorted(letter for (letter, meanings)
in ambiguous_nucleotide_values.items()
if set(codon[0] for codon in codons).issuperset(set(meanings)))
c2_list = sorted(letter for (letter, meanings)
in ambiguous_nucleotide_values.items()
if set(codon[1] for codon in codons).issuperset(set(meanings)))
c3_list = sorted(letter for (letter, meanings)
in ambiguous_nucleotide_values.items()
if set(codon[2] for codon in codons).issuperset(set(meanings)))
# candidates is a list (not a set) to preserve the iteration order
candidates = []
for c1 in c1_list:
for c2 in c2_list:
for c3 in c3_list:
codon = c1 + c2 + c3
if codon not in candidates and codon not in codons:
candidates.append(codon)
answer = codons[:] # copy
# print "Have %i new candidates" % len(candidates)
for ambig_codon in candidates:
wanted = True
# e.g. 'TRR' -> 'TAA', 'TAG', 'TGA', 'TGG'
for codon in [c1 + c2 + c3
for c1 in ambiguous_nucleotide_values[ambig_codon[0]]
for c2 in ambiguous_nucleotide_values[ambig_codon[1]]
for c3 in ambiguous_nucleotide_values[ambig_codon[2]]]:
if codon not in codons:
# This ambiguous codon can code for a non-stop, exclude it!
wanted = False
# print "Rejecting %s" % ambig_codon
continue
if wanted:
answer.append(ambig_codon)
return answer
assert list_ambiguous_codons(['TGA', 'TAA'], IUPACData.ambiguous_dna_values) == ['TGA', 'TAA', 'TRA']
assert list_ambiguous_codons(['TAG', 'TGA'], IUPACData.ambiguous_dna_values) == ['TAG', 'TGA']
assert list_ambiguous_codons(['TAG', 'TAA'], IUPACData.ambiguous_dna_values) == ['TAG', 'TAA', 'TAR']
assert list_ambiguous_codons(['UAG', 'UAA'], IUPACData.ambiguous_rna_values) == ['UAG', 'UAA', 'UAR']
assert list_ambiguous_codons(['TGA', 'TAA', 'TAG'],
IUPACData.ambiguous_dna_values) == ['TGA', 'TAA', 'TAG', 'TAR', 'TRA']
# Forward translation is "onto", that is, any given codon always maps
# to the same protein, or it doesn't map at all. Thus, I can build
# off of an existing table to produce the ambiguous mappings.
#
# This handles the general case. Perhaps it's overkill?
# >>> t = CodonTable.ambiguous_dna_by_id[1]
# >>> t.forward_table["AAT"]
# 'N'
# >>> t.forward_table["GAT"]
# 'D'
# >>> t.forward_table["RAT"]
# 'B'
# >>> t.forward_table["YTA"]
# 'L'
class AmbiguousForwardTable(object):
def __init__(self, forward_table, ambiguous_nucleotide, ambiguous_protein):
"""Initialize the class."""
self.forward_table = forward_table
self.ambiguous_nucleotide = ambiguous_nucleotide
self.ambiguous_protein = ambiguous_protein
inverted = {}
for name, val in ambiguous_protein.items():
for c in val:
x = inverted.get(c, {})
x[name] = 1
inverted[c] = x
for name, val in inverted.items():
inverted[name] = list(val)
self._inverted = inverted
self._cache = {}
def get(self, codon, failobj=None):
try:
return self.__getitem__(codon)
except KeyError:
return failobj
def __getitem__(self, codon):
try:
x = self._cache[codon]
except KeyError:
pass
else:
if x is TranslationError:
raise TranslationError(codon) # no unique translation
if x is KeyError:
raise KeyError(codon) # it's a stop codon
return x
try:
x = self.forward_table[codon]
self._cache[codon] = x
return x
except KeyError:
pass
# XXX Need to make part of this into a method which returns
# a list of all possible encodings for a codon!
try:
possible = list_possible_proteins(codon,
self.forward_table,
self.ambiguous_nucleotide)
except KeyError:
self._cache[codon] = KeyError
raise KeyError(codon) # stop codon
except TranslationError:
self._cache[codon] = TranslationError
raise TranslationError(codon) # does not code
assert len(possible) > 0, "unambiguous codons must code"
# Hah! Only one possible protein, so use it
if len(possible) == 1:
self._cache[codon] = possible[0]
return possible[0]
# See if there's an ambiguous protein encoding for the multiples.
# Find residues which exist in every coding set.
ambiguous_possible = {}
for amino in possible:
for term in self._inverted[amino]:
ambiguous_possible[term] = ambiguous_possible.get(term, 0) + 1
n = len(possible)
possible = []
for amino, val in ambiguous_possible.items():
if val == n:
possible.append(amino)
# No amino acid encoding for the results
if len(possible) == 0:
self._cache[codon] = TranslationError
raise TranslationError(codon) # no valid translation
# All of these are valid, so choose one
        # To be unique, sort by smallest ambiguity then alphabetically
# Can get this if "X" encodes for everything.
# def _sort(x, y, table = self.ambiguous_protein):
# a = cmp(len(table[x]), len(table[y]))
# if a == 0:
# return cmp(x, y)
# return a
# Sort by key is 2.x and 3.x compatible
possible.sort(key=lambda x: (len(self.ambiguous_protein[x]), x))
x = possible[0]
self._cache[codon] = x
return x
def register_ncbi_table(name, alt_name, id,
table, start_codons, stop_codons):
"""Turns codon table data into objects, and stores them in the dictionaries (PRIVATE)."""
# In most cases names are divided by "; ", however there is also
# Table 11 'Bacterial, Archaeal and Plant Plastid Code', previously
# 'Bacterial and Plant Plastid' which used to be just 'Bacterial'
names = [x.strip() for x in name.replace(" and ", "; ").replace(", ", "; ").split("; ")]
dna = NCBICodonTableDNA(id, names + [alt_name], table, start_codons,
stop_codons)
ambig_dna = AmbiguousCodonTable(dna,
IUPAC.ambiguous_dna,
IUPACData.ambiguous_dna_values,
IUPAC.extended_protein,
IUPACData.extended_protein_values)
# replace all T's with U's for the RNA tables
rna_table = {}
generic_table = {}
for codon, val in table.items():
generic_table[codon] = val
codon = codon.replace("T", "U")
generic_table[codon] = val
rna_table[codon] = val
rna_start_codons = []
generic_start_codons = []
for codon in start_codons:
generic_start_codons.append(codon)
codon = codon.replace("T", "U")
generic_start_codons.append(codon)
rna_start_codons.append(codon)
rna_stop_codons = []
generic_stop_codons = []
for codon in stop_codons:
generic_stop_codons.append(codon)
codon = codon.replace("T", "U")
generic_stop_codons.append(codon)
rna_stop_codons.append(codon)
generic = NCBICodonTable(id, names + [alt_name], generic_table,
generic_start_codons, generic_stop_codons)
# The following isn't very elegant, but seems to work nicely.
_merged_values = dict(IUPACData.ambiguous_rna_values.items())
_merged_values["T"] = "U"
ambig_generic = AmbiguousCodonTable(generic,
Alphabet.NucleotideAlphabet(),
_merged_values,
IUPAC.extended_protein,
IUPACData.extended_protein_values)
rna = NCBICodonTableRNA(id, names + [alt_name], rna_table,
rna_start_codons, rna_stop_codons)
ambig_rna = AmbiguousCodonTable(rna,
IUPAC.ambiguous_rna,
IUPACData.ambiguous_rna_values,
IUPAC.extended_protein,
IUPACData.extended_protein_values)
if id == 1:
global standard_dna_table, standard_rna_table
standard_dna_table = dna
standard_rna_table = rna
unambiguous_dna_by_id[id] = dna
unambiguous_rna_by_id[id] = rna
generic_by_id[id] = generic
ambiguous_dna_by_id[id] = ambig_dna
ambiguous_rna_by_id[id] = ambig_rna
ambiguous_generic_by_id[id] = ambig_generic
if alt_name is not None:
names.append(alt_name)
for name in names:
unambiguous_dna_by_name[name] = dna
unambiguous_rna_by_name[name] = rna
generic_by_name[name] = generic
ambiguous_dna_by_name[name] = ambig_dna
ambiguous_rna_by_name[name] = ambig_rna
ambiguous_generic_by_name[name] = ambig_generic
##########################################################################
# Start of auto-generated output from Scripts/update_ncbi_codon_table.py #
##########################################################################
register_ncbi_table(name='Standard',
alt_name='SGC0', id=1,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGG': 'W', 'CTT': 'L', 'CTC': 'L',
'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P', 'CCA': 'P',
'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q',
'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R', 'ATT': 'I',
'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T', 'ACC': 'T',
'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N', 'AAA': 'K',
'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R',
'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V', 'GCT': 'A',
'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D', 'GAC': 'D',
'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G', 'GGA': 'G',
'GGG': 'G', },
stop_codons=['TAA', 'TAG', 'TGA'],
start_codons=['TTG', 'CTG', 'ATG'])
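# Editorial sanity check (added; mirrors the module-level assert style used earlier):
assert unambiguous_dna_by_id[1].forward_table["ATG"] == "M"
assert unambiguous_dna_by_id[1].stop_codons == ['TAA', 'TAG', 'TGA']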
register_ncbi_table(name='Vertebrate Mitochondrial',
alt_name='SGC1', id=2,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGA': 'W', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'M', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'GTT': 'V',
'GTC': 'V', 'GTA': 'V', 'GTG': 'V', 'GCT': 'A', 'GCC': 'A',
'GCA': 'A', 'GCG': 'A', 'GAT': 'D', 'GAC': 'D', 'GAA': 'E',
'GAG': 'E', 'GGT': 'G', 'GGC': 'G', 'GGA': 'G', 'GGG': 'G', },
stop_codons=['TAA', 'TAG', 'AGA', 'AGG'],
start_codons=['ATT', 'ATC', 'ATA', 'ATG', 'GTG'])
register_ncbi_table(name='Yeast Mitochondrial',
alt_name='SGC2', id=3,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGA': 'W', 'TGG': 'W', 'CTT': 'T',
'CTC': 'T', 'CTA': 'T', 'CTG': 'T', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'M', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R',
'AGG': 'R', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons=['TAA', 'TAG'],
start_codons=['ATA', 'ATG'])
register_ncbi_table(name='Mold Mitochondrial; Protozoan Mitochondrial; Coelenterate Mitochondrial; Mycoplasma; Spiroplasma',
alt_name='SGC3', id=4,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGA': 'W', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R',
'AGG': 'R', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons=['TAA', 'TAG'],
start_codons=['TTA', 'TTG', 'CTG', 'ATT', 'ATC', 'ATA',
'ATG', 'GTG'])
register_ncbi_table(name='Invertebrate Mitochondrial',
alt_name='SGC4', id=5,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGA': 'W', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'M', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'S',
'AGG': 'S', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons=['TAA', 'TAG'],
start_codons=['TTG', 'ATT', 'ATC', 'ATA', 'ATG', 'GTG'])
register_ncbi_table(name='Ciliate Nuclear; Dasycladacean Nuclear; Hexamita Nuclear',
alt_name='SGC5', id=6,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TAA': 'Q', 'TAG': 'Q', 'TGT': 'C', 'TGC': 'C', 'TGG': 'W',
'CTT': 'L', 'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P',
'CCC': 'P', 'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H',
'CAA': 'Q', 'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R',
'CGG': 'R', 'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M',
'ACT': 'T', 'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N',
'AAC': 'N', 'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S',
'AGA': 'R', 'AGG': 'R', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V',
'GTG': 'V', 'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A',
'GAT': 'D', 'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G',
'GGC': 'G', 'GGA': 'G', 'GGG': 'G', },
stop_codons=['TGA'],
start_codons=['ATG'])
register_ncbi_table(name='Echinoderm Mitochondrial; Flatworm Mitochondrial',
alt_name='SGC8', id=9,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGA': 'W', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'N', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'S',
'AGG': 'S', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons=['TAA', 'TAG'],
start_codons=['ATG', 'GTG'])
register_ncbi_table(name='Euplotid Nuclear',
alt_name='SGC9', id=10,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGA': 'C', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R',
'AGG': 'R', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons=['TAA', 'TAG'],
start_codons=['ATG'])
register_ncbi_table(name='Bacterial, Archaeal and Plant Plastid',
alt_name=None, id=11,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGG': 'W', 'CTT': 'L', 'CTC': 'L',
'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P', 'CCA': 'P',
'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q',
'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R', 'ATT': 'I',
'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T', 'ACC': 'T',
'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N', 'AAA': 'K',
'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R',
'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V', 'GCT': 'A',
'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D', 'GAC': 'D',
'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G', 'GGA': 'G',
'GGG': 'G', },
stop_codons=['TAA', 'TAG', 'TGA'],
start_codons=['TTG', 'CTG', 'ATT', 'ATC', 'ATA', 'ATG',
'GTG'])
register_ncbi_table(name='Alternative Yeast Nuclear',
alt_name=None, id=12,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGG': 'W', 'CTT': 'L', 'CTC': 'L',
'CTA': 'L', 'CTG': 'S', 'CCT': 'P', 'CCC': 'P', 'CCA': 'P',
'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q',
'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R', 'ATT': 'I',
'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T', 'ACC': 'T',
'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N', 'AAA': 'K',
'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R',
'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V', 'GCT': 'A',
'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D', 'GAC': 'D',
'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G', 'GGA': 'G',
'GGG': 'G', },
stop_codons=['TAA', 'TAG', 'TGA'],
start_codons=['CTG', 'ATG'])
register_ncbi_table(name='Ascidian Mitochondrial',
alt_name=None, id=13,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGA': 'W', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'M', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'G',
'AGG': 'G', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons=['TAA', 'TAG'],
start_codons=['TTG', 'ATA', 'ATG', 'GTG'])
register_ncbi_table(name='Alternative Flatworm Mitochondrial',
alt_name=None, id=14,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TAA': 'Y', 'TGT': 'C', 'TGC': 'C', 'TGA': 'W', 'TGG': 'W',
'CTT': 'L', 'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P',
'CCC': 'P', 'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H',
'CAA': 'Q', 'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R',
'CGG': 'R', 'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M',
'ACT': 'T', 'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N',
'AAC': 'N', 'AAA': 'N', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S',
'AGA': 'S', 'AGG': 'S', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V',
'GTG': 'V', 'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A',
'GAT': 'D', 'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G',
'GGC': 'G', 'GGA': 'G', 'GGG': 'G', },
stop_codons=['TAG'],
start_codons=['ATG'])
register_ncbi_table(name='Blepharisma Macronuclear',
alt_name=None, id=15,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TAG': 'Q', 'TGT': 'C', 'TGC': 'C', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R',
'AGG': 'R', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons=['TAA', 'TGA'],
start_codons=['ATG'])
register_ncbi_table(name='Chlorophycean Mitochondrial',
alt_name=None, id=16,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TAG': 'L', 'TGT': 'C', 'TGC': 'C', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R',
'AGG': 'R', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons=['TAA', 'TGA'],
start_codons=['ATG'])
register_ncbi_table(name='Trematode Mitochondrial',
alt_name=None, id=21,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGA': 'W', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'M', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'N', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'S',
'AGG': 'S', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons=['TAA', 'TAG'],
start_codons=['ATG', 'GTG'])
register_ncbi_table(name='Scenedesmus obliquus Mitochondrial',
alt_name=None, id=22,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y', 'TAG': 'L',
'TGT': 'C', 'TGC': 'C', 'TGG': 'W', 'CTT': 'L', 'CTC': 'L',
'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P', 'CCA': 'P',
'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q',
'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R', 'ATT': 'I',
'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T', 'ACC': 'T',
'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N', 'AAA': 'K',
'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R',
'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V', 'GCT': 'A',
'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D', 'GAC': 'D',
'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G', 'GGA': 'G',
'GGG': 'G', },
stop_codons=['TCA', 'TAA', 'TGA'],
start_codons=['ATG'])
register_ncbi_table(name='Thraustochytrium Mitochondrial',
alt_name=None, id=23,
table={
'TTT': 'F', 'TTC': 'F', 'TTG': 'L', 'TCT': 'S', 'TCC': 'S',
'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y', 'TGT': 'C',
'TGC': 'C', 'TGG': 'W', 'CTT': 'L', 'CTC': 'L', 'CTA': 'L',
'CTG': 'L', 'CCT': 'P', 'CCC': 'P', 'CCA': 'P', 'CCG': 'P',
'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q', 'CGT': 'R',
'CGC': 'R', 'CGA': 'R', 'CGG': 'R', 'ATT': 'I', 'ATC': 'I',
'ATA': 'I', 'ATG': 'M', 'ACT': 'T', 'ACC': 'T', 'ACA': 'T',
'ACG': 'T', 'AAT': 'N', 'AAC': 'N', 'AAA': 'K', 'AAG': 'K',
'AGT': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R', 'GTT': 'V',
'GTC': 'V', 'GTA': 'V', 'GTG': 'V', 'GCT': 'A', 'GCC': 'A',
'GCA': 'A', 'GCG': 'A', 'GAT': 'D', 'GAC': 'D', 'GAA': 'E',
'GAG': 'E', 'GGT': 'G', 'GGC': 'G', 'GGA': 'G', 'GGG': 'G', },
stop_codons=['TTA', 'TAA', 'TAG', 'TGA'],
start_codons=['ATT', 'ATG', 'GTG'])
register_ncbi_table(name='Pterobranchia Mitochondrial',
alt_name=None, id=24,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGA': 'W', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'S',
'AGG': 'K', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons=['TAA', 'TAG'],
start_codons=['TTG', 'CTG', 'ATG', 'GTG'])
register_ncbi_table(name='Candidate Division SR1 and Gracilibacteria',
alt_name=None, id=25,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGA': 'G', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R',
'AGG': 'R', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons=['TAA', 'TAG'],
start_codons=['TTG', 'ATG', 'GTG'])
########################################################################
# End of auto-generated output from Scripts/update_ncbi_codon_table.py #
########################################################################
# This is currently missing in Version 4.0 of
# ftp://ftp.ncbi.nih.gov/entrez/misc/data/gc.prt
# and was entered by hand based on
# http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi#SG26
#
# Code 26 is used so far only for the ascomycete fungus Pachysolen
# tannophilus. The only difference to the standard code is the
# translation of CUG as alanine (as opposed to leucine). As of
# April 2016, there is no publication documenting this code.
register_ncbi_table(name='Pachysolen tannophilus Nuclear Code',
alt_name=None, id=26,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGG': 'W', 'CTT': 'L', 'CTC': 'L',
'CTA': 'L', 'CTG': 'A', 'CCT': 'P', 'CCC': 'P', 'CCA': 'P',
'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q',
'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R', 'ATT': 'I',
'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T', 'ACC': 'T',
'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N', 'AAA': 'K',
'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R',
'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V', 'GCT': 'A',
'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D', 'GAC': 'D',
'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G', 'GGA': 'G',
'GGG': 'G', },
stop_codons=['TAA', 'TAG', 'TGA'],
start_codons=['TTG', 'CTG', 'ATG'])
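# As a quick illustration (a hypothetical check, not part of the generated
# output above): the CUG reassignment is the only difference from the standard
# code, so for the unambiguous DNA tables one would expect
#     unambiguous_dna_by_id[26].forward_table["CTG"] == "A"
#     unambiguous_dna_by_id[1].forward_table["CTG"] == "L"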
# Basic sanity test.
for key, val in generic_by_name.items():
assert key in ambiguous_generic_by_name[key].names
for key, val in generic_by_id.items():
assert ambiguous_generic_by_id[key].id == key
del key, val
for n in ambiguous_generic_by_id:
assert ambiguous_rna_by_id[n].forward_table["GUU"] == "V"
assert ambiguous_rna_by_id[n].forward_table["GUN"] == "V"
if n != 23:
# For table 23, UUN = F, L or stop.
assert ambiguous_rna_by_id[n].forward_table["UUN"] == "X" # F or L
    # R = A or G, so URA = UAA or UGA / TRA = TAA or TGA = stop codons
if "UAA" in unambiguous_rna_by_id[n].stop_codons \
and "UGA" in unambiguous_rna_by_id[n].stop_codons:
try:
print(ambiguous_dna_by_id[n].forward_table["TRA"])
assert False, "Should be a stop only"
except KeyError:
pass
assert "URA" in ambiguous_generic_by_id[n].stop_codons
assert "URA" in ambiguous_rna_by_id[n].stop_codons
assert "TRA" in ambiguous_generic_by_id[n].stop_codons
assert "TRA" in ambiguous_dna_by_id[n].stop_codons
del n
assert ambiguous_generic_by_id[1] == ambiguous_generic_by_name["Standard"]
assert ambiguous_generic_by_id[4] == ambiguous_generic_by_name["SGC3"]
assert ambiguous_generic_by_id[11] == ambiguous_generic_by_name["Bacterial"]
assert ambiguous_generic_by_id[11] == ambiguous_generic_by_name["Archaeal"]
assert ambiguous_generic_by_id[11] == ambiguous_generic_by_name["Plant Plastid"]
assert ambiguous_generic_by_id[15] == ambiguous_generic_by_name['Blepharisma Macronuclear']
assert ambiguous_generic_by_id[24] == ambiguous_generic_by_name["Pterobranchia Mitochondrial"]
assert generic_by_id[1] == generic_by_name["Standard"]
assert generic_by_id[4] == generic_by_name["SGC3"]
assert generic_by_id[11] == generic_by_name["Bacterial"]
assert generic_by_id[11] == generic_by_name["Plant Plastid"]
assert generic_by_id[15] == generic_by_name['Blepharisma Macronuclear']
assert generic_by_id[24] == generic_by_name["Pterobranchia Mitochondrial"]
|
the-stack_106_28603 | import torch
from torch import nn
from .torch_nn import BasicConv, batched_index_select
from .torch_edge import DenseDilatedKnnGraph, DilatedKnnGraph
import torch.nn.functional as F
class MRConv2d(nn.Module):
"""
Max-Relative Graph Convolution (Paper: https://arxiv.org/abs/1904.03751) for dense data type
"""
def __init__(self, in_channels, out_channels, act='relu', norm=None, bias=True):
super(MRConv2d, self).__init__()
self.nn = BasicConv([in_channels*2, out_channels], act, norm, bias)
def forward(self, x, edge_index):
x_i = batched_index_select(x, edge_index[1])
x_j = batched_index_select(x, edge_index[0])
x_j, _ = torch.max(x_j - x_i, -1, keepdim=True)
return self.nn(torch.cat([x, x_j], dim=1))
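# A shape sketch for the dense layers in this file (an assumption based on how
# batched_index_select is used, not enforced anywhere): x holds node features
# of shape (B, C, N, 1) and edge_index holds neighbour indices of shape
# (2, B, N, k), e.g.
#   conv = MRConv2d(in_channels=32, out_channels=64)
#   out = conv(x, edge_index)  # -> (B, 64, N, 1)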
class EdgeConv2d(nn.Module):
"""
Edge convolution layer (with activation, batch normalization) for dense data type
"""
def __init__(self, in_channels, out_channels, act='relu', norm=None, bias=True):
super(EdgeConv2d, self).__init__()
self.nn = BasicConv([in_channels * 2, out_channels], act, norm, bias)
def forward(self, x, edge_index):
x_i = batched_index_select(x, edge_index[1])
x_j = batched_index_select(x, edge_index[0])
max_value, _ = torch.max(self.nn(torch.cat([x_i, x_j - x_i], dim=1)), -1, keepdim=True)
return max_value
class GraphConv2d(nn.Module):
"""
Static graph convolution layer
"""
def __init__(self, in_channels, out_channels, conv='edge', act='relu', norm=None, bias=True):
super(GraphConv2d, self).__init__()
if conv == 'edge':
self.gconv = EdgeConv2d(in_channels, out_channels, act, norm, bias)
elif conv == 'mr':
self.gconv = MRConv2d(in_channels, out_channels, act, norm, bias)
else:
raise NotImplementedError('conv:{} is not supported'.format(conv))
def forward(self, x, edge_index):
return self.gconv(x, edge_index)
class DynConv2d(GraphConv2d):
"""
Dynamic graph convolution layer
"""
def __init__(self, in_channels, out_channels, kernel_size=9, dilation=1, conv='edge', act='relu',
norm=None, bias=True, stochastic=False, epsilon=0.0, knn='matrix'):
super(DynConv2d, self).__init__(in_channels, out_channels, conv, act, norm, bias)
self.k = kernel_size
self.d = dilation
if knn == 'matrix':
self.dilated_knn_graph = DenseDilatedKnnGraph(kernel_size, dilation, stochastic, epsilon)
else:
self.dilated_knn_graph = DilatedKnnGraph(kernel_size, dilation, stochastic, epsilon)
def forward(self, x):
edge_index = self.dilated_knn_graph(x)
return super(DynConv2d, self).forward(x, edge_index)
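# A minimal usage sketch (same shape convention as noted above; the k-NN graph
# is rebuilt from the input features on every forward pass):
#   x = torch.rand(2, 32, 1024, 1)
#   conv = DynConv2d(in_channels=32, out_channels=64, kernel_size=9, knn='matrix')
#   y = conv(x)  # -> (2, 64, 1024, 1)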
class ResDynBlock2d(nn.Module):
"""
Residual Dynamic graph convolution block
:input: (x0, x1, x2, ... , xi), batch
:output:(x0, x1, x2, ... , xi ,xi+1) , batch
"""
def __init__(self, in_channels, kernel_size=9, dilation=1, conv='edge', act='relu', norm=None,
bias=True, stochastic=False, epsilon=0.0, knn='matrix', res_scale=1):
super(ResDynBlock2d, self).__init__()
self.body = DynConv2d(in_channels, in_channels, kernel_size, dilation, conv,
act, norm, bias, stochastic, epsilon, knn)
self.res_scale = res_scale
def forward(self, x):
return self.body(x) + x*self.res_scale
class DenseDynBlock2d(nn.Module):
"""
Dense Dynamic graph convolution block
"""
def __init__(self, in_channels, out_channels=64, kernel_size=9, dilation=1, conv='edge',
act='relu', norm=None,bias=True, stochastic=False, epsilon=0.0, knn='matrix'):
super(DenseDynBlock2d, self).__init__()
self.body = DynConv2d(in_channels, out_channels, kernel_size, dilation, conv,
act, norm, bias, stochastic, epsilon, knn)
def forward(self, x):
dense = self.body(x)
return torch.cat((x, dense), 1)
class GraphPooling(nn.Module):
"""
Dense Dynamic graph pooling block
"""
def __init__(self, in_channels, ratio=0.5, conv='edge', **kwargs):
super(GraphPooling, self).__init__()
self.gnn = DynConv2d(in_channels, 1, conv=conv, **kwargs)
self.ratio = ratio
def forward(self, x):
""""""
score = torch.tanh(self.gnn(x))
_, indices = score.topk(int(x.shape[2]*self.ratio), 2)
return torch.gather(x, 2, indices.repeat(1, x.shape[1], 1, 1))
class VLADPool(torch.nn.Module):
def __init__(self, in_channels, num_clusters=64, alpha=100.0):
super(VLADPool, self).__init__()
self.in_channels = in_channels
self.num_clusters = num_clusters
self.alpha = alpha
self.lin = nn.Linear(in_channels, self.num_clusters, bias=True)
self.centroids = nn.Parameter(torch.rand(self.num_clusters, in_channels))
self._init_params()
def _init_params(self):
self.lin.weight = nn.Parameter((2.0 * self.alpha * self.centroids))
self.lin.bias = nn.Parameter(- self.alpha * self.centroids.norm(dim=1))
def forward(self, x, norm_intra=False, norm_L2=False):
B, C, N, _ = x.shape
x = x.squeeze().transpose(1, 2) # B, N, C
K = self.num_clusters
soft_assign = self.lin(x) # soft_assign of size (B, N, K)
        soft_assign = F.softmax(soft_assign, dim=1).unsqueeze(1)  # soft_assign of size (B, 1, N, K)
soft_assign = soft_assign.expand(-1, C, -1, -1) # soft_assign of size (B, C, N, K)
# input x of size (NxC)
xS = x.transpose(1, 2).unsqueeze(-1).expand(-1, -1, -1, K) # xS of size (B, C, N, K)
cS = self.centroids.unsqueeze(0).unsqueeze(0).expand(B, N, -1, -1).transpose(2, 3) # cS of size (B, C, N, K)
residual = (xS - cS) # residual of size (B, C, N, K)
residual = residual * soft_assign # vlad of size (B, C, N, K)
        vlad = torch.sum(residual, dim=2, keepdim=True)  # vlad of size (B, C, 1, K)
if (norm_intra):
vlad = F.normalize(vlad, p=2, dim=1) # intra-normalization
# print("i-norm vlad", vlad.shape)
if (norm_L2):
vlad = vlad.view(-1, K * C) # flatten
vlad = F.normalize(vlad, p=2, dim=1) # L2 normalize
# return vlad.view(B, -1, 1, 1)
return vlad
|
the-stack_106_28605 | """Example with fitting a 32 triangles soup to an image."""
import copy
import os
import cv2
import deodr
from deodr import differentiable_renderer_cython
from deodr.differentiable_renderer import Scene2D
from imageio import imread
import matplotlib.pyplot as plt
import numpy as np
def create_example_scene(n_tri=30, width=200, height=200):
material = np.double(imread(os.path.join(deodr.data_path, "trefle.jpg"))) / 255
height_material = material.shape[0]
width_material = material.shape[1]
scale_matrix = np.array([[height, 0], [0, width]])
scale_material = np.array([[height_material - 1, 0], [0, width_material - 1]])
triangles = []
for _ in range(n_tri):
tmp = scale_matrix.dot(
np.random.rand(2, 1).dot(np.ones((1, 3)))
+ 0.5 * (-0.5 + np.random.rand(2, 3))
)
while np.abs(np.linalg.det(np.vstack((tmp, np.ones((3)))))) < 1500:
tmp = scale_matrix.dot(
np.random.rand(2, 1).dot(np.ones((1, 3)))
+ 0.5 * (-0.5 + np.random.rand(2, 3))
)
if np.linalg.det(np.vstack((tmp, np.ones((3))))) > 0:
tmp = np.fliplr(tmp)
triangle = {}
triangle["ij"] = tmp.T
triangle["depths"] = np.random.rand(1) * np.ones(
(3, 1)
) # constant depth triangles to avoid collisions
triangle["textured"] = np.random.rand(1) > 0.5
if triangle["textured"]:
triangle["uv"] = (
scale_material.dot(np.array([[0, 1, 0.2], [0, 0.2, 1]])).T + 1
) # texture coordinate of the vertices
triangle["shade"] = np.random.rand(3, 1) # shade intensity at each vertex
triangle["colors"] = np.zeros((3, 3))
triangle["shaded"] = True
else:
triangle["uv"] = np.zeros((3, 2))
triangle["shade"] = np.zeros((3, 1))
triangle["colors"] = np.random.rand(3, 3)
            # colors of the vertices (can be gray, rgb color, or even other dimension
# vectors) when using simple linear interpolation across triangles
triangle["shaded"] = False
triangle["edgeflags"] = np.array(
[True, True, True]
) # all edges are discontinuity edges as no triangle pair share an edge
triangles.append(triangle)
scene = {}
for key in triangles[0].keys():
scene[key] = np.squeeze(
np.vstack([np.array(triangle[key]) for triangle in triangles])
)
scene["faces"] = np.arange(3 * n_tri).reshape(-1, 3).astype(np.uint32)
scene["faces_uv"] = np.arange(3 * n_tri).reshape(-1, 3).astype(np.uint32)
scene["height"] = height
scene["width"] = width
scene["texture"] = material
scene["nb_colors"] = 3
scene["background"] = np.tile(
np.array([0.3, 0.5, 0.7])[None, None, :], (height, width, 1)
)
return Scene2D(**scene)
def run(nb_max_iter=500, display=True):
print("process id=%d" % os.getpid())
np.random.seed(2)
scene_gt = create_example_scene()
antialiase_error = False
sigma = 1
image_target = np.zeros((scene_gt.height, scene_gt.width, scene_gt.nb_colors))
z_buffer = np.zeros((scene_gt.height, scene_gt.width))
differentiable_renderer_cython.renderScene(scene_gt, sigma, image_target, z_buffer)
n_vertices = len(scene_gt.depths)
displacement_magnitude_ij = 10
displacement_magnitude_uv = 0
displacement_magnitude_colors = 0
alpha_ij = 0.01
beta_ij = 0.80
alpha_uv = 0.03
beta_uv = 0.80
alpha_color = 0.001
beta_color = 0.70
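    # The descent loop below uses heavy-ball momentum for each parameter block:
    #     speed <- beta * speed - alpha * gradient
    #     param <- param + speed
    # with the (alpha, beta) pairs defined above for ij, uv and colors.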
max_uv = np.array(scene_gt.texture.shape[:2]) - 1
scene_init = copy.deepcopy(scene_gt)
scene_init.ij = (
scene_gt.ij + np.random.randn(n_vertices, 2) * displacement_magnitude_ij
)
scene_init.uv = (
scene_gt.uv + np.random.randn(n_vertices, 2) * displacement_magnitude_uv
)
scene_init.uv = np.maximum(scene_init.uv, 0)
scene_init.uv = np.minimum(scene_init.uv, max_uv)
scene_init.colors = (
scene_gt.colors + np.random.randn(n_vertices, 3) * displacement_magnitude_colors
)
final_loss = {}
for antialiase_error in [True, False]:
np.random.seed(2)
scene_iter = copy.deepcopy(scene_init)
speed_ij = np.zeros((n_vertices, 2))
speed_uv = np.zeros((n_vertices, 2))
speed_color = np.zeros((n_vertices, 3))
losses = []
for niter in range(nb_max_iter):
image, depth, loss_image, loss = scene_iter.render_compare_and_backward(
sigma, antialiase_error, image_target
)
print(f"iter {niter} loss = {loss}")
# imsave(os.path.join(iterfolder,f'soup_{niter}.png'), combinedIMage)
losses.append(loss)
if loss_image.ndim == 2:
loss_image = np.broadcast_to(loss_image[:, :, None], image.shape)
if display:
cv2.waitKey(1)
cv2.imshow(
"animation",
np.column_stack((image_target, image, loss_image))[:, :, ::-1],
)
if displacement_magnitude_ij > 0:
speed_ij = beta_ij * speed_ij - scene_iter.ij_b * alpha_ij
scene_iter.ij = scene_iter.ij + speed_ij
if displacement_magnitude_colors > 0:
speed_color = (
beta_color * speed_color - scene_iter.colors_b * alpha_color
)
scene_iter.colors = scene_iter.colors + speed_color
if displacement_magnitude_uv > 0:
speed_uv = beta_uv * speed_uv - scene_iter.uv_b * alpha_uv
scene_iter.uv = scene_iter.uv + speed_uv
                scene_iter.uv = np.maximum(scene_iter.uv, 0)
                scene_iter.uv = np.minimum(scene_iter.uv, max_uv)
if display:
plt.plot(losses, label="antialiaseError=%d" % antialiase_error)
final_loss[antialiase_error] = loss
if display:
plt.legend()
plt.show()
return final_loss
if __name__ == "__main__":
run()
|
the-stack_106_28610 | # Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nose.tools import with_setup
from ..connection.info import custom_setup, custom_teardown
handle = None
def setup():
global handle
handle = custom_setup()
def teardown():
custom_teardown(handle)
@with_setup(setup, teardown)
def test_001_sp_minimal():
from ucsmsdk.mometa.ls.LsServer import LsServer
mo = LsServer(parent_mo_or_dn="org-root", vmedia_policy_name="",
ext_ip_state="none", bios_profile_name="",
mgmt_fw_policy_name="", agent_policy_name="",
mgmt_access_policy_name="", dynamic_con_policy_name="",
kvm_mgmt_policy_name="", sol_policy_name="", uuid="0",
descr="", stats_policy_name="default", policy_owner="local",
ext_ip_pool_name="ext-mgmt", boot_policy_name="", usr_lbl="",
host_fw_policy_name="", vcon_profile_name="",
ident_pool_name="default", src_templ_name="",
local_disk_policy_name="", scrub_policy_name="",
power_policy_name="default", maint_policy_name="",
name="test_sp", resolve_remote="yes")
handle.add_mo(mo)
handle.commit()
##########################################################
    # Modify a single property in the SP created above
    # and generate XML with the DIRTY option set
##########################################################
import ucsmsdk.ucsxmlcodec as xc
from ucsmsdk.ucscoremeta import WriteXmlOption
obj = handle.query_dn("org-root/ls-test_sp")
obj.usr_lbl = "new_label"
print(xc.to_xml_str(obj.to_xml(option=WriteXmlOption.DIRTY)))
print(xc.to_xml_str(obj.to_xml(option=WriteXmlOption.ALL_CONFIG)))
print(xc.to_xml_str(obj.to_xml()))
##########################################################
# Delete the SP
##########################################################
obj = handle.query_dn("org-root/ls-test_sp")
handle.remove_mo(obj)
handle.commit()
@with_setup(setup, teardown)
def test_002_sp_expert():
'''
This case is generated based on SP expert mode creation wizard.
'''
from ucsmsdk.mometa.ls.LsServer import LsServer
from ucsmsdk.mometa.ls.LsVConAssign import LsVConAssign
from ucsmsdk.mometa.vnic.VnicEther import VnicEther
from ucsmsdk.mometa.vnic.VnicEtherIf import VnicEtherIf
from ucsmsdk.mometa.vnic.VnicFc import VnicFc
from ucsmsdk.mometa.vnic.VnicFcIf import VnicFcIf
from ucsmsdk.mometa.vnic.VnicFcNode import VnicFcNode
from ucsmsdk.mometa.storage.StorageIniGroup import StorageIniGroup
from ucsmsdk.mometa.vnic.VnicFcGroupDef import VnicFcGroupDef
from ucsmsdk.mometa.storage.StorageInitiator import StorageInitiator
from ucsmsdk.mometa.ls.LsPower import LsPower
from ucsmsdk.mometa.fabric.FabricVCon import FabricVCon
mo = LsServer(parent_mo_or_dn="org-root", vmedia_policy_name="",
ext_ip_state="none", bios_profile_name="SRIOV",
mgmt_fw_policy_name="", agent_policy_name="",
mgmt_access_policy_name="", dynamic_con_policy_name="",
kvm_mgmt_policy_name="", sol_policy_name="",
uuid="00000000-0000-0000-0000-0000000000bb", descr="",
stats_policy_name="default", policy_owner="local",
ext_ip_pool_name="ext-mgmt", boot_policy_name="default",
usr_lbl="", host_fw_policy_name="", vcon_profile_name="",
ident_pool_name="", src_templ_name="",
local_disk_policy_name="default", scrub_policy_name="",
power_policy_name="default", maint_policy_name="",
name="test_sp", resolve_remote="yes")
mo_1 = LsVConAssign(parent_mo_or_dn=mo, admin_vcon="any", order="1",
transport="ethernet", vnic_name="eth0")
mo_2 = LsVConAssign(parent_mo_or_dn=mo, admin_vcon="any", order="2",
transport="fc", vnic_name="fc0")
mo_3 = VnicEther(parent_mo_or_dn=mo, nw_ctrl_policy_name="", name="eth0",
admin_host_port="ANY", admin_vcon="any",
stats_policy_name="default", admin_cdn_name="",
switch_id="A", pin_to_group_name="", mtu="1500",
qos_policy_name="", adaptor_profile_name="",
ident_pool_name="default", order="1", nw_templ_name="",
addr="derived")
mo_3_1 = VnicEtherIf(parent_mo_or_dn=mo_3, default_net="yes",
name="default")
mo_4 = VnicFc(parent_mo_or_dn=mo, addr="derived", name="fc0",
admin_host_port="ANY", admin_vcon="any",
stats_policy_name="default", admin_cdn_name="", switch_id="A",
pin_to_group_name="", pers_bind="disabled",
pers_bind_clear="no", qos_policy_name="",
adaptor_profile_name="", ident_pool_name="", order="2",
nw_templ_name="", max_data_field_size="2048")
mo_4_1 = VnicFcIf(parent_mo_or_dn=mo_4, name="default")
mo_5 = VnicFcNode(parent_mo_or_dn=mo, ident_pool_name="",
addr="20:00:00:25:B5:00:00:00")
mo_6 = StorageIniGroup(parent_mo_or_dn=mo, name="test", descr="",
group_policy_name="", policy_name="",
policy_owner="local", rmt_disk_cfg_name="")
mo_6_1 = VnicFcGroupDef(parent_mo_or_dn=mo_6, storage_conn_policy_name="",
policy_owner="local", name="", descr="",
stats_policy_name="default")
mo_6_2 = StorageInitiator(parent_mo_or_dn=mo_6, policy_owner="local",
name="fc0", descr="")
mo_7 = LsPower(parent_mo_or_dn=mo, state="admin-up")
mo_8 = FabricVCon(parent_mo_or_dn=mo, placement="physical", fabric="NONE",
share="shared", select="all", transport="ethernet,fc",
id="1", inst_type="auto")
mo_9 = FabricVCon(parent_mo_or_dn=mo, placement="physical", fabric="NONE",
share="shared", select="all", transport="ethernet,fc",
id="2", inst_type="auto")
mo_10 = FabricVCon(parent_mo_or_dn=mo, placement="physical", fabric="NONE",
share="shared", select="all", transport="ethernet,fc",
id="3", inst_type="auto")
mo_11 = FabricVCon(parent_mo_or_dn=mo, placement="physical", fabric="NONE",
share="shared", select="all", transport="ethernet,fc",
id="4", inst_type="auto")
handle.add_mo(mo)
handle.commit()
obj = handle.query_dn("org-root/ls-test_sp")
handle.remove_mo(obj)
handle.commit()
|
the-stack_106_28611 | from time import sleep
from org.myrobotlab.service import InMoovArm
# create the IK3D service.
ik3d= Runtime.createAndStart("ik3d", "InverseKinematics3D")
ik3d.setCurrentArm(InMoovArm.getDHRobotArm())
# starting point
# x , y , z
x1 = 100
y1 = 100
z1 = 100
# ending point
# x , y , z
x2 = 500
y2 = 100
z2 = 100
startPoint = [ x1, y1, z1 ]
# move along the x in a straight line from 100 to 500
endPoint = [ x2 , y2 , z2 ]
# how many steps?
numSteps = 100
# delay between steps (in seconds)
delay = 0.1
# compute the per-step increment along each axis
dx = 1.0*(x2 - x1)/numSteps
dy = 1.0*(y2 - y1)/numSteps
dz = 1.0*(z2 - z1)/numSteps
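# With the values above, each step advances dx = (500 - 100) / 100 = 4.0 units
# along x, while dy and dz stay 0, so the hand moves in a straight line along x.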
# our current xyz
curX = startPoint[0]
curY = startPoint[1]
curZ = startPoint[2]
ik3d.moveTo(curX,curY,curZ)
for i in range(0, numSteps):
curX+=dx
curY+=dy
curZ+=dz
ik3d.moveTo(curX, curY, curZ)
sleep(delay)
|
the-stack_106_28612 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from airflow.models.dag import DAG
from airflow.providers.amazon.aws.hooks.eks import ClusterStates, NodegroupStates
from airflow.providers.amazon.aws.operators.eks import (
EksCreateClusterOperator,
EksCreateNodegroupOperator,
EksDeleteClusterOperator,
EksDeleteNodegroupOperator,
EksPodOperator,
)
from airflow.providers.amazon.aws.sensors.eks import EksClusterStateSensor, EksNodegroupStateSensor
# Example Jinja Template format, substitute your values:
"""
{
"cluster_name": "templated-cluster",
"cluster_role_arn": "arn:aws:iam::123456789012:role/role_name",
"nodegroup_subnets": ["subnet-12345ab", "subnet-67890cd"],
"resources_vpc_config": {
"subnetIds": ["subnet-12345ab", "subnet-67890cd"],
"endpointPublicAccess": true,
"endpointPrivateAccess": false
},
"nodegroup_name": "templated-nodegroup",
"nodegroup_role_arn": "arn:aws:iam::123456789012:role/role_name"
}
"""
with DAG(
dag_id='to-publish-manuals-templated',
default_args={'cluster_name': "{{ dag_run.conf['cluster_name'] }}"},
schedule_interval=None,
start_date=datetime(2021, 1, 1),
catchup=False,
max_active_runs=1,
tags=['example', 'templated'],
# render_template_as_native_obj=True is what converts the Jinja to Python objects, instead of a string.
render_template_as_native_obj=True,
) as dag:
# Create an Amazon EKS Cluster control plane without attaching a compute service.
create_cluster = EksCreateClusterOperator(
task_id='create_eks_cluster',
compute=None,
cluster_role_arn="{{ dag_run.conf['cluster_role_arn'] }}",
resources_vpc_config="{{ dag_run.conf['resources_vpc_config'] }}",
)
await_create_cluster = EksClusterStateSensor(
task_id='wait_for_create_cluster',
target_state=ClusterStates.ACTIVE,
)
create_nodegroup = EksCreateNodegroupOperator(
task_id='create_eks_nodegroup',
nodegroup_name="{{ dag_run.conf['nodegroup_name'] }}",
nodegroup_subnets="{{ dag_run.conf['nodegroup_subnets'] }}",
nodegroup_role_arn="{{ dag_run.conf['nodegroup_role_arn'] }}",
)
await_create_nodegroup = EksNodegroupStateSensor(
task_id='wait_for_create_nodegroup',
nodegroup_name="{{ dag_run.conf['nodegroup_name'] }}",
target_state=NodegroupStates.ACTIVE,
)
start_pod = EksPodOperator(
task_id="run_pod",
pod_name="run_pod",
image="amazon/aws-cli:latest",
cmds=["sh", "-c", "ls"],
labels={"demo": "hello_world"},
get_logs=True,
# Delete the pod when it reaches its final state, or the execution is interrupted.
is_delete_operator_pod=True,
)
delete_nodegroup = EksDeleteNodegroupOperator(
task_id='delete_eks_nodegroup',
nodegroup_name="{{ dag_run.conf['nodegroup_name'] }}",
)
await_delete_nodegroup = EksNodegroupStateSensor(
task_id='wait_for_delete_nodegroup',
nodegroup_name="{{ dag_run.conf['nodegroup_name'] }}",
target_state=NodegroupStates.NONEXISTENT,
)
delete_cluster = EksDeleteClusterOperator(
task_id='delete_eks_cluster',
)
await_delete_cluster = EksClusterStateSensor(
task_id='wait_for_delete_cluster',
target_state=ClusterStates.NONEXISTENT,
)
(
create_cluster
>> await_create_cluster
>> create_nodegroup
>> await_create_nodegroup
>> start_pod
>> delete_nodegroup
>> await_delete_nodegroup
>> delete_cluster
>> await_delete_cluster
)
|
the-stack_106_28613 | # vanilla topological sorting
"""
This method is based on DFS. It is not based on in degree and out degree!
"""
import collections
class Graph(object):
# directed graph
def __init__(self, vertices):
self.graph = collections.defaultdict(list)
self.v = vertices
def addEdge(self, u, v):
self.graph[u].append(v)
def getDic(self):
return self.graph
def topologicalSort(self):
stack = []
visited = [False]*self.v
for v in range(self.v):
if not visited[v]:
self.dfs(v, stack, visited)
print(stack)
def dfs(self, v, stack, visited):
visited[v] = True
for children in self.graph[v]:
if not visited[children]:
self.dfs(children, stack, visited)
stack.insert(0,v)
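# For contrast with the DFS approach above, an in-degree (Kahn's algorithm)
# version might look like this sketch (hypothetical helper, not used below):
#
#   from collections import deque
#   def kahn_sort(graph, vertices):
#       indegree = [0] * vertices
#       for u in graph:
#           for v in graph[u]:
#               indegree[v] += 1
#       queue = deque(v for v in range(vertices) if indegree[v] == 0)
#       order = []
#       while queue:
#           u = queue.popleft()
#           order.append(u)
#           for v in graph[u]:
#               indegree[v] -= 1
#               if indegree[v] == 0:
#                   queue.append(v)
#       return order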
class Graph2(object):
# graph directed
def __init__(self, edges, vertices):
self.dic = collections.defaultdict(list)
self.v = vertices
for x,y in edges:
self.dic[x].append(y)
def getDic(self):
return self.dic
if __name__ == '__main__':
ob = Graph(5)
ob.addEdge(0,1)
ob.addEdge(1,2)
ob.addEdge(2,3)
ob.addEdge(3,4)
ob.addEdge(2,4)
# ob.addEdge(3,4)
print(ob.getDic())
ob.topologicalSort()
# edges = [(0,1),(1,2),(2,3),(3,4),(2,4),(3,4)]
# ob2 = Graph2(edges=edges, vertices=5)
# print(ob2.getDic())
|
the-stack_106_28614 | # -*- coding: utf-8 -*-
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os.path
from setuptools import setup
from setuptools import find_packages
from dist_utils import fetch_requirements
from dist_utils import apply_vagrant_workaround
from python_runner import __version__
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
REQUIREMENTS_FILE = os.path.join(BASE_DIR, "requirements.txt")
install_reqs, dep_links = fetch_requirements(REQUIREMENTS_FILE)
apply_vagrant_workaround()
setup(
name="stackstorm-runner-python",
version=__version__,
description="Python action runner for StackStorm event-driven automation platform",
author="StackStorm",
author_email="[email protected]",
license="Apache License (2.0)",
url="https://stackstorm.com/",
install_requires=install_reqs,
dependency_links=dep_links,
test_suite="tests",
zip_safe=False,
include_package_data=True,
packages=find_packages(exclude=["setuptools", "tests"]),
package_data={"python_runner": ["runner.yaml"]},
scripts=[],
entry_points={
"st2common.runners.runner": [
"python-script = python_runner.python_runner",
],
},
)
|
the-stack_106_28615 | #!usr/bin/python3
# * Using a head.direction attribute would be nicer, but a global variable is used so that kids can follow the code more easily.
import turtle
import time
import random
wn = turtle.Screen()
wn.title("SNAKE!")
wn.bgcolor("steel blue")
wn.setup(width = 600, height = 600)
wn.tracer(0) # turn off automatic screen updates; we call wn.update() manually
# [2] Snake object
head = turtle.Turtle()
head.speed(0) # fastest animation speed
head.shape("square")
head.color("limegreen")
head.up()
direction = "stop"
# [3] Snake movement
def move():
    if direction == "up":
        y = head.ycor() # store the snake's current y coordinate
        head.sety(y + 20) # move up by 20
    if direction == "down":
        y = head.ycor() # store the snake's current y coordinate
        head.sety(y - 20) # move down by 20
    if direction == "right":
        x = head.xcor() # store the snake's current x coordinate
        head.setx(x + 20) # move right by 20
    if direction == "left":
        x = head.xcor() # store the snake's current x coordinate
        head.setx(x - 20) # move left by 20
# [4] Changing the snake's direction
def up():
    global direction
if direction != "down":
direction = "up"
def down():
global direction
if direction != "up":
direction = "down"
def right():
global direction
if direction != "left":
direction = "right"
def left():
global direction
if direction != "right":
direction = "left"
wn.listen() # make the computer (turtle) respond to key presses
wn.onkey(up, "Up")
wn.onkey(down, 'Down')
wn.onkey(left, "Left")
wn.onkey(right, 'Right')
# [5] food
food = turtle.Turtle()
food.speed(0)
food.shape("circle")
food.color('red')
food.up()
x = random.randint(-290, 290)
y = random.randint(-290, 290)
food.goto(x, y)
# [9] score
score = 0
pen = turtle.Turtle()
pen.color("white")
pen.hideturtle()
pen.up()
pen.goto(0, 250)
# [6] Growing the snake
tails = []
# main loop
while True:
if head.distance(food) < 15:
x = random.randint(-290, 290)
y = random.randint(-290, 290)
food.goto(x, y)
score += 10
        # [5] grow the snake: add a new tail segment
new_tail = turtle.Turtle()
new_tail.speed(0)
new_tail.shape("square")
new_tail.color("white")
new_tail.up()
tails.append(new_tail)
    # [8] Body collision
#for i in range(1, len(tails) - 1):
for tail in tails:
if tail.distance(head) < 15:
time.sleep(1)
head.goto(0, 0)
direction = "stop"
    # loop from the tail end: each segment moves forward to where the segment ahead of it was
if len(tails) > 0:
for index in range(len(tails) - 1, 0, -1):
x = tails[index-1].xcor()
y = tails[index-1].ycor()
tails[index].goto(x, y)
x = head.xcor()
y = head.ycor()
tails[0].goto(x, y)
        # the first segment moves into the head's position
    # [7] Wall collision
if (head.xcor() > 290 or head.xcor() < -290
or head.ycor() > 290 or head.ycor() < -290):
time.sleep(1)
head.goto(0, 0)
direction = "stop"
if direction == "stop":
score = 0
for tail in tails:
tail.hideturtle()
tails = []
pen.clear()
pen.write(f"Score: {score}", align = 'center', font = ("Courier", 20))
move()
wn.update()
    time.sleep(0.1) # update the screen every 0.1 seconds
|
the-stack_106_28616 | import mal_types as mal
class MalEnv():
"""Mal environment class.
An environment mapping symbols to Mal objects. Note that the symbols should
be strings, not Mal objects, although the initializer accepts both strings
naming symbols and mal_types.Symbol.
"""
def __init__(self, outer=None, data=None, binds=[], exprs=[]):
self.outer = outer
if data is None:
self.data = {}
else:
self.data = data
for i in range(len(binds)):
if type(binds[i]) is mal.Symbol:
sym = binds[i].name
else:
sym = binds[i]
if sym == '&':
sym = binds[i + 1]
val = mal.List(list(exprs)[i:])
self.set(sym, val)
break
else:
if i < len(exprs):
val = exprs[i]
else:
val = mal.NIL
self.set(sym, val)
def set(self, symbol, value):
if type(symbol) is mal.Symbol:
symbol = symbol.name
if type(symbol) is str:
self.data[symbol] = value
return value
else:
return mal.Error("TypeError", "Cannot bind to non-symbol")
def find(self, symbol):
if type(symbol) is mal.Symbol:
symbol = symbol.name
if symbol in self.data:
return self
elif self.outer is None:
return None
else:
return self.outer.find(symbol)
def get(self, symbol):
if type(symbol) is mal.Symbol:
symbol = symbol.name
env = self.find(symbol)
if env:
return env.data[symbol]
else:
return mal.Error("SymbolError",
"Symbol value is void: '{}'".format(symbol))
|
the-stack_106_28620 | import logging
from datetime import datetime
from django.conf import settings
from django import forms
from django.core.mail import mail_admins
from django.template.loader import render_to_string
from .models import get_gsheets_client
log = logging.getLogger(__name__)
class SuggestionForm(forms.Form):
ward_id = forms.CharField(widget=forms.HiddenInput())
councillor_name = forms.CharField(label='Councillor name', required=False)
councillor_email = forms.EmailField(label='Councillor email address', required=False)
councillor_phone = forms.CharField(label='Councillor phone number', required=False)
email = forms.CharField(label="Your email address", required=False)
# honeypot, if this is filled in it's probably spam
website = forms.CharField(label='Leave this blank', required=False)
def save(self, request):
# check for honey pot, if this is filled in, ignore the submission
if self.cleaned_data['website']:
log.info(f"Honeypot not empty, ignoring spammy submission: {self.cleaned_data}")
return
sheets = get_gsheets_client()
spreadsheet = sheets.open_by_key(settings.GOOGLE_SHEETS_SHEET_KEY)
worksheet = spreadsheet.worksheet('Suggestions')
log.info(f"Saving suggestion: {self.cleaned_data}")
worksheet.append_row([
datetime.now().isoformat(),
self.cleaned_data['ward_id'],
self.cleaned_data['councillor_name'],
self.cleaned_data['councillor_phone'],
self.cleaned_data['councillor_email'],
self.cleaned_data['email'],
request.META.get('HTTP_USER_AGENT', ''),
request.META.get('HTTP_X_FORWARDED_FOR', ''),
])
log.info("Saved")
log.info("Sending email")
mail_admins('New Ward Councillor Suggestion', '',
html_message=render_to_string('councillor/suggestion_email.html', self.cleaned_data))
log.info("Sent")
|
the-stack_106_28621 | import numpy as np
import pandas as pd
df = pd.read_csv('tests/bugs/issue_19/issue_19_data_1.csv')
import datetime
def convert_date(x):
y = np.nan
try:
y = datetime.datetime.strptime(str(x), "%Y")
except:
# bad format
pass
return y
df['date'] = df['date'].apply(convert_date)
df_train = df[['date' , 'number']].dropna().reset_index(drop=True)
print(df_train)
import pyaf.ForecastEngine as autof
lEngine = autof.cForecastEngine()
lEngine.train(iInputDS = df_train, iTime = 'date', iSignal = 'number', iHorizon = 7);
print(lEngine.getModelInfo())
# lEngine.standardPlots('outputs/tour')
df_forecast = lEngine.forecast(iInputDS = df_train, iHorizon = 7)
print(df_forecast.columns)
print(df_forecast[['date', 'number_Forecast', 'number_Forecast_Lower_Bound', 'number_Forecast_Upper_Bound']].tail(7))
|
the-stack_106_28622 |
from __future__ import print_function
import os
import platform
import sys
from mvIMPACT import acquire
from mvIMPACT.Common import exampleHelper
import ctypes
import numpy as np
import datetime as dt
import matplotlib
from LEDDriver import detect_LED_devices, LEDDriver, LEDException
from spectracular.fpi_driver import detectFPIDevices, createFPIDevice
import fpipy as fp
import fpipy.conventions as c
import xarray as xr
from tqdm.autonotebook import tqdm, trange
# Argument count
argc = len(sys.argv)
print("Argument count: ", argc)
# Arguments passed
for i in range(1, argc):
print(sys.argv[i], end = " ")
print("")
if argc == 1:
exposureTime = "60000"
print("No exposure time argument given! Using default 60000")
else:
exposureTime = sys.argv[1]
print("Exposure time given as argument: ", exposureTime)
print("Using exposure time: ", exposureTime)
print("Exposure time converted to string: ", str(exposureTime))
#-----------------------------------------
# Camera
#-----------------------------------------
devMgr = acquire.DeviceManager()
pDev = exampleHelper.getDeviceFromUserInput(devMgr)
if pDev == None:
exampleHelper.requestENTERFromUser()
sys.exit(-1)
pDev.open()
#
# Set camera settings
#
ac = acquire.AcquisitionControl(pDev)
# print("Old TriggerMode:")
# print(ac.triggerMode.readS())
# print("New TriggerMode:")
# ac.triggerMode.writeS("On")
# print(ac.triggerMode.readS())
# print("Old TriggerSource:")
# print(ac.triggerSource.readS())
# print("New TriggerSource:")
# ac.triggerSource.writeS("Software")
# print(ac.triggerSource.readS())
print("Old ExposureAuto:")
print(ac.exposureAuto.readS())
print("New ExposureAuto:")
ac.exposureAuto.writeS("Off")
print(ac.exposureAuto.readS())
ifc = acquire.ImageFormatControl(pDev)
print("Old pixelformat:")
print(ifc.pixelFormat.readS())
print("New pixelformat:")
ifc.pixelFormat.writeS("BayerGB12")
# ifc.pixelFormat.writeS("RGB8")
print(ifc.pixelFormat.readS())
print("Old pixelColorFilter:")
print(ifc.pixelColorFilter.readS())
imgp = acquire.ImageProcessing(pDev)
# "Auto" originally
print("Old colorProcessing:")
print(imgp.colorProcessing.readS())
imgp.colorProcessing.writeS("Raw")
print("New colorProcessing:")
print(imgp.colorProcessing.readS())
print("Old ExposureTime:")
print(ac.exposureTime.readS())
print("New ExposureTime:")
# ac.exposureTime.writeS("150000")
# ac.exposureTime.writeS("60000")
ac.exposureTime.writeS(str(exposureTime))
print(ac.exposureTime.readS())
anlgc = acquire.AnalogControl(pDev)
print("Old BalanceWhiteAuto:")
print(anlgc.balanceWhiteAuto.readS())
print("New BalanceWhiteAuto:")
anlgc.balanceWhiteAuto.writeS("Off")
print(anlgc.balanceWhiteAuto.readS())
print("Old Gamma:")
print(anlgc.gamma.readS())
print("New Gamma:")
anlgc.gamma.writeS("1")
print(anlgc.gamma.readS())
print("Old Gain:")
print(anlgc.gain.readS())
print("New Gain:")
anlgc.gain.writeS("1.9382002601")
print(anlgc.gain.readS())
print("Old GainAuto:")
print(anlgc.gainAuto.readS())
print("New GainAuto:")
anlgc.gainAuto.writeS("Off")
print(anlgc.gainAuto.readS())
# -----------------------------------------
# Test
# -----------------------------------------
#
# Taking image
#
fi = acquire.FunctionInterface(pDev)
fi.imageRequestSingle()
exampleHelper.manuallyStartAcquisitionIfNeeded(pDev, fi)
requestNr = fi.imageRequestWaitFor(10000)
# Add this from SingleCapture.cpp
exampleHelper.manuallyStopAcquisitionIfNeeded(pDev, fi)
if fi.isRequestNrValid(requestNr):
print("Request number valid!")
pRequest = fi.getRequest(requestNr)
print("Print request: " + str(pRequest))
print("Print request result: " + str(pRequest.requestResult))
print("Print request result readS: " + pRequest.requestResult.readS())
if pRequest.isOK:
print("Request OK!")
height = pRequest.imageHeight.read()
width = pRequest.imageWidth.read()
channelCount = pRequest.imageChannelCount.read()
channelBitDepth = pRequest.imageChannelBitDepth.read()
imageSize = pRequest.imageSize.read()
print("Image height: " + str(height))
print("Image width: " + str(width))
print("Image channel count: " + str(channelCount))
print("Image channel bit depth: " + str(channelBitDepth))
print("Image size: " + str(imageSize))
# For systems with NO mvDisplay library support
cbuf = (ctypes.c_char * pRequest.imageSize.read()).from_address(int(pRequest.imageData.read()))
print(cbuf)
channelType = np.uint16 if channelBitDepth > 8 else np.uint8
        arr = np.frombuffer(cbuf, dtype=channelType).copy()
arr.shape = (height, width, channelCount)
print(arr)
# print("Start saving PNG image...")
# matplotlib.image.imsave('testimage.png', arr)
fi.imageRequestUnlock(requestNr)
exampleHelper.manuallyStopAcquisitionIfNeeded(pDev, fi)
#-----------------------------------------
# LED driver
#-----------------------------------------
LED_IDS = [
# ( VID, PID) (and the same in decimal)
('1FC9', '0083'), (8137, 131),
]
"""Known VID:PID pairs of LED devices."""
LED_HWIDS = [
# Strings returned by read_hardware_id
'1000e016 aefba123 580267dd f5001982',
'10025018 af28a028 5a66a511 f5001983'
]
ledportdevice = detect_LED_devices()
ledportstring = '/dev/ttyACM0'
print('Trying to use ' + ledportstring + ' for LED control')
# led = LEDDriver('/dev/ttyACM0')
# led = LEDDriver('COM10')
led = LEDDriver(ledportstring)
print(led)
led.open()
print('Turning off LEDs')
led.L(0)
#-----------------------------------------
# MFPI
#-----------------------------------------
FPI_IDS = [
# ( VID, PID) (and the same in decimal)
('1FC9', '0083'), (8137, 131),
]
"""Known VID:PID pairs of FPI devices."""
FPI_HWIDS = [
# Strings returned by read_hardware_id
'd02b012 af380065 5b5bbeab f50019c1'
]
print('Trying to create FPI device')
fpi = createFPIDevice(detectFPIDevices(FPI_IDS, FPI_HWIDS)[0].device)
print(fpi)
# ------------------------------------------
# camazing.pixelformats
# ------------------------------------------
class PixelFormatError(Exception):
pass
def get_valid_range(pxformat):
"""Return the valid range of values for a given pixel format.
Parameters
----------
pxformat: str
Pixel format as given by cameras GenICam PixelFormat feature.
Returns
------
np.array
A vector of [min_value, max_value] with the same type as the decoded
pixel format.
"""
try:
valid_range = _ranges[pxformat]
except KeyError:
raise PixelFormatError(f'No range found for the pixel format `{pxformat}')
return valid_range
def get_decoder(pxformat):
"""Return a numpy decoder for a given GenICam pixel format.
Parameters
----------
pxformat: str
Pixel format as given by cameras PixelFormat.
Returns
-------
decoder: function
Function for decoding a buffer
"""
try:
decoder = _decoders[pxformat]
except KeyError:
raise PixelFormatError(f'No decoder for the pixel format `{pxformat}`')
return decoder
def decode_raw(dtype):
"""Decode raw buffer with a given bit depth."""
def decode(buf, shape):
return np.frombuffer(
buf,
dtype=dtype
).reshape(*shape).copy()
return decode
def decode_RGB(bpp):
"""Decode RGB buffer with a given bit depth."""
def decode(buf, shape):
return np.frombuffer(
buf,
dtype=bpp,
).reshape(*shape, 3).copy()
return decode
def decode_YCbCr422_8():
"""Decode YCbCr422 buffer with given bit depth."""
raise NotImplementedError
_decoders = {
'BayerRG8': decode_raw(np.uint8),
'BayerGB8': decode_raw(np.uint8),
'BayerGB12': decode_raw(np.uint16),
'BayerRG12': decode_raw(np.uint16),
'BayerRG16': decode_raw(np.uint16),
'RGB8': decode_RGB(np.uint8),
'Mono8': decode_raw(np.uint8),
'Mono16': decode_raw(np.uint16),
}
_ranges = {
'BayerRG8': np.uint8([0, 255]),
'BayerGB8': np.uint8([0, 255]),
'BayerGB12': np.uint16([0, 4095]),
'BayerRG12': np.uint16([0, 4095]),
'BayerRG16': np.uint16([0, 65535]),
'RGB8': np.uint8([0, 255]),
'Mono8': np.uint8([0, 255]),
'Mono16': np.uint16([0, 65535]),
}
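# Example of how these helpers combine for the BayerGB12 format configured on
# the camera above (a sketch; cbuf is the raw buffer from an image request):
#
#   decode = get_decoder("BayerGB12")      # -> decode_raw(np.uint16)
#   lo, hi = get_valid_range("BayerGB12")  # -> 0, 4095
#   frame = decode(cbuf, (height, width))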
# ------------------------------------------
# camazing.core
# ------------------------------------------
class DanielCamera:
def __init__(self, pDev):
self._meta = None
self._pDev = pDev
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
print("Exit DanielCamera")
def _get_frame(self, timeout=1):
"""Helper function"""
self._pixel_format = "BayerGB12"
self._buffer_decoder = get_decoder(self._pixel_format)
self._image_range = get_valid_range(self._pixel_format)
# data = self._buffer_decoder(buffer.raw_buffer, (height, width))
#------------------------
# Take frame
#------------------------
self._fi = acquire.FunctionInterface(pDev)
self._fi.imageRequestSingle()
exampleHelper.manuallyStartAcquisitionIfNeeded(self._pDev, self._fi)
requestNr = self._fi.imageRequestWaitFor(20000)
exampleHelper.manuallyStopAcquisitionIfNeeded(self._pDev, self._fi)
data = []
if self._fi.isRequestNrValid(requestNr):
print("Request number valid! " + str(requestNr))
pRequest = self._fi.getRequest(requestNr)
print("Print request: " + str(pRequest))
print("Print request result: " + str(pRequest.requestResult))
print("Print request result readS: " + pRequest.requestResult.readS())
if pRequest.isOK:
print("Request OK!")
height = pRequest.imageHeight.read()
width = pRequest.imageWidth.read()
channelCount = pRequest.imageChannelCount.read()
channelBitDepth = pRequest.imageChannelBitDepth.read()
imageSize = pRequest.imageSize.read()
print("Image height: " + str(height))
print("Image width: " + str(width))
print("Image channel count: " + str(channelCount))
print("Image channel bit depth: " + str(channelBitDepth))
print("Image size: " + str(imageSize))
cbuf = (ctypes.c_char * pRequest.imageSize.read()).from_address(int(pRequest.imageData.read()))
# Check if this is now correct buffer format!
# Convert with numpy if needed
data = self._buffer_decoder(cbuf, (height, width))
print("Data from buffer_decoder()")
print(data)
self._fi.imageRequestUnlock(requestNr)
else:
print("imageRequestWaitFor failed (" + str(requestNr) + ", " + acquire.ImpactAcquireException.getErrorCodeAsString(requestNr) + ")")
exampleHelper.manuallyStopAcquisitionIfNeeded(self._pDev, self._fi)
return data
def _get_frame_with_meta(self):
"""Fetch a frame and add metadata from the camera."""
data = self._get_frame()
print("Data from _get_frame(): ")
print(data)
height, width = data.shape[0], data.shape[1]
coords = {
"x": ("x", np.arange(0, width) + 0.5),
"y": ("y", np.arange(0, height) + 0.5),
"timestamp": dt.datetime.today().timestamp(),
}
if 'RGB' in self._pixel_format:
dims = ('y', 'x', 'colour')
coords['colour'] = list('RGB')
elif 'YUV' in self._pixel_format:
dims = ('y', 'x', 'colour')
coords['colour'] = list('YUV')
elif 'YCbCr' in self._pixel_format:
dims = ('y', 'x', 'colour')
coords['colour'] = ['Y', 'Cb', 'Cr']
else:
dims = ('y', 'x')
# Keep some meta by default, if available
# self._meta = []
# for feature in ['Gain', 'ExposureTime', 'PixelFormat', 'PixelColorFilter']:
# if feature in self._features:
# self._meta.append(feature)
# Add metadata as coordinates
# if self._meta:
# coords.update({k: self._features[k].value for k in self._meta})
# Replace these hard-coded values by reading from camera!
coords['Gain'] = "1.9382002601"
coords['ExposureTime'] = 150000
coords['PixelFormat'] = "BayerGB12"
coords['PixelColorFilter'] = "BayerGB"
frame = xr.DataArray(
data,
name="frame",
dims=dims,
coords=coords,
attrs={
'valid_range': self._image_range,
}
)
return frame
def get_frame(self):
return self._get_frame_with_meta()
# ------------------------------------------
# HSI
# ------------------------------------------
class CaptureException(Exception):
pass
class HSI:
"""Hyperspectral imager"""
def __init__(self, camera=None, fpi=None):
self.camera = camera
self.fpi = fpi
self.dataset = None
self.calibration_file = None
def read_calibration_file(self, calibration_file):
self.dataset = fp.io.read_calibration(calibration_file)
self.calibration_file = calibration_file
def take_dark_reference(self, number_of_frames=40, method="median"):
self.read_calibration_file(self.calibration_file)
# original_trigger_source = self.camera["TriggerSource"].value
# self.camera["TriggerSource"].value = "Software"
frames = []
with self.camera:
for idx in trange(0, number_of_frames):
frame = self.camera.get_frame()
frame.coords[c.image_index] = idx
frames.append(frame)
# self.camera["TriggerSource"].value = original_trigger_source
dark = xr.concat(frames, dim=c.image_index)
if method == "median":
dark = dark.median(dim=c.image_index)
elif method == "mean":
dark = dark.mean(dim=c.image_index)
else:
raise ValueError("Unknown method: '" + method + "'")
self.dataset[c.dark_reference_data] = dark
return dark
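# Hedged note: the dark reference stored in self.dataset above is intended to
# be consumed by fpipy during the raw-to-radiance conversion at the bottom of
# this script; the exact handling depends on the fpipy version in use.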
def capture_cube(self, *, selectors=None):
if selectors is None:
dataset = self.dataset.copy()
else:
dataset = self.dataset.sel(**selectors).copy()
frames = []
# if self.camera["TriggerSource"].value == "Software":
with self.camera:
for idx in tqdm(dataset[c.image_index].values):
setpoint = dataset[c.setpoint_data].sel(
**{c.setpoint_coord: "SP1",
c.image_index: idx,
}).values
self.fpi.set_setpoint(setpoint, wait=True)
frame = self.camera.get_frame()
frame.coords[c.image_index] = idx
frames.append(frame)
# else:
# with self.camera:
# self.create_fpi_taskfile(dataset)
# self.camera["StrobeDuration"].value = \
# self.camera["ExposureTime"].value
# self.fpi.run_taskfile()
# for idx, setpoint in enumerate(tqdm(
# dataset.setpoint.sel(setpoint_index="SP1").values)):
# frame = self.camera.get_frame()
# frame.coords[c.image_index] = idx
# frames.append(frame)
dataset[c.cfa_data] = xr.concat(frames, dim=c.image_index)
return dataset
def create_fpi_taskfile(self, dataset):
raise NotImplementedError()
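# The driver code below assumes that pDev (the mvIMPACT device), fpi (the
# Fabry-Perot interferometer controller), led (the LED board) and exposureTime
# were all initialised earlier in this script, outside the section shown here.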
danielCam = DanielCamera(pDev)
print(danielCam)
hsi = HSI(danielCam, fpi)
print(hsi)
hsi.read_calibration_file('led_set_g_calib_1.txt')
input("Put the lens cap on")
hsi.take_dark_reference()
print(hsi.dataset.dark)
input("Take the lens cap off and set white reference")
print('Turning on LEDs')
# VNIR1 and VNIR2
#
# 810.0078184
# 848.0322309
#
# 000011110000011110000011110
# * Reverse for LED control:
# 011110000011110000011110000
#
led.L(0b011110000011110000011110000)
print('Capturing white reference')
white_raw = hsi.capture_cube()
input("Set image (only for radiance)")
print('Capturing cube')
raw = hsi.capture_cube()
print(raw)
print('Turning off LEDs')
led.L(0)
print('Calculating radiance')
rad = fp.raw_to_radiance(raw, keep_variables=['dark'])
print(rad)
print(rad['radiance'])
print('Calculating white radiance')
rad['white'] = fp.raw_to_radiance(white_raw, keep_variables=[]).radiance
print(rad['white'])
print('Calculating reflectance')
rad['reflectance'] = rad.radiance / rad.white
print(rad['reflectance'])
# reflectance = fp.radiance_to_reflectance(rad, white_raw, keep_variables=[])
# print(reflectance)
print('Extracting single frame from cube and saving to PNG')
test = rad["radiance"]
print('Radiance data')
testdata = test.data
print(testdata)
print('White data')
whitedata = rad['white'].data
print(whitedata)
print('Reflectance data')
reflectdata = rad['reflectance'].data
print(reflectdata)
print ("Wavelengths")
wavelengths = rad["wavelength"].data
print(wavelengths)
print ("Wavelengths count")
wavelengthCount = len(wavelengths)
print(wavelengthCount)
# Multiple peaks result in multiple of single calib file row count
imagelastindex = wavelengthCount
#
# Save radiance images
#
print('Start saving radiance images')
for x in range(0, imagelastindex):
wavelengthValue = wavelengths[x]
wavelengthStr = str(wavelengthValue)
wavelengthReplacedStr = wavelengthStr.replace(".", "p")
print('Saving wavelength: ' + wavelengthStr)
rad1 = testdata[:,:,x]
matplotlib.image.imsave('rad_' + wavelengthReplacedStr + 'nm_' + str(x) + '_exp_' + exposureTime + '.png', rad1, cmap='gray')
white1 = whitedata[:,:,x]
# matplotlib.image.imsave('white_' + wavelengthReplacedStr + 'nm_' + str(x) + '.png', white1, cmap='gray')
ref1 = reflectdata[:,:,x]
matplotlib.image.imsave('refl_' + wavelengthReplacedStr + 'nm_' + str(x) + '_exp_' + exposureTime + '.png', ref1, cmap='gray', vmin=0,vmax=1)
# import matplotlib.pyplot as plt
# plt.gray()
#
# Save raw images and demosaic images
#
# print('Start saving raw data')
# for x in range(1, 2):
# Raw data values
# dn1 = raw.dn.isel(index=x)
# matplotlib.image.imsave('raw_' + str(x) + '.png', dn1)
# Demosaic to get three colour channels
# dm1 = fp.demosaic(dn1, 'BayerGB', 'bilinear')
# dm1_red = dm1[:,:,0]
# dm1_green = dm1[:,:,1]
# dm1_blue = dm1[:,:,2]
# matplotlib.image.imsave('raw_' + str(x) + '_demosaic_red.png', dm1_red)
# matplotlib.image.imsave('raw_' + str(x) + '_demosaic_green.png', dm1_green)
# matplotlib.image.imsave('raw_' + str(x) + '_demosaic_blue.png', dm1_blue)
# fi.acquisitionStart()
# self["TriggerSoftware"].execute()
# acquire.TriggerControl.triggerSoftware()
# fi.acquisitionStop()
|
the-stack_106_28625 | import logging
import os
import sys
import warnings
from django.apps import AppConfig
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
log = logging.getLogger('z.startup')
class CoreConfig(AppConfig):
name = 'olympia.core'
verbose_name = _('Core')
def ready(self):
super(CoreConfig, self).ready()
# Ignore Python warnings unless we're running in debug mode.
if not settings.DEBUG:
warnings.simplefilter('ignore')
self.set_recursion_limit()
self.enable_urllib_certificate_checking()
def enable_urllib_certificate_checking(self):
# From requests's packages/urllib3/contrib/pyopenssl.py
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
def set_recursion_limit(self):
"""Set explicit recursion limit if set in the environment.
This is set here to make sure we're setting it always
when we initialize Django, also when we're loading celery (which
is calling django.setup too).
This is only being used for the amo-validator so initializing this late
should be fine.
"""
if 'RECURSION_LIMIT' in os.environ:
try:
limit = int(os.environ['RECURSION_LIMIT'])
except (TypeError, ValueError):
log.warning('Unable to parse RECURSION_LIMIT "{}"'.format(
os.environ['RECURSION_LIMIT']))
else:
sys.setrecursionlimit(limit)
log.info('Set RECURSION_LIMIT to {}'.format(limit))
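# Illustrative use (value and command are examples only):
#   RECURSION_LIMIT=20000 python manage.py runserver
# would raise the interpreter's recursion limit to 20000 during startup.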
|
the-stack_106_28626 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import os
import shutil
import numpy as np
from dmriqcpy.io.report import Report
from dmriqcpy.viz.graph import graph_tractogram
from dmriqcpy.analysis.stats import stats_tractogram
from dmriqcpy.viz.screenshot import screenshot_tracking
from dmriqcpy.viz.utils import analyse_qa, dataframe_to_html
from dmriqcpy.io.utils import add_overwrite_arg, assert_inputs_exist,\
assert_outputs_exist
DESCRIPTION = """
Compute the tractogram report in HTML format.
"""
def _build_arg_parser():
p = argparse.ArgumentParser(description=DESCRIPTION,
formatter_class=argparse.RawTextHelpFormatter)
p.add_argument('output_report',
help='HTML report')
p.add_argument('--tractograms', nargs='+',
help='Tractograms in format supported by Nibabel')
p.add_argument('--t1', nargs='+',
help='T1 images in Nifti format')
add_overwrite_arg(p)
return p
def main():
parser = _build_arg_parser()
args = parser.parse_args()
if len(args.tractograms) != len(args.t1):
parser.error("Not the same number of images in input.")
all_images = np.concatenate([args.tractograms, args.t1])
assert_inputs_exist(parser, all_images)
assert_outputs_exist(parser, args, [args.output_report, "data", "libs"])
if os.path.exists("data"):
shutil.rmtree("data")
os.makedirs("data")
if os.path.exists("libs"):
shutil.rmtree("libs")
name = "Tracking"
columns = ["Nb streamlines"]
warning_dict = {}
summary, stats = stats_tractogram(columns, args.tractograms)
warning_dict[name] = analyse_qa(summary, stats, ["Nb streamlines"])
warning_list = np.concatenate([filenames for filenames in warning_dict[name].values()])
warning_dict[name]['nb_warnings'] = len(np.unique(warning_list))
graphs = []
graph = graph_tractogram("Tracking", columns, summary)
graphs.append(graph)
summary_dict = {}
stats_html = dataframe_to_html(stats)
summary_dict[name] = stats_html
metrics_dict = {}
subjects_dict = {}
for subj_metric, t1 in zip(args.tractograms, args.t1):
screenshot_path = screenshot_tracking(subj_metric, t1, "data")
summary_html = dataframe_to_html(summary.loc[subj_metric])
subjects_dict[subj_metric] = {}
subjects_dict[subj_metric]['screenshot'] = screenshot_path
subjects_dict[subj_metric]['stats'] = summary_html
metrics_dict[name] = subjects_dict
nb_subjects = len(args.tractograms)
report = Report(args.output_report)
report.generate(title="Quality Assurance tractograms",
nb_subjects=nb_subjects, summary_dict=summary_dict,
graph_array=graphs, metrics_dict=metrics_dict,
warning_dict=warning_dict)
if __name__ == '__main__':
main()
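# Illustrative invocation (script and file names are hypothetical):
#   python dmriqc_tractogram.py report.html \
#       --tractograms sub-01.trk sub-02.trk \
#       --t1 sub-01_t1.nii.gz sub-02_t1.nii.gz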
|
the-stack_106_28627 | import torch
import torch.nn as nn
import os
import glob
class Model(nn.Module):
def __init__(self, name):
super(Model, self).__init__()
self.name = name
def save(self, path, epoch=0):
complete_path = os.path.join(path, self.name)
if not os.path.exists(complete_path):
os.makedirs(complete_path)
torch.save(self.state_dict(),
os.path.join(complete_path,
"model-{}.pth".format(str(epoch).zfill(5))))
def save_results(self, path, data):
raise NotImplementedError("Model subclass must implement this method.")
def load(self, path, modelfile=None):
complete_path = os.path.join(path, self.name)
if not os.path.exists(complete_path):
raise IOError("{} directory does not exist in {}".format(self.name, path))
if modelfile is None:
model_files = glob.glob(complete_path+"/*")
mf = max(model_files)
else:
mf = os.path.join(complete_path, modelfile)
self.load_state_dict(torch.load(mf))
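# Hedged usage sketch (MyNet is a hypothetical subclass that defines its layers
# and implements save_results):
#
#     model = MyNet("mynet")
#     model.save("checkpoints", epoch=3)   # writes checkpoints/mynet/model-00003.pth
#     model.load("checkpoints")            # loads the newest model-*.pth by file name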
|
the-stack_106_28629 | import math
import numpy as np
from common.numpy_fast import interp, clip
from common.realtime import sec_since_boot
from selfdrive.modeld.constants import T_IDXS
from selfdrive.controls.lib.radar_helpers import _LEAD_ACCEL_TAU
from selfdrive.controls.lib.lead_mpc_lib import libmpc_py
from selfdrive.controls.lib.drive_helpers import MPC_COST_LONG, CONTROL_N
from selfdrive.swaglog import cloudlog
from selfdrive.config import Conversions as CV
CRUISE_GAP_BP = [1., 2., 3., 4.]
CRUISE_GAP_V = [1.3, 1.6, 2.1, 2.7]
AUTO_TR_BP = [3., 11.1, 19.4, 30.]
AUTO_TR_V = [1.0, 1.05, 1.35, 1.88]
AUTO_TR_ENABLED = True
AUTO_TR_CRUISE_GAP = 4
MPC_T = list(np.arange(0,1.,.2)) + list(np.arange(1.,10.6,.6))
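# Worked example for the automatic follow-distance table above: with the auto
# gap selected, interp(v_ego, AUTO_TR_BP, AUTO_TR_V) yields TR = 1.05 s at
# 11.1 m/s, clamps to 1.0 s below 3 m/s and to 1.88 s above 30 m/s, and
# linearly interpolates in between.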
class LeadMpc():
def __init__(self, mpc_id):
self.lead_id = mpc_id
self.reset_mpc()
self.prev_lead_status = False
self.prev_lead_x = 0.0
self.new_lead = False
self.last_cloudlog_t = 0.0
self.n_its = 0
self.duration = 0
self.status = False
def reset_mpc(self):
ffi, self.libmpc = libmpc_py.get_libmpc(self.lead_id)
self.libmpc.init(MPC_COST_LONG.TTC, MPC_COST_LONG.DISTANCE,
MPC_COST_LONG.ACCELERATION, MPC_COST_LONG.JERK)
self.mpc_solution = ffi.new("log_t *")
self.cur_state = ffi.new("state_t *")
self.cur_state[0].v_ego = 0
self.cur_state[0].a_ego = 0
self.a_lead_tau = _LEAD_ACCEL_TAU
def set_cur_state(self, v, a):
v_safe = max(v, 1e-3)
a_safe = a
self.cur_state[0].v_ego = v_safe
self.cur_state[0].a_ego = a_safe
def update(self, CS, radarstate, v_cruise):
v_ego = CS.vEgo
if self.lead_id == 0:
lead = radarstate.leadOne
else:
lead = radarstate.leadTwo
self.status = lead.status and lead.modelProb > .5
# Setup current mpc state
self.cur_state[0].x_ego = 0.0
if lead is not None and lead.status:
x_lead = lead.dRel
v_lead = max(0.0, lead.vLead)
a_lead = lead.aLeadK
if (v_lead < 0.1 or -a_lead / 2.0 > v_lead):
v_lead = 0.0
a_lead = 0.0
self.a_lead_tau = lead.aLeadTau
self.new_lead = False
if not self.prev_lead_status or abs(x_lead - self.prev_lead_x) > 2.5:
self.libmpc.init_with_simulation(v_ego, x_lead, v_lead, a_lead, self.a_lead_tau)
self.new_lead = True
self.prev_lead_status = True
self.prev_lead_x = x_lead
self.cur_state[0].x_l = x_lead
self.cur_state[0].v_l = v_lead
else:
self.prev_lead_status = False
# Fake a fast lead car, so mpc keeps running
self.cur_state[0].x_l = 50.0
self.cur_state[0].v_l = v_ego + 10.0
a_lead = 0.0
self.a_lead_tau = _LEAD_ACCEL_TAU
cruise_gap = int(clip(CS.cruiseGap, 1., 4.))
if AUTO_TR_ENABLED and cruise_gap == AUTO_TR_CRUISE_GAP:
TR = interp(v_ego, AUTO_TR_BP, AUTO_TR_V)
else:
TR = interp(float(cruise_gap), CRUISE_GAP_BP, CRUISE_GAP_V)
# Calculate mpc
t = sec_since_boot()
self.n_its = self.libmpc.run_mpc(self.cur_state, self.mpc_solution, self.a_lead_tau, a_lead, TR)
self.v_solution = interp(T_IDXS[:CONTROL_N], MPC_T, self.mpc_solution.v_ego)
self.a_solution = interp(T_IDXS[:CONTROL_N], MPC_T, self.mpc_solution.a_ego)
self.duration = int((sec_since_boot() - t) * 1e9)
# Reset if NaN or goes through lead car
crashing = any(lead - ego < -50 for (lead, ego) in zip(self.mpc_solution[0].x_l, self.mpc_solution[0].x_ego))
nans = any(math.isnan(x) for x in self.mpc_solution[0].v_ego)
backwards = min(self.mpc_solution[0].v_ego) < -0.01
if ((backwards or crashing) and self.prev_lead_status) or nans:
if t > self.last_cloudlog_t + 5.0:
self.last_cloudlog_t = t
cloudlog.warning("Longitudinal mpc %d reset - backwards: %s crashing: %s nan: %s" % (
self.lead_id, backwards, crashing, nans))
self.libmpc.init(MPC_COST_LONG.TTC, MPC_COST_LONG.DISTANCE,
MPC_COST_LONG.ACCELERATION, MPC_COST_LONG.JERK)
self.cur_state[0].v_ego = v_ego
self.cur_state[0].a_ego = 0.0
self.a_mpc = CS.aEgo
self.prev_lead_status = False
|
the-stack_106_28630 | # Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Default simple profiler to use
"""
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.orm.session import Session
from metadata.orm_profiler.metrics.registry import Metrics
from metadata.orm_profiler.profiles.core import SingleProfiler
class SimpleProfiler(SingleProfiler):
"""
Pre-built profiler with a simple
set of metrics that we can use as
a default.
"""
def __init__(self, session: Session, col: InstrumentedAttribute, table):
_metrics = [
Metrics.MIN(col),
Metrics.COUNT(col),
Metrics.STDDEV(col),
Metrics.NULL_COUNT(col),
Metrics.NULL_RATIO(col),
]
super().__init__(*_metrics, session=session, table=table)
class SimpleTableProfiler(SingleProfiler):
"""
Default set of table metrics to run
"""
def __init__(self, session: Session, table):
_metrics = [Metrics.ROW_NUMBER()]
super().__init__(*_metrics, session=session, table=table)
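# Hedged construction sketch (User is a hypothetical SQLAlchemy declarative
# model and session an active SQLAlchemy Session):
#
#     col_profiler = SimpleProfiler(session=session, col=User.age, table=User)
#     table_profiler = SimpleTableProfiler(session=session, table=User)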
|
the-stack_106_28632 | import typing
from argparse import Namespace
from kgtk.cli_argparse import KGTKArgumentParser, KGTKFiles
def parser():
return {
'help': 'Split a sorted KGTK edge file into multiple byte sized files',
'description': 'Split a sorted KGTK edge file into smaller files, keeping the Qnode '
'boundaries intact. Helpful in parallel processing and debugging.'
}
def add_arguments_extended(parser: KGTKArgumentParser, parsed_shared_args: Namespace):
"""
Parse arguments
Args:
parser (argparse.ArgumentParser)
"""
from kgtk.value.kgtkvalueoptions import KgtkValueOptions
from kgtk.io.kgtkreader import KgtkReader, KgtkReaderOptions
_expert: bool = parsed_shared_args._expert
parser.add_input_file()
parser.add_argument('--output-path', action='store', type=str, dest="output_path", required=True,
help="Path of an existing folder where the split files will be written")
parser.add_argument('--file-prefix', action='store', type=str, default='split_', dest='file_prefix', required=False,
help="file name prefix, will be appended to output file names before a number")
parser.add_argument('--split-by-qnode', default=False, action="store_true", dest='split_by_qnode',
help="If specified, all edges for a qnode will be written to a separate file, "
"qnode will be added to the file name. WARNING: If there are millions of Qnodes, "
"this option will create millions of file."
" Default [FALSE]")
parser.add_argument('--gzipped-output', default=False, action="store_true", dest='gzipped_output',
help="If specified, the output split files will be gzipped. Default FALSE")
parser.add_argument('--lines', action='store', dest='lines', type=int, default=1000000, required=False,
help="number of lines in each split file. The actual number of lines will exceed this number, "
"since Qnode boundaries are preserved.")
KgtkReader.add_debug_arguments(parser, expert=_expert)
KgtkReaderOptions.add_arguments(parser, mode_options=True, expert=_expert)
KgtkValueOptions.add_arguments(parser, expert=_expert)
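# Illustrative invocation (the subcommand name and input flag are assumptions
# and may differ in the installed kgtk CLI; the options themselves are the ones
# defined above):
#   kgtk split -i sorted-edges.tsv.gz --output-path ./parts --lines 500000 --gzipped-output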
def run(input_file: KGTKFiles,
output_path: str,
file_prefix: str,
split_by_qnode: bool,
lines: int,
gzipped_output: bool,
errors_to_stdout: bool = False,
**kwargs
) -> int:
import sys
from pathlib import Path
from kgtk.io.kgtkwriter import KgtkWriter
from kgtk.exceptions import KGTKException
from kgtk.io.kgtkreader import KgtkReader, KgtkReaderOptions
from kgtk.value.kgtkvalueoptions import KgtkValueOptions
def write_files(error_file, file_number, file_prefix, kr, lines_to_write, output_path, Qnode, reader_options,
split_by_qnode, suffix):
if split_by_qnode:
output_kgtk_file = Path(f'{output_path}/{Qnode}{suffix}')
else:
output_kgtk_file = Path(f'{output_path}/{file_prefix}{file_number}{suffix}')
kw = KgtkWriter.open(kr.column_names,
output_kgtk_file,
mode=KgtkWriter.Mode[kr.mode.name],
use_mgzip=reader_options.use_mgzip, # Hack!
mgzip_threads=reader_options.mgzip_threads, # Hack!
error_file=error_file,
verbose=False,
very_verbose=False)
for r in lines_to_write:
kw.write(r)
kw.close()
input_kgtk_file: Path = KGTKArgumentParser.get_input_file(input_file)
error_file: typing.TextIO = sys.stdout if errors_to_stdout else sys.stderr
# Build the option structures.
reader_options: KgtkReaderOptions = KgtkReaderOptions.from_dict(kwargs)
value_options: KgtkValueOptions = KgtkValueOptions.from_dict(kwargs)
suffix = ".tsv.gz" if gzipped_output else ".tsv"
kr: KgtkReader = KgtkReader.open(input_kgtk_file,
options=reader_options,
value_options=value_options,
error_file=error_file,
verbose=False,
very_verbose=False,
)
node1_idx: int = kr.get_node1_column_index()
label_idx: int = kr.get_label_column_index()
node2_idx: int = kr.get_node2_column_index()
if node1_idx < 0 or label_idx < 0 or node2_idx < 0:
print("Error: Not a valid file: {}. A valid edge file should have these columns: node1, label and node2".format(
input_file), file=error_file, flush=True)
kr.close()
raise KGTKException("Missing columns.")
prev = None
lines_to_write = list()
file_number = 0
for row in kr:
node = row[node1_idx]
if node.startswith('Q') or node.startswith('P'):
if prev is None:
prev = node
if not are_nodes_equal(prev, node):
if split_by_qnode or len(lines_to_write) >= lines:
write_files(error_file, file_number, file_prefix, kr, lines_to_write, output_path, prev,
reader_options, split_by_qnode, suffix)
lines_to_write = list()
file_number += 1
prev = node
lines_to_write.append(row)
if len(lines_to_write) > 0:
write_files(error_file, file_number, file_prefix, kr, lines_to_write, output_path, prev, reader_options,
split_by_qnode, suffix)
return 0
def are_nodes_equal(q1, q2):
if q1.strip() == "" or q2.strip() == "":
return False
if q1.strip() == q2.strip():
return True
if q1.strip() == q2.split('-')[0]: # qualifiers
return True
return False
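# For example, are_nodes_equal("Q42", "Q42-abc-0") is True because the
# qualifier edge id shares the "Q42" prefix before the first '-', while
# are_nodes_equal("Q42", "Q43") is False.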
|
the-stack_106_28633 | from airflow import DAG
import pandas as pd
import datetime as dt
from airflow.operators.python import PythonOperator
from minio import Minio
import os
import glob
import functions as f
data_lake_server= f.var['data_lake_server_airflow']
data_lake_login= f.var['data_lake_login']
data_lake_password= f.var['data_lake_password']
client = Minio(
endpoint= data_lake_server,
access_key= data_lake_login,
secret_key= data_lake_password,
secure=False
)
dag = DAG(
dag_id="etl_client_clustering",
description="ETL - Client Clustering DataFrame",
start_date=dt.datetime(2021, 11, 29),
schedule_interval= "@once")
##################### olist_customers_dataset #####################
def extract_customers():
# load data to a tmp folder
client.fget_object(
bucket_name= 'processing',
object_name= 'olist_customers_dataset.parquet',
file_path= 'tmp/olist_customers_dataset.parquet'
)
extract_customers_task = PythonOperator(
task_id= "extract_customers",
python_callable= extract_customers,
dag= dag)
##################### olist_orders_dataset #####################
def extract_orders():
# load data to a tmp folder
client.fget_object(
bucket_name= 'processing',
object_name= 'olist_orders_dataset.parquet',
file_path= 'tmp/olist_orders_dataset.parquet'
)
extract_orders_task = PythonOperator(
task_id= "extract_orders",
python_callable= extract_orders,
dag= dag)
##################### olist_order_items_dataset #####################
def extract_order_items():
# load data to a tmp folder
client.fget_object(
bucket_name= 'processing',
object_name= 'olist_order_items_dataset.parquet',
file_path= 'tmp/olist_order_items_dataset.parquet'
)
extract_order_items_task = PythonOperator(
task_id= "extract_order_items",
python_callable= extract_order_items,
dag= dag)
##################### olist_geolocation_dataset #####################
def extract_geolocation():
# load data to a tmp folder
client.fget_object(
bucket_name= 'processing',
object_name= 'olist_geolocation_dataset.parquet',
file_path= 'tmp/olist_geolocation_dataset.parquet'
)
extract_geolocation_task = PythonOperator(
task_id= "extract_geolocation",
python_callable= extract_geolocation,
dag= dag)
def transform_data():
customers = pd.read_parquet('tmp/olist_customers_dataset.parquet')
orders = pd.read_parquet('tmp/olist_orders_dataset.parquet')
order_items = pd.read_parquet('tmp/olist_order_items_dataset.parquet')
geolocation = pd.read_parquet('tmp/olist_geolocation_dataset.parquet')
geo_means = geolocation.groupby(
['geolocation_zip_code_prefix', 'geolocation_city', 'geolocation_state']
)[['geolocation_lat', 'geolocation_lng']].mean().reset_index()
customers = customers.merge(
geo_means,
left_on= ['customer_zip_code_prefix', 'customer_city', 'customer_state'],
right_on= ['geolocation_zip_code_prefix', 'geolocation_city', 'geolocation_state'],
how= 'left'
).drop(columns= ['geolocation_zip_code_prefix', 'geolocation_city', 'geolocation_state'])
del geo_means
price_per_order = order_items.groupby('order_id').price.sum().reset_index().rename(columns= {'price': 'monetary'})
orders = pd.merge(orders, price_per_order, on= 'order_id', how= 'inner')
orders.order_purchase_timestamp = pd.to_datetime(orders.order_purchase_timestamp)
ult_compra = orders.order_purchase_timestamp.max()
orders['days_ult_compra'] = (ult_compra - orders.order_purchase_timestamp).dt.days
df_rfm = pd.merge(
customers[['customer_unique_id', 'customer_id', 'geolocation_lat', 'geolocation_lng']],
orders[['customer_id', 'monetary', 'days_ult_compra']],
on= 'customer_id',
how= 'left')\
.groupby('customer_unique_id')\
.agg({
'geolocation_lat': 'mean',
'geolocation_lng': 'mean',
'customer_id': 'count',
'monetary': 'sum',
'days_ult_compra': 'min'
}).reset_index()\
.rename(
columns= {
'customer_id': 'frequency',
'days_ult_compra': 'recency'
}
)
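# The resulting frame has one row per customer_unique_id with mean geolocation
# coordinates plus the classic RFM features: recency (days since the most
# recent purchase), frequency (number of orders) and monetary (total spend).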
df_rfm.to_parquet('tmp/dataframe_rfm.parquet')
client.fput_object(
bucket_name= 'agrupamento-clientes',
object_name= 'dataframe_rfm.parquet',
file_path= 'tmp/dataframe_rfm.parquet'
)
transform_data_task = PythonOperator(
task_id= "transform_data",
python_callable= transform_data,
dag= dag)
##################### clean #####################
def clean():
files_remove = glob.glob('tmp/*')
for tmp_file in files_remove:
os.remove(tmp_file)
clean_task = PythonOperator(
task_id= "clean",
python_callable= clean,
dag= dag)
[extract_customers_task, extract_order_items_task, extract_orders_task, extract_geolocation_task] >> transform_data_task
transform_data_task >> clean_task
|