repository_name (string, 5–67) | func_path_in_repository (string, 4–234) | func_name (string, 0–314) | whole_func_string (string, 52–3.87M) | language (6 classes) | func_code_string (string, 52–3.87M) | func_documentation_string (string, 1–47.2k) | func_code_url (string, 85–339) |
---|---|---|---|---|---|---|---|
ecordell/pymacaroons | pymacaroons/serializers/json_serializer.py | JsonSerializer._serialize_v1 | def _serialize_v1(self, macaroon):
'''Serialize the macaroon in JSON format v1.
@param macaroon the macaroon to serialize.
@return JSON macaroon.
'''
serialized = {
'identifier': utils.convert_to_string(macaroon.identifier),
'signature': macaroon.signature,
}
if macaroon.location:
serialized['location'] = macaroon.location
if macaroon.caveats:
serialized['caveats'] = [
_caveat_v1_to_dict(caveat) for caveat in macaroon.caveats
]
return json.dumps(serialized) | python | def _serialize_v1(self, macaroon):
'''Serialize the macaroon in JSON format v1.
@param macaroon the macaroon to serialize.
@return JSON macaroon.
'''
serialized = {
'identifier': utils.convert_to_string(macaroon.identifier),
'signature': macaroon.signature,
}
if macaroon.location:
serialized['location'] = macaroon.location
if macaroon.caveats:
serialized['caveats'] = [
_caveat_v1_to_dict(caveat) for caveat in macaroon.caveats
]
return json.dumps(serialized) | Serialize the macaroon in JSON format v1.
@param macaroon the macaroon to serialize.
@return JSON macaroon. | https://github.com/ecordell/pymacaroons/blob/c941614df15fe732ea432a62788e45410bcb868d/pymacaroons/serializers/json_serializer.py#L20-L36 |
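A minimal usage sketch (not part of the dataset row) showing how this private `_serialize_v1`/`_serialize_v2` pair is normally reached through the public pymacaroons API; the location, identifier, key, and caveat values are hypothetical.

```python
from pymacaroons import Macaroon
from pymacaroons.serializers import JsonSerializer

# Hypothetical location/identifier/key values, for illustration only.
m = Macaroon(
    location='http://example.com/',
    identifier='key-id-1',
    key='a very secret signing key',
)
m.add_first_party_caveat('account = 1234')

# Macaroon.serialize() delegates to JsonSerializer, which in turn calls
# _serialize_v1 or _serialize_v2 depending on the macaroon's version.
json_text = m.serialize(serializer=JsonSerializer())
print(json_text)
```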
ecordell/pymacaroons | pymacaroons/serializers/json_serializer.py | JsonSerializer._serialize_v2 | def _serialize_v2(self, macaroon):
'''Serialize the macaroon in JSON format v2.
@param macaroon the macaroon to serialize.
@return JSON macaroon in v2 format.
'''
serialized = {}
_add_json_binary_field(macaroon.identifier_bytes, serialized, 'i')
_add_json_binary_field(binascii.unhexlify(macaroon.signature_bytes),
serialized, 's')
if macaroon.location:
serialized['l'] = macaroon.location
if macaroon.caveats:
serialized['c'] = [
_caveat_v2_to_dict(caveat) for caveat in macaroon.caveats
]
return json.dumps(serialized) | python | def _serialize_v2(self, macaroon):
'''Serialize the macaroon in JSON format v2.
@param macaroon the macaroon to serialize.
@return JSON macaroon in v2 format.
'''
serialized = {}
_add_json_binary_field(macaroon.identifier_bytes, serialized, 'i')
_add_json_binary_field(binascii.unhexlify(macaroon.signature_bytes),
serialized, 's')
if macaroon.location:
serialized['l'] = macaroon.location
if macaroon.caveats:
serialized['c'] = [
_caveat_v2_to_dict(caveat) for caveat in macaroon.caveats
]
return json.dumps(serialized) | Serialize the macaroon in JSON format v2.
@param macaroon the macaroon to serialize.
@return JSON macaroon in v2 format. | https://github.com/ecordell/pymacaroons/blob/c941614df15fe732ea432a62788e45410bcb868d/pymacaroons/serializers/json_serializer.py#L38-L55 |
ecordell/pymacaroons | pymacaroons/serializers/json_serializer.py | JsonSerializer.deserialize | def deserialize(self, serialized):
'''Deserialize a JSON macaroon depending on the format.
@param serialized the macaroon in JSON format.
@return the macaroon object.
'''
deserialized = json.loads(serialized)
if deserialized.get('identifier') is None:
return self._deserialize_v2(deserialized)
else:
return self._deserialize_v1(deserialized) | python | def deserialize(self, serialized):
'''Deserialize a JSON macaroon depending on the format.
@param serialized the macaroon in JSON format.
@return the macaroon object.
'''
deserialized = json.loads(serialized)
if deserialized.get('identifier') is None:
return self._deserialize_v2(deserialized)
else:
return self._deserialize_v1(deserialized) | Deserialize a JSON macaroon depending on the format.
@param serialized the macaroon in JSON format.
@return the macaroon object. | https://github.com/ecordell/pymacaroons/blob/c941614df15fe732ea432a62788e45410bcb868d/pymacaroons/serializers/json_serializer.py#L57-L67 |
ecordell/pymacaroons | pymacaroons/serializers/json_serializer.py | JsonSerializer._deserialize_v1 | def _deserialize_v1(self, deserialized):
'''Deserialize a JSON macaroon in v1 format.
@param serialized the macaroon in v1 JSON format.
@return the macaroon object.
'''
from pymacaroons.macaroon import Macaroon, MACAROON_V1
from pymacaroons.caveat import Caveat
caveats = []
for c in deserialized.get('caveats', []):
caveat = Caveat(
caveat_id=c['cid'],
verification_key_id=(
utils.raw_b64decode(c['vid']) if c.get('vid')
else None
),
location=(
c['cl'] if c.get('cl') else None
),
version=MACAROON_V1
)
caveats.append(caveat)
return Macaroon(
location=deserialized.get('location'),
identifier=deserialized['identifier'],
caveats=caveats,
signature=deserialized['signature'],
version=MACAROON_V1
) | python | def _deserialize_v1(self, deserialized):
'''Deserialize a JSON macaroon in v1 format.
@param serialized the macaroon in v1 JSON format.
@return the macaroon object.
'''
from pymacaroons.macaroon import Macaroon, MACAROON_V1
from pymacaroons.caveat import Caveat
caveats = []
for c in deserialized.get('caveats', []):
caveat = Caveat(
caveat_id=c['cid'],
verification_key_id=(
utils.raw_b64decode(c['vid']) if c.get('vid')
else None
),
location=(
c['cl'] if c.get('cl') else None
),
version=MACAROON_V1
)
caveats.append(caveat)
return Macaroon(
location=deserialized.get('location'),
identifier=deserialized['identifier'],
caveats=caveats,
signature=deserialized['signature'],
version=MACAROON_V1
) | Deserialize a JSON macaroon in v1 format.
@param serialized the macaroon in v1 JSON format.
@return the macaroon object. | https://github.com/ecordell/pymacaroons/blob/c941614df15fe732ea432a62788e45410bcb868d/pymacaroons/serializers/json_serializer.py#L69-L99 |
ecordell/pymacaroons | pymacaroons/serializers/json_serializer.py | JsonSerializer._deserialize_v2 | def _deserialize_v2(self, deserialized):
'''Deserialize a JSON macaroon v2.
@param serialized the macaroon in JSON format v2.
@return the macaroon object.
'''
from pymacaroons.macaroon import Macaroon, MACAROON_V2
from pymacaroons.caveat import Caveat
caveats = []
for c in deserialized.get('c', []):
caveat = Caveat(
caveat_id=_read_json_binary_field(c, 'i'),
verification_key_id=_read_json_binary_field(c, 'v'),
location=_read_json_binary_field(c, 'l'),
version=MACAROON_V2
)
caveats.append(caveat)
return Macaroon(
location=_read_json_binary_field(deserialized, 'l'),
identifier=_read_json_binary_field(deserialized, 'i'),
caveats=caveats,
signature=binascii.hexlify(
_read_json_binary_field(deserialized, 's')),
version=MACAROON_V2
) | python | def _deserialize_v2(self, deserialized):
'''Deserialize a JSON macaroon v2.
@param serialized the macaroon in JSON format v2.
@return the macaroon object.
'''
from pymacaroons.macaroon import Macaroon, MACAROON_V2
from pymacaroons.caveat import Caveat
caveats = []
for c in deserialized.get('c', []):
caveat = Caveat(
caveat_id=_read_json_binary_field(c, 'i'),
verification_key_id=_read_json_binary_field(c, 'v'),
location=_read_json_binary_field(c, 'l'),
version=MACAROON_V2
)
caveats.append(caveat)
return Macaroon(
location=_read_json_binary_field(deserialized, 'l'),
identifier=_read_json_binary_field(deserialized, 'i'),
caveats=caveats,
signature=binascii.hexlify(
_read_json_binary_field(deserialized, 's')),
version=MACAROON_V2
) | Deserialize a JSON macaroon v2.
@param serialized the macaroon in JSON format v2.
@return the macaroon object. | https://github.com/ecordell/pymacaroons/blob/c941614df15fe732ea432a62788e45410bcb868d/pymacaroons/serializers/json_serializer.py#L101-L125 |
zooniverse/panoptes-python-client | panoptes_client/project_preferences.py | ProjectPreferences.find | def find(cls, id='', user=None, project=None):
"""
Like :py:meth:`.PanoptesObject.find` but can also query by user and
project.
- **user** and **project** can be either a :py:class:`.User` and
:py:class:`.Project` instance respectively, or they can be given as
IDs. If either argument is given, the other is also required.
"""
if not id:
if not (user and project):
raise ValueError('Both user and project required')
if (
isinstance(user, User)
and isinstance(project, Project)
):
_user_id = user.id
_project_id = project.id
elif (
isinstance(user, (int, str,))
and isinstance(project, (int, str,))
):
_user_id = user
_project_id = project
else:
raise TypeError
id = cls.where(user_id=_user_id, project_id=_project_id).next().id
return super(ProjectPreferences, cls).find(id) | python | def find(cls, id='', user=None, project=None):
"""
Like :py:meth:`.PanoptesObject.find` but can also query by user and
project.
- **user** and **project** can be either a :py:class:`.User` and
:py:class:`.Project` instance respectively, or they can be given as
IDs. If either argument is given, the other is also required.
"""
if not id:
if not (user and project):
raise ValueError('Both user and project required')
if (
isinstance(user, User)
and isinstance(project, Project)
):
_user_id = user.id
_project_id = project.id
elif (
isinstance(user, (int, str,))
and isinstance(project, (int, str,))
):
_user_id = user
_project_id = project
else:
raise TypeError
id = cls.where(user_id=_user_id, project_id=_project_id).next().id
return super(ProjectPreferences, cls).find(id) | Like :py:meth:`.PanoptesObject.find` but can also query by user and
project.
- **user** and **project** can be either a :py:class:`.User` and
:py:class:`.Project` instance respectively, or they can be given as
IDs. If either argument is given, the other is also required. | https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/project_preferences.py#L21-L49 |
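A hedged sketch of calling this classmethod; the user and project IDs are hypothetical, and an authenticated `Panoptes.connect()` session (shown later in this table) is assumed.

```python
from panoptes_client import Panoptes, Project, User
from panoptes_client.project_preferences import ProjectPreferences

# Placeholder credentials; a real Zooniverse account is required.
Panoptes.connect(username='example', password='example')

# Either pass User/Project instances or their raw IDs; both are required
# when no preference ID is given.
prefs = ProjectPreferences.find(
    user=User.find('1234'),        # hypothetical user ID
    project=Project.find('5678'),  # hypothetical project ID
)
print(prefs.id)
```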
zooniverse/panoptes-python-client | panoptes_client/project_preferences.py | ProjectPreferences.save_settings | def save_settings(cls, project=None, user=None, settings=None):
"""
Save settings for a user without first fetching their preferences.
- **user** and **project** can be either a :py:class:`.User` and
:py:class:`.Project` instance respectively, or they can be given as
IDs. If either argument is given, the other is also required.
- **settings** is a :py:class:`dict` containing the settings to be
saved.
"""
if (isinstance(settings, dict)):
_to_update = settings
if (
isinstance(user, User)
and isinstance(project, Project)
):
_user_id = user.id
_project_id = project.id
elif (
isinstance(user, (int, str,))
and isinstance(project, (int, str,))
):
_user_id = user
_project_id = project
else:
raise TypeError
cls.http_post(
'update_settings',
json={
'project_preferences': {
'user_id': _user_id,
'project_id': _project_id,
'settings': _to_update,
}
}
)
else:
raise TypeError | python | def save_settings(cls, project=None, user=None, settings=None):
"""
Save settings for a user without first fetching their preferences.
- **user** and **project** can be either a :py:class:`.User` and
:py:class:`.Project` instance respectively, or they can be given as
IDs. If either argument is given, the other is also required.
- **settings** is a :py:class:`dict` containing the settings to be
saved.
"""
if (isinstance(settings, dict)):
_to_update = settings
if (
isinstance(user, User)
and isinstance(project, Project)
):
_user_id = user.id
_project_id = project.id
elif (
isinstance(user, (int, str,))
and isinstance(project, (int, str,))
):
_user_id = user
_project_id = project
else:
raise TypeError
cls.http_post(
'update_settings',
json={
'project_preferences': {
'user_id': _user_id,
'project_id': _project_id,
'settings': _to_update,
}
}
)
else:
raise TypeError | Save settings for a user without first fetching their preferences.
- **user** and **project** can be either a :py:class:`.User` and
:py:class:`.Project` instance respectively, or they can be given as
IDs. If either argument is given, the other is also required.
- **settings** is a :py:class:`dict` containing the settings to be
saved. | https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/project_preferences.py#L52-L90 |
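A short sketch of the same call using raw IDs instead of instances; the IDs and the `settings` payload are hypothetical, and an authenticated session is assumed.

```python
from panoptes_client.project_preferences import ProjectPreferences

# Raw IDs work as well as User/Project instances; both must be given.
ProjectPreferences.save_settings(
    project=5678,                       # hypothetical project ID
    user=1234,                          # hypothetical user ID
    settings={'workflow_id': '999'},    # hypothetical settings payload
)
```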
zooniverse/panoptes-python-client | panoptes_client/subject.py | Subject.async_saves | def async_saves(cls):
"""
Returns a context manager to allow asynchronously creating subjects.
Using this context manager will create a pool of threads which will
create multiple subjects at once and upload any local files
simultaneously.
The recommended way to use this is with the `with` statement::
with Subject.async_saves():
local_files = [...]
for filename in local_files:
s = Subject()
s.links.project = 1234
s.add_location(filename)
s.save()
Alternatively, you can manually shut down the thread pool::
pool = Subject.async_saves()
local_files = [...]
try:
for filename in local_files:
s = Subject()
s.links.project = 1234
s.add_location(filename)
s.save()
finally:
pool.shutdown()
"""
cls._local.save_exec = ThreadPoolExecutor(
max_workers=ASYNC_SAVE_THREADS
)
return cls._local.save_exec | python | def async_saves(cls):
"""
Returns a context manager to allow asynchronously creating subjects.
Using this context manager will create a pool of threads which will
create multiple subjects at once and upload any local files
simultaneously.
The recommended way to use this is with the `with` statement::
with Subject.async_saves():
local_files = [...]
for filename in local_files:
s = Subject()
s.links.project = 1234
s.add_location(filename)
s.save()
Alternatively, you can manually shut down the thread pool::
pool = Subject.async_saves()
local_files = [...]
try:
for filename in local_files:
s = Subject()
s.links.project = 1234
s.add_location(filename)
s.save()
finally:
pool.shutdown()
"""
cls._local.save_exec = ThreadPoolExecutor(
max_workers=ASYNC_SAVE_THREADS
)
return cls._local.save_exec | Returns a context manager to allow asynchronously creating subjects.
Using this context manager will create a pool of threads which will
create multiple subjects at once and upload any local files
simultaneously.
The recommended way to use this is with the `with` statement::
with Subject.async_saves():
local_files = [...]
for filename in local_files:
s = Subject()
s.links.project = 1234
s.add_location(filename)
s.save()
Alternatively, you can manually shut down the thread pool::
pool = Subject.async_saves()
local_files = [...]
try:
for filename in local_files:
s = Subject()
s.links.project = 1234
s.add_location(filename)
s.save()
finally:
pool.shutdown() | https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/subject.py#L63-L96 |
zooniverse/panoptes-python-client | panoptes_client/subject.py | Subject.save | def save(self, client=None):
"""
Like :py:meth:`.PanoptesObject.save`, but also uploads any local files
which have previously been added to the subject with
:py:meth:`add_location`. Automatically retries uploads on error.
If multiple local files are to be uploaded, several files will be
uploaded simultaneously to save time.
"""
if not client:
client = Panoptes.client()
async_save = hasattr(self._local, 'save_exec')
with client:
if async_save:
try:
# The recursive call will exec in a new thread, so
# self._local.save_exec will be undefined above
self._async_future = self._local.save_exec.submit(
self.save,
client=client,
)
return
except RuntimeError:
del self._local.save_exec
async_save = False
if not self.metadata == self._original_metadata:
self.modified_attributes.add('metadata')
response = retry(
super(Subject, self).save,
attempts=UPLOAD_RETRY_LIMIT,
sleeptime=RETRY_BACKOFF_INTERVAL,
retry_exceptions=(PanoptesAPIException,),
log_args=False,
)
if not response:
return
try:
if async_save:
upload_exec = self._local.save_exec
else:
upload_exec = ThreadPoolExecutor(
max_workers=ASYNC_SAVE_THREADS,
)
for location, media_data in zip(
response['subjects'][0]['locations'],
self._media_files
):
if not media_data:
continue
for media_type, url in location.items():
upload_exec.submit(
retry,
self._upload_media,
args=(url, media_data, media_type),
attempts=UPLOAD_RETRY_LIMIT,
sleeptime=RETRY_BACKOFF_INTERVAL,
retry_exceptions=(
requests.exceptions.RequestException,
),
log_args=False,
)
finally:
if not async_save:
upload_exec.shutdown() | python | def save(self, client=None):
"""
Like :py:meth:`.PanoptesObject.save`, but also uploads any local files
which have previously been added to the subject with
:py:meth:`add_location`. Automatically retries uploads on error.
If multiple local files are to be uploaded, several files will be
uploaded simultaneously to save time.
"""
if not client:
client = Panoptes.client()
async_save = hasattr(self._local, 'save_exec')
with client:
if async_save:
try:
# The recursive call will exec in a new thread, so
# self._local.save_exec will be undefined above
self._async_future = self._local.save_exec.submit(
self.save,
client=client,
)
return
except RuntimeError:
del self._local.save_exec
async_save = False
if not self.metadata == self._original_metadata:
self.modified_attributes.add('metadata')
response = retry(
super(Subject, self).save,
attempts=UPLOAD_RETRY_LIMIT,
sleeptime=RETRY_BACKOFF_INTERVAL,
retry_exceptions=(PanoptesAPIException,),
log_args=False,
)
if not response:
return
try:
if async_save:
upload_exec = self._local.save_exec
else:
upload_exec = ThreadPoolExecutor(
max_workers=ASYNC_SAVE_THREADS,
)
for location, media_data in zip(
response['subjects'][0]['locations'],
self._media_files
):
if not media_data:
continue
for media_type, url in location.items():
upload_exec.submit(
retry,
self._upload_media,
args=(url, media_data, media_type),
attempts=UPLOAD_RETRY_LIMIT,
sleeptime=RETRY_BACKOFF_INTERVAL,
retry_exceptions=(
requests.exceptions.RequestException,
),
log_args=False,
)
finally:
if not async_save:
upload_exec.shutdown() | Like :py:meth:`.PanoptesObject.save`, but also uploads any local files
which have previously been added to the subject with
:py:meth:`add_location`. Automatically retries uploads on error.
If multiple local files are to be uploaded, several files will be
uploaded simultaneously to save time. | https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/subject.py#L107-L178 |
zooniverse/panoptes-python-client | panoptes_client/subject.py | Subject.async_save_result | def async_save_result(self):
"""
Retrieves the result of this subject's asynchronous save.
- Returns `True` if the subject was saved successfully.
- Raises `concurrent.futures.CancelledError` if the save was cancelled.
- If the save failed, raises the relevant exception.
- Returns `False` if the subject hasn't finished saving or if the
subject has not been queued for asynchronous save.
"""
if hasattr(self, "_async_future") and self._async_future.done():
self._async_future.result()
return True
else:
return False | python | def async_save_result(self):
"""
Retrieves the result of this subject's asynchronous save.
- Returns `True` if the subject was saved successfully.
- Raises `concurrent.futures.CancelledError` if the save was cancelled.
- If the save failed, raises the relevant exception.
- Returns `False` if the subject hasn't finished saving or if the
subject has not been queued for asynchronous save.
"""
if hasattr(self, "_async_future") and self._async_future.done():
self._async_future.result()
return True
else:
return False | Retrieves the result of this subject's asynchronous save.
- Returns `True` if the subject was saved successfully.
- Raises `concurrent.futures.CancelledError` if the save was cancelled.
- If the save failed, raises the relevant exception.
- Returns `False` if the subject hasn't finished saving or if the
subject has not been queued for asynchronous save. | https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/subject.py#L192-L206 |
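Pulling the three `Subject` methods above together, a hedged end-to-end sketch; the project ID and file path are hypothetical, and an authenticated session is assumed.

```python
from panoptes_client import Subject

# Queue subject creation and media upload on the shared thread pool.
with Subject.async_saves():
    subject = Subject()
    subject.links.project = 1234             # hypothetical project ID
    subject.add_location('/data/image.jpg')  # hypothetical local file
    subject.save()                           # returns immediately; work is queued

# Once the pool has drained, check whether the queued save succeeded.
if subject.async_save_result():
    print('subject saved with id', subject.id)
else:
    print('save still pending or not queued asynchronously')
```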
zooniverse/panoptes-python-client | panoptes_client/subject.py | Subject.add_location | def add_location(self, location):
"""
Add a media location to this subject.
- **location** can be an open :py:class:`file` object, a path to a
local file, or a :py:class:`dict` containing MIME types and URLs for
remote media.
Examples::
subject.add_location(my_file)
subject.add_location('/data/image.jpg')
subject.add_location({'image/png': 'https://example.com/image.png'})
"""
if type(location) is dict:
self.locations.append(location)
self._media_files.append(None)
return
elif type(location) in (str,) + _OLD_STR_TYPES:
f = open(location, 'rb')
else:
f = location
try:
media_data = f.read()
if MEDIA_TYPE_DETECTION == 'magic':
media_type = magic.from_buffer(media_data, mime=True)
else:
media_type = imghdr.what(None, media_data)
if not media_type:
raise UnknownMediaException(
'Could not detect file type. Please try installing '
'libmagic: https://panoptes-python-client.readthedocs.'
'io/en/latest/user_guide.html#uploading-non-image-'
'media-types'
)
media_type = 'image/{}'.format(media_type)
self.locations.append(media_type)
self._media_files.append(media_data)
finally:
f.close() | python | def add_location(self, location):
"""
Add a media location to this subject.
- **location** can be an open :py:class:`file` object, a path to a
local file, or a :py:class:`dict` containing MIME types and URLs for
remote media.
Examples::
subject.add_location(my_file)
subject.add_location('/data/image.jpg')
subject.add_location({'image/png': 'https://example.com/image.png'})
"""
if type(location) is dict:
self.locations.append(location)
self._media_files.append(None)
return
elif type(location) in (str,) + _OLD_STR_TYPES:
f = open(location, 'rb')
else:
f = location
try:
media_data = f.read()
if MEDIA_TYPE_DETECTION == 'magic':
media_type = magic.from_buffer(media_data, mime=True)
else:
media_type = imghdr.what(None, media_data)
if not media_type:
raise UnknownMediaException(
'Could not detect file type. Please try installing '
'libmagic: https://panoptes-python-client.readthedocs.'
'io/en/latest/user_guide.html#uploading-non-image-'
'media-types'
)
media_type = 'image/{}'.format(media_type)
self.locations.append(media_type)
self._media_files.append(media_data)
finally:
f.close() | Add a media location to this subject.
- **location** can be an open :py:class:`file` object, a path to a
local file, or a :py:class:`dict` containing MIME types and URLs for
remote media.
Examples::
subject.add_location(my_file)
subject.add_location('/data/image.jpg')
subject.add_location({'image/png': 'https://example.com/image.png'}) | https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/subject.py#L215-L255 |
zooniverse/panoptes-python-client | panoptes_client/exportable.py | Exportable.get_export | def get_export(
self,
export_type,
generate=False,
wait=False,
wait_timeout=None,
):
"""
Downloads a data export over HTTP. Returns a `Requests Response
<http://docs.python-requests.org/en/master/api/#requests.Response>`_
object containing the content of the export.
- **export_type** is a string specifying which type of export should be
downloaded.
- **generate** is a boolean specifying whether to generate a new export
and wait for it to be ready, or to just download the latest export.
- **wait** is a boolean specifying whether to wait for an in-progress
export to finish, if there is one. Has no effect if ``generate`` is
``True``.
- **wait_timeout** is the number of seconds to wait if ``wait`` is
``True``. Has no effect if ``wait`` is ``False`` or if ``generate``
is ``True``.
The returned :py:class:`.Response` object has two additional attributes
as a convenience for working with the CSV content; **csv_reader** and
**csv_dictreader**, which are wrappers for :py:meth:`.csv.reader`
and :py:class:`csv.DictReader` respectively. These wrappers take care
of correctly decoding the export content for the CSV parser.
Example::
classification_export = Project(1234).get_export('classifications')
for row in classification_export.csv_reader():
print(row)
classification_export = Project(1234).get_export('classifications')
for row in classification_export.csv_dictreader():
print(row)
"""
if generate:
self.generate_export(export_type)
if generate or wait:
export = self.wait_export(export_type, wait_timeout)
else:
export = self.describe_export(export_type)
if export_type in TALK_EXPORT_TYPES:
media_url = export['data_requests'][0]['url']
else:
media_url = export['media'][0]['src']
response = requests.get(media_url, stream=True)
response.csv_reader = functools.partial(
csv.reader,
response.iter_lines(decode_unicode=True),
)
response.csv_dictreader = functools.partial(
csv.DictReader,
response.iter_lines(decode_unicode=True),
)
return response | python | def get_export(
self,
export_type,
generate=False,
wait=False,
wait_timeout=None,
):
"""
Downloads a data export over HTTP. Returns a `Requests Response
<http://docs.python-requests.org/en/master/api/#requests.Response>`_
object containing the content of the export.
- **export_type** is a string specifying which type of export should be
downloaded.
- **generate** is a boolean specifying whether to generate a new export
and wait for it to be ready, or to just download the latest export.
- **wait** is a boolean specifying whether to wait for an in-progress
export to finish, if there is one. Has no effect if ``generate`` is
``True``.
- **wait_timeout** is the number of seconds to wait if ``wait`` is
``True``. Has no effect if ``wait`` is ``False`` or if ``generate``
is ``True``.
The returned :py:class:`.Response` object has two additional attributes
as a convenience for working with the CSV content; **csv_reader** and
**csv_dictreader**, which are wrappers for :py:meth:`.csv.reader`
and :py:class:`csv.DictReader` respectively. These wrappers take care
of correctly decoding the export content for the CSV parser.
Example::
classification_export = Project(1234).get_export('classifications')
for row in classification_export.csv_reader():
print(row)
classification_export = Project(1234).get_export('classifications')
for row in classification_export.csv_dictreader():
print(row)
"""
if generate:
self.generate_export(export_type)
if generate or wait:
export = self.wait_export(export_type, wait_timeout)
else:
export = self.describe_export(export_type)
if export_type in TALK_EXPORT_TYPES:
media_url = export['data_requests'][0]['url']
else:
media_url = export['media'][0]['src']
response = requests.get(media_url, stream=True)
response.csv_reader = functools.partial(
csv.reader,
response.iter_lines(decode_unicode=True),
)
response.csv_dictreader = functools.partial(
csv.DictReader,
response.iter_lines(decode_unicode=True),
)
return response | Downloads a data export over HTTP. Returns a `Requests Response
<http://docs.python-requests.org/en/master/api/#requests.Response>`_
object containing the content of the export.
- **export_type** is a string specifying which type of export should be
downloaded.
- **generate** is a boolean specifying whether to generate a new export
and wait for it to be ready, or to just download the latest export.
- **wait** is a boolean specifying whether to wait for an in-progress
export to finish, if there is one. Has no effect if ``generate`` is
``True``.
- **wait_timeout** is the number of seconds to wait if ``wait`` is
``True``. Has no effect if ``wait`` is ``False`` or if ``generate``
is ``True``.
The returned :py:class:`.Response` object has two additional attributes
as a convenience for working with the CSV content; **csv_reader** and
**csv_dictreader**, which are wrappers for :py:meth:`.csv.reader`
and :py:class:`csv.DictReader` respectively. These wrappers take care
of correctly decoding the export content for the CSV parser.
Example::
classification_export = Project(1234).get_export('classifications')
for row in classification_export.csv_reader():
print(row)
classification_export = Project(1234).get_export('classifications')
for row in classification_export.csv_dictreader():
print(row) | https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/exportable.py#L30-L92 |
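Beyond the CSV helpers shown in the docstring, the returned Requests response can be streamed straight to disk; a hedged sketch with a hypothetical project ID follows.

```python
from panoptes_client import Project

project = Project.find(1234)   # hypothetical project ID
export = project.get_export('classifications', generate=True)

# The response was requested with stream=True, so write it out in chunks.
with open('classifications.csv', 'wb') as out:
    for chunk in export.iter_content(chunk_size=65536):
        out.write(chunk)
```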
zooniverse/panoptes-python-client | panoptes_client/exportable.py | Exportable.wait_export | def wait_export(
self,
export_type,
timeout=None,
):
"""
Blocks until an in-progress export is ready.
- **export_type** is a string specifying which type of export to wait
for.
- **timeout** is the maximum number of seconds to wait.
If ``timeout`` is given and the export is not ready by the time limit,
:py:class:`.PanoptesAPIException` is raised.
"""
success = False
if timeout:
end_time = datetime.datetime.now() + datetime.timedelta(
seconds=timeout
)
while (not timeout) or (datetime.datetime.now() < end_time):
export_description = self.describe_export(
export_type,
)
if export_type in TALK_EXPORT_TYPES:
export_metadata = export_description['data_requests'][0]
else:
export_metadata = export_description['media'][0]['metadata']
if export_metadata.get('state', '') in ('ready', 'finished'):
success = True
break
time.sleep(2)
if not success:
raise PanoptesAPIException(
'{}_export not ready within {} seconds'.format(
export_type,
timeout
)
)
return export_description | python | def wait_export(
self,
export_type,
timeout=None,
):
"""
Blocks until an in-progress export is ready.
- **export_type** is a string specifying which type of export to wait
for.
- **timeout** is the maximum number of seconds to wait.
If ``timeout`` is given and the export is not ready by the time limit,
:py:class:`.PanoptesAPIException` is raised.
"""
success = False
if timeout:
end_time = datetime.datetime.now() + datetime.timedelta(
seconds=timeout
)
while (not timeout) or (datetime.datetime.now() < end_time):
export_description = self.describe_export(
export_type,
)
if export_type in TALK_EXPORT_TYPES:
export_metadata = export_description['data_requests'][0]
else:
export_metadata = export_description['media'][0]['metadata']
if export_metadata.get('state', '') in ('ready', 'finished'):
success = True
break
time.sleep(2)
if not success:
raise PanoptesAPIException(
'{}_export not ready within {} seconds'.format(
export_type,
timeout
)
)
return export_description | Blocks until an in-progress export is ready.
- **export_type** is a string specifying which type of export to wait
for.
- **timeout** is the maximum number of seconds to wait.
If ``timeout`` is given and the export is not ready by the time limit,
:py:class:`.PanoptesAPIException` is raised. | https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/exportable.py#L94-L140 |
zooniverse/panoptes-python-client | panoptes_client/exportable.py | Exportable.generate_export | def generate_export(self, export_type):
"""
Start a new export.
- **export_type** is a string specifying which type of export to start.
Returns a :py:class:`dict` containing metadata for the new export.
"""
if export_type in TALK_EXPORT_TYPES:
return talk.post_data_request(
'project-{}'.format(self.id),
export_type.replace('talk_', '')
)
return self.http_post(
self._export_path(export_type),
json={"media": {"content_type": "text/csv"}},
)[0] | python | def generate_export(self, export_type):
"""
Start a new export.
- **export_type** is a string specifying which type of export to start.
Returns a :py:class:`dict` containing metadata for the new export.
"""
if export_type in TALK_EXPORT_TYPES:
return talk.post_data_request(
'project-{}'.format(self.id),
export_type.replace('talk_', '')
)
return self.http_post(
self._export_path(export_type),
json={"media": {"content_type": "text/csv"}},
)[0] | Start a new export.
- **export_type** is a string specifying which type of export to start.
Returns a :py:class:`dict` containing metadata for the new export. | https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/exportable.py#L142-L160 |
zooniverse/panoptes-python-client | panoptes_client/exportable.py | Exportable.describe_export | def describe_export(self, export_type):
"""
Fetch metadata for an export.
- **export_type** is a string specifying which type of export to look
up.
Returns a :py:class:`dict` containing metadata for the export.
"""
if export_type in TALK_EXPORT_TYPES:
return talk.get_data_request(
'project-{}'.format(self.id),
export_type.replace('talk_', '')
)[0]
return self.http_get(
self._export_path(export_type),
)[0] | python | def describe_export(self, export_type):
"""
Fetch metadata for an export.
- **export_type** is a string specifying which type of export to look
up.
Returns a :py:class:`dict` containing metadata for the export.
"""
if export_type in TALK_EXPORT_TYPES:
return talk.get_data_request(
'project-{}'.format(self.id),
export_type.replace('talk_', '')
)[0]
return self.http_get(
self._export_path(export_type),
)[0] | Fetch metadata for an export.
- **export_type** is a string specifying which type of export to look
up.
Returns a :py:class:`dict` containing metadata for the export. | https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/exportable.py#L162-L179 |
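The three export methods above compose naturally; a hedged sketch (hypothetical project ID, talk exports omitted) follows.

```python
from panoptes_client import Project

project = Project.find(1234)                 # hypothetical project ID

project.generate_export('classifications')   # kick off a fresh export
info = project.wait_export('classifications', timeout=3600)

# For non-talk exports the download URL lives under media[0]['src'].
print(info['media'][0]['src'])

# describe_export() fetches the same metadata without blocking.
print(project.describe_export('classifications')['media'][0]['metadata'])
```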
zooniverse/panoptes-python-client | panoptes_client/classification.py | Classification.where | def where(cls, **kwargs):
"""
where(scope=None, **kwargs)
Like :py:meth:`.PanoptesObject.where`, but also allows setting the
query scope.
- **scope** can be any of the values given in the `Classification
Collection API documentation <http://docs.panoptes.apiary.io/#reference/classification/classification/list-all-classifications>`_
without the leading slash.
Examples::
my_classifications = Classification.where()
my_proj_123_classifications = Classification.where(project_id=123)
all_proj_123_classifications = Classification.where(
scope='project',
project_id=123,
)
"""
scope = kwargs.pop('scope', None)
if not scope:
return super(Classification, cls).where(**kwargs)
return cls.paginated_results(*cls.http_get(scope, params=kwargs)) | python | def where(cls, **kwargs):
"""
where(scope=None, **kwargs)
Like :py:meth:`.PanoptesObject.where`, but also allows setting the
query scope.
- **scope** can be any of the values given in the `Classification
Collection API documentation <http://docs.panoptes.apiary.io/#reference/classification/classification/list-all-classifications>`_
without the leading slash.
Examples::
my_classifications = Classification.where()
my_proj_123_classifications = Classification.where(project_id=123)
all_proj_123_classifications = Classification.where(
scope='project',
project_id=123,
)
"""
scope = kwargs.pop('scope', None)
if not scope:
return super(Classification, cls).where(**kwargs)
return cls.paginated_results(*cls.http_get(scope, params=kwargs)) | where(scope=None, **kwargs)
Like :py:meth:`.PanoptesObject.where`, but also allows setting the
query scope.
- **scope** can be any of the values given in the `Classification
Collection API documentation <http://docs.panoptes.apiary.io/#reference/classification/classification/list-all-classifications>`_
without the leading slash.
Examples::
my_classifications = Classification.where()
my_proj_123_classifications = Classification.where(project_id=123)
all_proj_123_classifications = Classification.where(
scope='project',
project_id=123,
) | https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/classification.py#L12-L37 |
zooniverse/panoptes-python-client | panoptes_client/collection.py | Collection.find | def find(cls, id='', slug=None):
"""
Similar to :py:meth:`.PanoptesObject.find`, but allows lookup by slug
as well as ID.
Examples::
collection_1234 = Collection.find(1234)
my_collection = Collection.find(slug="example/my-collection")
"""
if not id and not slug:
return None
try:
return cls.where(id=id, slug=slug).next()
except StopIteration:
raise PanoptesAPIException(
"Could not find collection with slug='{}'".format(slug)
) | python | def find(cls, id='', slug=None):
"""
Similar to :py:meth:`.PanoptesObject.find`, but allows lookup by slug
as well as ID.
Examples::
collection_1234 = Collection.find(1234)
my_collection = Collection.find(slug="example/my-collection")
"""
if not id and not slug:
return None
try:
return cls.where(id=id, slug=slug).next()
except StopIteration:
raise PanoptesAPIException(
"Could not find collection with slug='{}'".format(slug)
) | Similar to :py:meth:`.PanoptesObject.find`, but allows lookup by slug
as well as ID.
Examples::
collection_1234 = Collection.find(1234)
my_collection = Collection.find(slug="example/my-collection") | https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/collection.py#L28-L46 |
zooniverse/panoptes-python-client | panoptes_client/collection.py | Collection.set_default_subject | def set_default_subject(self, subject):
"""
Sets the subject's location media URL as a link.
It displays as the default subject on PFE.
- **subject** can be a single :py:class:`.Subject` instance or a single
subject ID.
Examples::
collection.set_default_subject(1234)
collection.set_default_subject(Subject(1234))
"""
if not (
isinstance(subject, Subject)
or isinstance(subject, (int, str,))
):
raise TypeError
if isinstance(subject, Subject):
_subject_id = subject.id
else:
_subject_id = str(subject)
self.http_post(
'{}/links/default_subject'.format(self.id),
json={'default_subject': _subject_id},
) | python | def set_default_subject(self, subject):
"""
Sets the subject's location media URL as a link.
It displays as the default subject on PFE.
- **subject** can be a single :py:class:`.Subject` instance or a single
subject ID.
Examples::
collection.set_default_subject(1234)
collection.set_default_subject(Subject(1234))
"""
if not (
isinstance(subject, Subject)
or isinstance(subject, (int, str,))
):
raise TypeError
if isinstance(subject, Subject):
_subject_id = subject.id
else:
_subject_id = str(subject)
self.http_post(
'{}/links/default_subject'.format(self.id),
json={'default_subject': _subject_id},
) | Sets the subject's location media URL as a link.
It displays as the default subject on PFE.
- **subject** can be a single :py:class:`.Subject` instance or a single
subject ID.
Examples::
collection.set_default_subject(1234)
collection.set_default_subject(Subject(1234)) | https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/collection.py#L74-L100 |
zooniverse/panoptes-python-client | panoptes_client/subject_set.py | SubjectSet.subjects | def subjects(self):
"""
A generator which yields :py:class:`.Subject` objects which are in this
subject set.
Examples::
for subject in subject_set.subjects:
print(subject.id)
"""
for sms in SetMemberSubject.where(subject_set_id=self.id):
yield sms.links.subject | python | def subjects(self):
"""
A generator which yields :py:class:`.Subject` objects which are in this
subject set.
Examples::
for subject in subject_set.subjects:
print(subject.id)
"""
for sms in SetMemberSubject.where(subject_set_id=self.id):
yield sms.links.subject | A generator which yields :py:class:`.Subject` objects which are in this
subject set.
Examples::
for subject in subject_set.subjects:
print(subject.id) | https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/subject_set.py#L69-L82 |
zooniverse/panoptes-python-client | panoptes_client/workflow.py | Workflow.retire_subjects | def retire_subjects(self, subjects, reason='other'):
"""
Retires subjects in this workflow.
- **subjects** can be a list of :py:class:`Subject` instances, a list
of subject IDs, a single :py:class:`Subject` instance, or a single
subject ID.
- **reason** gives the reason the :py:class:`Subject` has been retired.
Defaults to **other**.
Examples::
workflow.retire_subjects(1234)
workflow.retire_subjects([1,2,3,4])
workflow.retire_subjects(Subject(1234))
workflow.retire_subjects([Subject(12), Subject(34)])
"""
subjects = [ s.id if isinstance(s, Subject) else s for s in subjects ]
return Workflow.http_post(
'{}/retired_subjects'.format(self.id),
json={
'subject_ids': subjects,
'retirement_reason': reason
}
) | python | def retire_subjects(self, subjects, reason='other'):
"""
Retires subjects in this workflow.
- **subjects** can be a list of :py:class:`Subject` instances, a list
of subject IDs, a single :py:class:`Subject` instance, or a single
subject ID.
- **reason** gives the reason the :py:class:`Subject` has been retired.
Defaults to **other**.
Examples::
workflow.retire_subjects(1234)
workflow.retire_subjects([1,2,3,4])
workflow.retire_subjects(Subject(1234))
workflow.retire_subjects([Subject(12), Subject(34)])
"""
subjects = [ s.id if isinstance(s, Subject) else s for s in subjects ]
return Workflow.http_post(
'{}/retired_subjects'.format(self.id),
json={
'subject_ids': subjects,
'retirement_reason': reason
}
) | Retires subjects in this workflow.
- **subjects** can be a list of :py:class:`Subject` instances, a list
of subject IDs, a single :py:class:`Subject` instance, or a single
subject ID.
- **reason** gives the reason the :py:class:`Subject` has been retired.
Defaults to **other**.
Examples::
workflow.retire_subjects(1234)
workflow.retire_subjects([1,2,3,4])
workflow.retire_subjects(Subject(1234))
workflow.retire_subjects([Subject(12), Subject(34)]) | https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/workflow.py#L31-L57 |
zooniverse/panoptes-python-client | panoptes_client/project.py | Project.collaborators | def collaborators(self, *roles):
"""
Returns a list of :py:class:`.User` who are collaborators on this
project.
Zero or more role arguments can be passed as strings to narrow down the
results. If any roles are given, users who possess at least one of the
given roles are returned.
Examples::
all_collabs = project.collaborators()
moderators = project.collaborators("moderators")
moderators_and_translators = project.collaborators(
"moderators",
"translators",
)
"""
return [
r.links.owner for r in ProjectRole.where(project_id=self.id)
if len(roles) == 0 or len(set(roles) & set(r.roles)) > 0
] | python | def collaborators(self, *roles):
"""
Returns a list of :py:class:`.User` who are collaborators on this
project.
Zero or more role arguments can be passed as strings to narrow down the
results. If any roles are given, users who possess at least one of the
given roles are returned.
Examples::
all_collabs = project.collaborators()
moderators = project.collaborators("moderators")
moderators_and_translators = project.collaborators(
"moderators",
"translators",
)
"""
return [
r.links.owner for r in ProjectRole.where(project_id=self.id)
if len(roles) == 0 or len(set(roles) & set(r.roles)) > 0
] | Returns a list of :py:class:`.User` who are collaborators on this
project.
Zero or more role arguments can be passed as strings to narrow down the
results. If any roles are given, users who possess at least one of the
given roles are returned.
Examples::
all_collabs = project.collaborators()
moderators = project.collaborators("moderators")
moderators_and_translators = project.collaborators(
"moderators",
"translators",
) | https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/project.py#L63-L85 |
zooniverse/panoptes-python-client | panoptes_client/panoptes.py | Panoptes.connect | def connect(cls, *args, **kwargs):
"""
connect(username=None, password=None, endpoint=None, admin=False)
Configures the Panoptes client for use.
Note that there is no need to call this unless you need to pass one or
more of the below arguments. By default, the client will connect to
the public Zooniverse.org API as an anonymous user.
All arguments are optional:
- **username** is your Zooniverse.org username.
- **password** is your Zooniverse.org password.
- **endpoint** is the HTTP API endpoint you'd like to connect to.
Defaults to **https://www.zooniverse.org**. Should not include a
trailing slash.
- **admin** is a boolean, switching on admin mode if ``True``. Has no
effect if the given username is not a Zooniverse.org administrator.
Examples::
Panoptes.connect(username='example', password='example')
Panoptes.connect(endpoint='https://panoptes.example.com')
"""
cls._local.panoptes_client = cls(*args, **kwargs)
cls._local.panoptes_client.login()
return cls._local.panoptes_client | python | def connect(cls, *args, **kwargs):
"""
connect(username=None, password=None, endpoint=None, admin=False)
Configures the Panoptes client for use.
Note that there is no need to call this unless you need to pass one or
more of the below arguments. By default, the client will connect to
the public Zooniverse.org API as an anonymous user.
All arguments are optional:
- **username** is your Zooniverse.org username.
- **password** is your Zooniverse.org password.
- **endpoint** is the HTTP API endpoint you'd like to connect to.
Defaults to **https://www.zooniverse.org**. Should not include a
trailing slash.
- **admin** is a boolean, switching on admin mode if ``True``. Has no
effect if the given username is not a Zooniverse.org administrator.
Examples::
Panoptes.connect(username='example', password='example')
Panoptes.connect(endpoint='https://panoptes.example.com')
"""
cls._local.panoptes_client = cls(*args, **kwargs)
cls._local.panoptes_client.login()
return cls._local.panoptes_client | connect(username=None, password=None, endpoint=None, admin=False)
Configures the Panoptes client for use.
Note that there is no need to call this unless you need to pass one or
more of the below arguments. By default, the client will connect to
the public Zooniverse.org API as an anonymous user.
All arguments are optional:
- **username** is your Zooniverse.org username.
- **password** is your Zooniverse.org password.
- **endpoint** is the HTTP API endpoint you'd like to connect to.
Defaults to **https://www.zooniverse.org**. Should not include a
trailing slash.
- **admin** is a boolean, switching on admin mode if ``True``. Has no
effect if the given username is not a Zooniverse.org administrator.
Examples::
Panoptes.connect(username='example', password='example')
Panoptes.connect(endpoint='https://panoptes.example.com') | https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/panoptes.py#L82-L110 |
zooniverse/panoptes-python-client | panoptes_client/panoptes.py | PanoptesObject.where | def where(cls, **kwargs):
"""
Returns a generator which yields instances matching the given query
arguments.
For example, this would yield all :py:class:`.Project`::
Project.where()
And this would yield all launch approved :py:class:`.Project`::
Project.where(launch_approved=True)
"""
_id = kwargs.pop('id', '')
return cls.paginated_results(*cls.http_get(_id, params=kwargs)) | python | def where(cls, **kwargs):
"""
Returns a generator which yields instances matching the given query
arguments.
For example, this would yield all :py:class:`.Project`::
Project.where()
And this would yield all launch approved :py:class:`.Project`::
Project.where(launch_approved=True)
"""
_id = kwargs.pop('id', '')
return cls.paginated_results(*cls.http_get(_id, params=kwargs)) | Returns a generator which yields instances matching the given query
arguments.
For example, this would yield all :py:class:`.Project`::
Project.where()
And this would yield all launch approved :py:class:`.Project`::
Project.where(launch_approved=True) | https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/panoptes.py#L657-L672 |
zooniverse/panoptes-python-client | panoptes_client/panoptes.py | PanoptesObject.find | def find(cls, _id):
"""
Returns the individual instance with the given ID, if it exists. Raises
:py:class:`PanoptesAPIException` if the object with that ID is not
found.
"""
if not _id:
return None
try:
return next(cls.where(id=_id))
except StopIteration:
raise PanoptesAPIException(
"Could not find {} with id='{}'".format(cls.__name__, _id)
) | python | def find(cls, _id):
"""
Returns the individual instance with the given ID, if it exists. Raises
:py:class:`PanoptesAPIException` if the object with that ID is not
found.
"""
if not _id:
return None
try:
return next(cls.where(id=_id))
except StopIteration:
raise PanoptesAPIException(
"Could not find {} with id='{}'".format(cls.__name__, _id)
) | Returns the individual instance with the given ID, if it exists. Raises
:py:class:`PanoptesAPIException` if the object with that ID is not
found. | https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/panoptes.py#L675-L689 |
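A small sketch of the lookup-or-raise behaviour via a concrete subclass; the ID is hypothetical.

```python
from panoptes_client import Project
from panoptes_client.panoptes import PanoptesAPIException

try:
    project = Project.find(1234)      # hypothetical ID
    print(project.display_name)
except PanoptesAPIException as err:
    print('lookup failed:', err)
```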
zooniverse/panoptes-python-client | panoptes_client/panoptes.py | PanoptesObject.save | def save(self):
"""
Saves the object. If the object has not been saved before (i.e. it's
new), then a new object is created. Otherwise, any changes are
submitted to the API.
"""
if not self.id:
save_method = Panoptes.client().post
force_reload = False
else:
if not self.modified_attributes:
return
if not self._loaded:
self.reload()
save_method = Panoptes.client().put
force_reload = True
response, response_etag = save_method(
self.url(self.id),
json={self._api_slug: self._savable_dict(
modified_attributes=self.modified_attributes
)},
etag=self.etag
)
raw_resource_response = response[self._api_slug][0]
self.set_raw(raw_resource_response, response_etag)
if force_reload:
self._loaded = False
return response | python | def save(self):
"""
Saves the object. If the object has not been saved before (i.e. it's
new), then a new object is created. Otherwise, any changes are
submitted to the API.
"""
if not self.id:
save_method = Panoptes.client().post
force_reload = False
else:
if not self.modified_attributes:
return
if not self._loaded:
self.reload()
save_method = Panoptes.client().put
force_reload = True
response, response_etag = save_method(
self.url(self.id),
json={self._api_slug: self._savable_dict(
modified_attributes=self.modified_attributes
)},
etag=self.etag
)
raw_resource_response = response[self._api_slug][0]
self.set_raw(raw_resource_response, response_etag)
if force_reload:
self._loaded = False
return response | Saves the object. If the object has not been saved before (i.e. it's
new), then a new object is created. Otherwise, any changes are
submitted to the API. | https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/panoptes.py#L792-L824 |
zooniverse/panoptes-python-client | panoptes_client/panoptes.py | PanoptesObject.reload | def reload(self):
"""
Re-fetches the object from the API, discarding any local changes.
Returns without doing anything if the object is new.
"""
if not self.id:
return
reloaded_object = self.__class__.find(self.id)
self.set_raw(
reloaded_object.raw,
reloaded_object.etag
) | python | def reload(self):
"""
Re-fetches the object from the API, discarding any local changes.
Returns without doing anything if the object is new.
"""
if not self.id:
return
reloaded_object = self.__class__.find(self.id)
self.set_raw(
reloaded_object.raw,
reloaded_object.etag
) | Re-fetches the object from the API, discarding any local changes.
Returns without doing anything if the object is new. | https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/panoptes.py#L826-L838 |
zooniverse/panoptes-python-client | panoptes_client/panoptes.py | PanoptesObject.delete | def delete(self):
"""
Deletes the object. Returns without doing anything if the object is
new.
"""
if not self.id:
return
if not self._loaded:
self.reload()
return self.http_delete(self.id, etag=self.etag) | python | def delete(self):
"""
Deletes the object. Returns without doing anything if the object is
new.
"""
if not self.id:
return
if not self._loaded:
self.reload()
return self.http_delete(self.id, etag=self.etag) | Deletes the object. Returns without doing anything if the object is
new. | https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/panoptes.py#L840-L850 |
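Taken together, `save()`, `reload()`, and `delete()` give the usual resource lifecycle; a hedged sketch using `SubjectSet` with placeholder credentials and a hypothetical project ID follows.

```python
from panoptes_client import Panoptes, SubjectSet

Panoptes.connect(username='example', password='example')  # placeholder credentials

subject_set = SubjectSet()
subject_set.links.project = 1234           # hypothetical project ID
subject_set.display_name = 'My subject set'
subject_set.save()                          # new object: issues a POST

subject_set.display_name = 'Renamed subject set'
subject_set.save()                          # existing object: PUTs only modified attributes

subject_set.reload()                        # drop any unsaved local changes
subject_set.delete()                        # remove the resource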
zooniverse/panoptes-python-client | panoptes_client/panoptes.py | LinkCollection.add | def add(self, objs):
"""
Adds the given `objs` to this `LinkCollection`.
- **objs** can be a list of :py:class:`.PanoptesObject` instances, a
list of object IDs, a single :py:class:`.PanoptesObject` instance, or
a single object ID.
Examples::
organization.links.projects.add(1234)
organization.links.projects.add(Project(1234))
workflow.links.subject_sets.add([1,2,3,4])
workflow.links.subject_sets.add([Project(12), Project(34)])
"""
if self.readonly:
raise NotImplementedError(
'{} links can\'t be modified'.format(self._slug)
)
if not self._parent.id:
raise ObjectNotSavedException(
"Links can not be modified before the object has been saved."
)
_objs = [obj for obj in self._build_obj_list(objs) if obj not in self]
if not _objs:
return
self._parent.http_post(
'{}/links/{}'.format(self._parent.id, self._slug),
json={self._slug: _objs},
retry=True,
)
self._linked_object_ids.extend(_objs) | python | def add(self, objs):
"""
Adds the given `objs` to this `LinkCollection`.
- **objs** can be a list of :py:class:`.PanoptesObject` instances, a
list of object IDs, a single :py:class:`.PanoptesObject` instance, or
a single object ID.
Examples::
organization.links.projects.add(1234)
organization.links.projects.add(Project(1234))
workflow.links.subject_sets.add([1,2,3,4])
workflow.links.subject_sets.add([Project(12), Project(34)])
"""
if self.readonly:
raise NotImplementedError(
'{} links can\'t be modified'.format(self._slug)
)
if not self._parent.id:
raise ObjectNotSavedException(
"Links can not be modified before the object has been saved."
)
_objs = [obj for obj in self._build_obj_list(objs) if obj not in self]
if not _objs:
return
self._parent.http_post(
'{}/links/{}'.format(self._parent.id, self._slug),
json={self._slug: _objs},
retry=True,
)
self._linked_object_ids.extend(_objs) | Adds the given `objs` to this `LinkCollection`.
- **objs** can be a list of :py:class:`.PanoptesObject` instances, a
list of object IDs, a single :py:class:`.PanoptesObject` instance, or
a single object ID.
Examples::
organization.links.projects.add(1234)
organization.links.projects.add(Project(1234))
workflow.links.subject_sets.add([1,2,3,4])
workflow.links.subject_sets.add([Project(12), Project(34)]) | https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/panoptes.py#L1011-L1046 |
zooniverse/panoptes-python-client | panoptes_client/panoptes.py | LinkCollection.remove | def remove(self, objs):
"""
Removes the given `objs` from this `LinkCollection`.
- **objs** can be a list of :py:class:`.PanoptesObject` instances, a
list of object IDs, a single :py:class:`.PanoptesObject` instance, or
a single object ID.
Examples::
organization.links.projects.remove(1234)
organization.links.projects.remove(Project(1234))
workflow.links.subject_sets.remove([1,2,3,4])
workflow.links.subject_sets.remove([Project(12), Project(34)])
"""
if self.readonly:
raise NotImplementedError(
'{} links can\'t be modified'.format(self._slug)
)
if not self._parent.id:
raise ObjectNotSavedException(
"Links can not be modified before the object has been saved."
)
_objs = [obj for obj in self._build_obj_list(objs) if obj in self]
if not _objs:
return
_obj_ids = ",".join(_objs)
self._parent.http_delete(
'{}/links/{}/{}'.format(self._parent.id, self._slug, _obj_ids),
retry=True,
)
self._linked_object_ids = [
obj for obj in self._linked_object_ids if obj not in _objs
] | python | def remove(self, objs):
"""
Removes the given `objs` from this `LinkCollection`.
- **objs** can be a list of :py:class:`.PanoptesObject` instances, a
list of object IDs, a single :py:class:`.PanoptesObject` instance, or
a single object ID.
Examples::
organization.links.projects.remove(1234)
organization.links.projects.remove(Project(1234))
workflow.links.subject_sets.remove([1,2,3,4])
workflow.links.subject_sets.remove([Project(12), Project(34)])
"""
if self.readonly:
raise NotImplementedError(
'{} links can\'t be modified'.format(self._slug)
)
if not self._parent.id:
raise ObjectNotSavedException(
"Links can not be modified before the object has been saved."
)
_objs = [obj for obj in self._build_obj_list(objs) if obj in self]
if not _objs:
return
_obj_ids = ",".join(_objs)
self._parent.http_delete(
'{}/links/{}/{}'.format(self._parent.id, self._slug, _obj_ids),
retry=True,
)
self._linked_object_ids = [
obj for obj in self._linked_object_ids if obj not in _objs
] | Removes the given `objs` from this `LinkCollection`.
- **objs** can be a list of :py:class:`.PanoptesObject` instances, a
list of object IDs, a single :py:class:`.PanoptesObject` instance, or
a single object ID.
Examples::
organization.links.projects.remove(1234)
organization.links.projects.remove(Project(1234))
workflow.links.subject_sets.remove([1,2,3,4])
workflow.links.subject_sets.remove([Project(12), Project(34)]) | https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/panoptes.py#L1049-L1086 |
marshallward/f90nml | f90nml/cli.py | parse | def parse():
"""Parse the command line input arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('--version', action='version',
version='f90nml {0}'.format(f90nml.__version__))
parser.add_argument('--group', '-g', action='store',
help="specify namelist group to modify. "
"When absent, the first group is used")
parser.add_argument('--variable', '-v', action='append',
help="specify the namelist variable to add or modify, "
"followed by the new value. Expressions are of the "
"form `VARIABLE=VALUE`")
parser.add_argument('--patch', '-p', action='store_true',
help="modify the existing namelist as a patch")
parser.add_argument('--format', '-f', action='store',
help="specify the output format (json, yaml, or nml)")
parser.add_argument('--output', '-o', action='store',
help="specify namelist group to modify. "
"When absent, the first group is used")
parser.add_argument('input', nargs='?')
parser.add_argument('output', nargs='?')
if len(sys.argv) == 1:
parser.print_help()
sys.exit()
args = parser.parse_args()
input_fname = args.input
output_fname = args.output
# Get input format
# TODO: Combine with output format
if input_fname:
_, input_ext = os.path.splitext(input_fname)
if input_ext == '.json':
input_fmt = 'json'
elif input_ext == '.yaml':
input_fmt = 'yaml'
else:
input_fmt = 'nml'
else:
input_fmt = 'nml'
# Output format flag validation
valid_formats = ('json', 'yaml', 'nml')
if args.format and args.format not in valid_formats:
print('f90nml: error: format must be one of the following: {0}'
''.format(valid_formats), file=sys.stderr)
sys.exit(-1)
# Get output format
# TODO: Combine with input format
if not args.format:
if output_fname:
_, output_ext = os.path.splitext(output_fname)
if output_ext == '.json':
output_fmt = 'json'
elif output_ext in ('.yaml', '.yml'):
output_fmt = 'yaml'
else:
output_fmt = 'nml'
else:
output_fmt = 'nml'
else:
output_fmt = args.format
# Confirm that YAML module is available
if (input_fmt == 'yaml' or output_fmt == 'yaml') and not has_yaml:
print('f90nml: error: YAML module could not be found.',
file=sys.stderr)
sys.exit(-1)
# Do not patch non-namelist output
if any(fmt != 'nml' for fmt in (input_fmt, output_fmt)) and args.patch:
print('f90nml: error: Only namelist files can be patched.',
file=sys.stderr)
sys.exit(-1)
# Read the input file
if input_fname:
if input_fmt in ('json', 'yaml'):
if input_fmt == 'json':
with open(input_fname) as input_file:
input_data = json.load(input_file)
elif input_ext == '.yaml':
with open(input_fname) as input_file:
input_data = yaml.safe_load(input_file)
else:
input_data = f90nml.read(input_fname)
else:
input_data = {}
input_data = f90nml.Namelist(input_data)
# Construct the update namelist
update_nml = {}
if args.variable:
if not args.group:
# Use the first available group
grp = list(input_data.keys())[0]
warnings.warn('f90nml: warning: Assuming variables are in group \'{0}\'.'.format(grp))
else:
grp = args.group
update_nml_str = '&{0} {1} /\n'.format(grp, ', '.join(args.variable))
update_io = StringIO(update_nml_str)
update_nml = f90nml.read(update_io)
update_io.close()
# Target output
output_file = open(output_fname, 'w') if output_fname else sys.stdout
if args.patch:
# We have to read the file twice for a patch. The main reason is
# to identify the default group, in case this is not provided.
# It could be avoided if a group is provided, but logically that could
# be a mess that I do not want to sort out right now.
f90nml.patch(input_fname, update_nml, output_file)
else:
# Update the input namelist directly
if update_nml:
try:
input_data[grp].update(update_nml[grp])
except KeyError:
input_data[grp] = update_nml[grp]
# Write to output
if not args.patch:
if output_fmt in ('json', 'yaml'):
if output_fmt == 'json':
input_data = input_data.todict(complex_tuple=True)
json.dump(input_data, output_file,
indent=4, separators=(',', ': '))
output_file.write('\n')
elif output_fmt == 'yaml':
input_data = input_data.todict(complex_tuple=True)
yaml.dump(input_data, output_file,
default_flow_style=False)
else:
# Default to namelist output
f90nml.write(input_data, output_file)
# Cleanup
if output_file != sys.stdout:
output_file.close() | python | def parse():
"""Parse the command line input arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('--version', action='version',
version='f90nml {0}'.format(f90nml.__version__))
parser.add_argument('--group', '-g', action='store',
help="specify namelist group to modify. "
"When absent, the first group is used")
parser.add_argument('--variable', '-v', action='append',
help="specify the namelist variable to add or modify, "
"followed by the new value. Expressions are of the "
"form `VARIABLE=VALUE`")
parser.add_argument('--patch', '-p', action='store_true',
help="modify the existing namelist as a patch")
parser.add_argument('--format', '-f', action='store',
help="specify the output format (json, yaml, or nml)")
parser.add_argument('--output', '-o', action='store',
help="specify namelist group to modify. "
"When absent, the first group is used")
parser.add_argument('input', nargs='?')
parser.add_argument('output', nargs='?')
if len(sys.argv) == 1:
parser.print_help()
sys.exit()
args = parser.parse_args()
input_fname = args.input
output_fname = args.output
# Get input format
# TODO: Combine with output format
if input_fname:
_, input_ext = os.path.splitext(input_fname)
if input_ext == '.json':
input_fmt = 'json'
elif input_ext == '.yaml':
input_fmt = 'yaml'
else:
input_fmt = 'nml'
else:
input_fmt = 'nml'
# Output format flag validation
valid_formats = ('json', 'yaml', 'nml')
if args.format and args.format not in valid_formats:
print('f90nml: error: format must be one of the following: {0}'
''.format(valid_formats), file=sys.stderr)
sys.exit(-1)
# Get output format
# TODO: Combine with input format
if not args.format:
if output_fname:
_, output_ext = os.path.splitext(output_fname)
if output_ext == '.json':
output_fmt = 'json'
elif output_ext in ('.yaml', '.yml'):
output_fmt = 'yaml'
else:
output_fmt = 'nml'
else:
output_fmt = 'nml'
else:
output_fmt = args.format
# Confirm that YAML module is available
if (input_fmt == 'yaml' or output_fmt == 'yaml') and not has_yaml:
print('f90nml: error: YAML module could not be found.',
file=sys.stderr)
sys.exit(-1)
# Do not patch non-namelist output
if any(fmt != 'nml' for fmt in (input_fmt, output_fmt)) and args.patch:
print('f90nml: error: Only namelist files can be patched.',
file=sys.stderr)
sys.exit(-1)
# Read the input file
if input_fname:
if input_fmt in ('json', 'yaml'):
if input_fmt == 'json':
with open(input_fname) as input_file:
input_data = json.load(input_file)
elif input_ext == '.yaml':
with open(input_fname) as input_file:
input_data = yaml.safe_load(input_file)
else:
input_data = f90nml.read(input_fname)
else:
input_data = {}
input_data = f90nml.Namelist(input_data)
# Construct the update namelist
update_nml = {}
if args.variable:
if not args.group:
# Use the first available group
grp = list(input_data.keys())[0]
warnings.warn('f90nml: warning: Assuming variables are in group \'{0}\'.'.format(grp))
else:
grp = args.group
update_nml_str = '&{0} {1} /\n'.format(grp, ', '.join(args.variable))
update_io = StringIO(update_nml_str)
update_nml = f90nml.read(update_io)
update_io.close()
# Target output
output_file = open(output_fname, 'w') if output_fname else sys.stdout
if args.patch:
# We have to read the file twice for a patch. The main reason is
# to identify the default group, in case this is not provided.
# It could be avoided if a group is provided, but logically that could
# be a mess that I do not want to sort out right now.
f90nml.patch(input_fname, update_nml, output_file)
else:
# Update the input namelist directly
if update_nml:
try:
input_data[grp].update(update_nml[grp])
except KeyError:
input_data[grp] = update_nml[grp]
# Write to output
if not args.patch:
if output_fmt in ('json', 'yaml'):
if output_fmt == 'json':
input_data = input_data.todict(complex_tuple=True)
json.dump(input_data, output_file,
indent=4, separators=(',', ': '))
output_file.write('\n')
elif output_fmt == 'yaml':
input_data = input_data.todict(complex_tuple=True)
yaml.dump(input_data, output_file,
default_flow_style=False)
else:
# Default to namelist output
f90nml.write(input_data, output_file)
# Cleanup
if output_file != sys.stdout:
output_file.close() | Parse the command line input arguments. | https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/cli.py#L38-L188 |
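A minimal sketch of driving this CLI entry point from Python by setting sys.argv before calling parse(); the group, variable, and file names are illustrative assumptions, not part of the original.

import sys
import f90nml.cli

# Hypothetical invocation: set dt in group config_nml of config.nml,
# writing the result to patched.nml (all names are assumptions).
sys.argv = ['f90nml', '-g', 'config_nml', '-v', 'dt=0.25',
            'config.nml', 'patched.nml']
f90nml.cli.parse()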
marshallward/f90nml | f90nml/__init__.py | write | def write(nml, nml_path, force=False, sort=False):
"""Save a namelist to disk using either a file object or its file path.
File object usage:
>>> with open(nml_path, 'w') as nml_file:
>>> f90nml.write(nml, nml_file)
File path usage:
>>> f90nml.write(nml, 'data.nml')
This function is equivalent to the ``write`` function of the ``Namelist``
object ``nml``.
>>> nml.write('data.nml')
By default, ``write`` will not overwrite an existing file. To override
this, use the ``force`` flag.
>>> nml.write('data.nml', force=True)
To alphabetically sort the ``Namelist`` keys, use the ``sort`` flag.
>>> nml.write('data.nml', sort=True)
"""
# Promote dicts to Namelists
if not isinstance(nml, Namelist) and isinstance(nml, dict):
nml_in = Namelist(nml)
else:
nml_in = nml
nml_in.write(nml_path, force=force, sort=sort) | python | def write(nml, nml_path, force=False, sort=False):
"""Save a namelist to disk using either a file object or its file path.
File object usage:
>>> with open(nml_path, 'w') as nml_file:
>>> f90nml.write(nml, nml_file)
File path usage:
>>> f90nml.write(nml, 'data.nml')
This function is equivalent to the ``write`` function of the ``Namelist``
object ``nml``.
>>> nml.write('data.nml')
By default, ``write`` will not overwrite an existing file. To override
this, use the ``force`` flag.
>>> nml.write('data.nml', force=True)
To alphabetically sort the ``Namelist`` keys, use the ``sort`` flag.
>>> nml.write('data.nml', sort=True)
"""
# Promote dicts to Namelists
if not isinstance(nml, Namelist) and isinstance(nml, dict):
nml_in = Namelist(nml)
else:
nml_in = nml
nml_in.write(nml_path, force=force, sort=sort) | Save a namelist to disk using either a file object or its file path.
File object usage:
>>> with open(nml_path, 'w') as nml_file:
>>> f90nml.write(nml, nml_file)
File path usage:
>>> f90nml.write(nml, 'data.nml')
This function is equivalent to the ``write`` function of the ``Namelist``
object ``nml``.
>>> nml.write('data.nml')
By default, ``write`` will not overwrite an existing file. To override
this, use the ``force`` flag.
>>> nml.write('data.nml', force=True)
To alphabetically sort the ``Namelist`` keys, use the ``sort`` flag.
>>> nml.write('data.nml', sort=True) | https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/__init__.py#L50-L82 |
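As a quick sketch of the dict promotion noted in the body above, a plain dict is accepted and converted to a Namelist before writing; the group name, values, and file name are illustrative.

import f90nml

nml = {'config_nml': {'steps': 10, 'dt': 0.5}}   # plain dict, promoted internally
f90nml.write(nml, 'config.nml', force=True)      # force=True overwrites an existing file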
marshallward/f90nml | f90nml/__init__.py | patch | def patch(nml_path, nml_patch, out_path=None):
"""Create a new namelist based on an input namelist and reference dict.
>>> f90nml.patch('data.nml', nml_patch, 'patched_data.nml')
This function is equivalent to the ``read`` function of the ``Parser``
object with the patch output arguments.
>>> parser = f90nml.Parser()
>>> nml = parser.read('data.nml', nml_patch, 'patched_data.nml')
A patched namelist file will retain any formatting or comments from the
original namelist file. Any modified values will be formatted based on the
settings of the ``Namelist`` object.
"""
parser = Parser()
return parser.read(nml_path, nml_patch, out_path) | python | def patch(nml_path, nml_patch, out_path=None):
"""Create a new namelist based on an input namelist and reference dict.
>>> f90nml.patch('data.nml', nml_patch, 'patched_data.nml')
This function is equivalent to the ``read`` function of the ``Parser``
object with the patch output arguments.
>>> parser = f90nml.Parser()
>>> nml = parser.read('data.nml', nml_patch, 'patched_data.nml')
A patched namelist file will retain any formatting or comments from the
original namelist file. Any modified values will be formatted based on the
settings of the ``Namelist`` object.
"""
parser = Parser()
return parser.read(nml_path, nml_patch, out_path) | Create a new namelist based on an input namelist and reference dict.
>>> f90nml.patch('data.nml', nml_patch, 'patched_data.nml')
This function is equivalent to the ``read`` function of the ``Parser``
object with the patch output arguments.
>>> parser = f90nml.Parser()
>>> nml = parser.read('data.nml', nml_patch, 'patched_data.nml')
A patched namelist file will retain any formatting or comments from the
original namelist file. Any modified values will be formatted based on the
settings of the ``Namelist`` object. | https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/__init__.py#L85-L102 |
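A short usage sketch, assuming an existing config.nml with a config_nml group; only the listed value changes and the original file's formatting is retained.

import f90nml

patch_values = {'config_nml': {'dt': 0.25}}                  # names are illustrative
f90nml.patch('config.nml', patch_values, 'config_patched.nml')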
Calysto/calysto | calysto/graphics.py | Canvas.setCoords | def setCoords(self, x1, y1, x2, y2):
"""Set coordinates of window to run from (x1,y1) in the
lower-left corner to (x2,y2) in the upper-right corner."""
self.trans = Transform(self.size[0], self.size[1], x1, y1, x2, y2) | python | def setCoords(self, x1, y1, x2, y2):
"""Set coordinates of window to run from (x1,y1) in the
lower-left corner to (x2,y2) in the upper-right corner."""
self.trans = Transform(self.size[0], self.size[1], x1, y1, x2, y2) | Set coordinates of window to run from (x1,y1) in the
lower-left corner to (x2,y2) in the upper-right corner. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/graphics.py#L94-L97 |
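A minimal sketch of mapping world coordinates onto a canvas; the Canvas constructor arguments are an assumption, only setCoords itself comes from the code above.

from calysto.graphics import Canvas    # import path taken from this row

canvas = Canvas(size=(400, 300))       # assumed constructor: pixel size
canvas.setCoords(0.0, 0.0, 10.0, 7.5)  # world coords: lower-left to upper-right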
Calysto/calysto | calysto/graphics.py | Canvas.convert | def convert(self, format="png", **kwargs):
"""
png, ps, pdf, gif, jpg, svg
returns image in format as bytes
"""
if format.upper() in cairosvg.SURFACES:
surface = cairosvg.SURFACES[format.upper()]
else:
raise Exception("'%s' image format unavailable: use one of %s" %
(format.upper(), list(cairosvg.SURFACES.keys())))
return surface.convert(bytestring=str(self), **kwargs) | python | def convert(self, format="png", **kwargs):
"""
png, ps, pdf, gif, jpg, svg
returns image in format as bytes
"""
if format.upper() in cairosvg.SURFACES:
surface = cairosvg.SURFACES[format.upper()]
else:
raise Exception("'%s' image format unavailable: use one of %s" %
(format.upper(), list(cairosvg.SURFACES.keys())))
return surface.convert(bytestring=str(self), **kwargs) | png, ps, pdf, gif, jpg, svg
returns image in format as bytes | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/graphics.py#L226-L236 |
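A sketch of rendering a canvas to PNG bytes via cairosvg; the Canvas constructor and the output file name are assumptions, and an unknown format raises the Exception shown above.

from calysto.graphics import Canvas        # import path taken from this row

canvas = Canvas(size=(200, 200))           # assumed constructor
png_bytes = canvas.convert(format="png")   # raster bytes of the drawing
with open("drawing.png", "wb") as handle:  # file name is illustrative
    handle.write(png_bytes)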
Calysto/calysto | calysto/graphics.py | Canvas.toPIL | def toPIL(self, **attribs):
"""
Convert canvas to a PIL image
"""
import PIL.Image
bytes = self.convert("png")
sfile = io.BytesIO(bytes)
pil = PIL.Image.open(sfile)
return pil | python | def toPIL(self, **attribs):
"""
Convert canvas to a PIL image
"""
import PIL.Image
bytes = self.convert("png")
sfile = io.BytesIO(bytes)
pil = PIL.Image.open(sfile)
return pil | Convert canvas to a PIL image | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/graphics.py#L238-L246 |
Calysto/calysto | calysto/graphics.py | Canvas.toGIF | def toGIF(self, **attribs):
"""
Convert canvas to GIF bytes
"""
im = self.toPIL(**attribs)
sfile = io.BytesIO()
im.save(sfile, format="gif")
return sfile.getvalue() | python | def toGIF(self, **attribs):
"""
Convert canvas to GIF bytes
"""
im = self.toPIL(**attribs)
sfile = io.BytesIO()
im.save(sfile, format="gif")
return sfile.getvalue() | Convert canvas to GIF bytes | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/graphics.py#L248-L255 |
Calysto/calysto | calysto/graphics.py | Canvas.getPixels | def getPixels(self):
"""
Return a stream of pixels from current Canvas.
"""
array = self.toArray()
(width, height, depth) = array.size
for x in range(width):
for y in range(height):
yield Pixel(array, x, y) | python | def getPixels(self):
"""
Return a stream of pixels from current Canvas.
"""
array = self.toArray()
(width, height, depth) = array.size
for x in range(width):
for y in range(height):
yield Pixel(array, x, y) | Return a stream of pixels from current Canvas. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/graphics.py#L273-L281 |
Calysto/calysto | calysto/graphics.py | Circle.getP1 | def getP1(self):
"""
Left, upper point
"""
return Point(self.center[0] - self.radius,
self.center[1] - self.radius) | python | def getP1(self):
"""
Left, upper point
"""
return Point(self.center[0] - self.radius,
self.center[1] - self.radius) | Left, upper point | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/graphics.py#L454-L459 |
Calysto/calysto | calysto/graphics.py | Circle.getP2 | def getP2(self):
"""
Right, lower point
"""
return Point(self.center[0] + self.radius,
self.center[1] + self.radius) | python | def getP2(self):
"""
Right, lower point
"""
return Point(self.center[0] + self.radius,
self.center[1] + self.radius) | Right, lower point | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/graphics.py#L461-L466 |
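A quick sketch of reading a circle's bounding-box corners; the Circle constructor (center, radius) is an assumption inferred from the attributes used above.

from calysto.graphics import Circle    # import path taken from this row

circle = Circle((50, 50), 20)          # assumed constructor: center, radius
p1 = circle.getP1()                    # upper-left corner, (30, 30)
p2 = circle.getP2()                    # lower-right corner, (70, 70)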
Calysto/calysto | calysto/simulation.py | Simulation.start_sim | def start_sim(self, gui=True, set_values={}, error=None):
"""
Run the simulation in the background, showing the GUI by default.
"""
self.error = error
if not self.is_running.is_set():
def loop():
self.need_to_stop.clear()
self.is_running.set()
for robot in self.robots:
if robot.brain:
self.runBrain(robot.brain)
count = 0
while not self.need_to_stop.isSet():
if not self.paused.is_set():
self.clock += self.sim_time
for robot in self.robots:
try:
robot.update()
except Exception as exc:
self.need_to_stop.set()
if error:
error.value = "Error: %s. Now stopping simulation." % str(exc)
else:
raise
if gui:
self.draw()
if count % self.gui_update == 0:
if "canvas" in set_values:
set_values["canvas"].value = str(self.render())
if "energy" in set_values:
if len(self.robots) > 0:
set_values["energy"].value = str(self.robots[0].energy)
count += 1
self.realsleep(self.sim_time)
if self.robots[0].energy <= 0:
self.need_to_stop.set()
self.is_running.clear()
for robot in self.robots:
robot.stop()
threading.Thread(target=loop).start() | python | def start_sim(self, gui=True, set_values={}, error=None):
"""
Run the simulation in the background, showing the GUI by default.
"""
self.error = error
if not self.is_running.is_set():
def loop():
self.need_to_stop.clear()
self.is_running.set()
for robot in self.robots:
if robot.brain:
self.runBrain(robot.brain)
count = 0
while not self.need_to_stop.isSet():
if not self.paused.is_set():
self.clock += self.sim_time
for robot in self.robots:
try:
robot.update()
except Exception as exc:
self.need_to_stop.set()
if error:
error.value = "Error: %s. Now stopping simulation." % str(exc)
else:
raise
if gui:
self.draw()
if count % self.gui_update == 0:
if "canvas" in set_values:
set_values["canvas"].value = str(self.render())
if "energy" in set_values:
if len(self.robots) > 0:
set_values["energy"].value = str(self.robots[0].energy)
count += 1
self.realsleep(self.sim_time)
if self.robots[0].energy <= 0:
self.need_to_stop.set()
self.is_running.clear()
for robot in self.robots:
robot.stop()
threading.Thread(target=loop).start() | Run the simulation in the background, showing the GUI by default. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/simulation.py#L109-L149 |
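A hedged sketch of driving the loop above headlessly; it assumes `sim` is an already-constructed Simulation with at least one robot attached, and touches only attributes visible in this function.

# `sim` is assumed to be a fully constructed Simulation instance
sim.start_sim(gui=False)      # run the loop in a background thread, no drawing
sim.sleep(5)                  # wait five simulated seconds
sim.need_to_stop.set()        # raise the stop flag checked by the loop above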
Calysto/calysto | calysto/simulation.py | Simulation.draw | def draw(self):
"""
Render and draw the world and robots.
"""
from calysto.display import display, clear_output
canvas = self.render()
clear_output(wait=True)
display(canvas) | python | def draw(self):
"""
Render and draw the world and robots.
"""
from calysto.display import display, clear_output
canvas = self.render()
clear_output(wait=True)
display(canvas) | Render and draw the world and robots. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/simulation.py#L180-L187 |
Calysto/calysto | calysto/simulation.py | Simulation.sleep | def sleep(self, seconds):
"""
Sleep in simulated time.
"""
start = self.time()
while (self.time() - start < seconds and
not self.need_to_stop.is_set()):
self.need_to_stop.wait(self.sim_time) | python | def sleep(self, seconds):
"""
Sleep in simulated time.
"""
start = self.time()
while (self.time() - start < seconds and
not self.need_to_stop.is_set()):
self.need_to_stop.wait(self.sim_time) | Sleep in simulated time. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/simulation.py#L209-L216 |
Calysto/calysto | calysto/simulation.py | Simulation.runBrain | def runBrain(self, f):
"""
Run a brain program in the background.
"""
if self.error:
self.error.value = ""
def wrapper():
self.brain_running.set()
try:
f()
except KeyboardInterrupt:
# Just stop
pass
except Exception as e:
if self.error:
self.error.value = "<pre style='background: #fdd'>" + traceback.format_exc() + "</pre>"
else:
raise
finally:
self.brain_running.clear()
# Otherwise, will show error
threading.Thread(target=wrapper).start() | python | def runBrain(self, f):
"""
Run a brain program in the background.
"""
if self.error:
self.error.value = ""
def wrapper():
self.brain_running.set()
try:
f()
except KeyboardInterrupt:
# Just stop
pass
except Exception as e:
if self.error:
self.error.value = "<pre style='background: #fdd'>" + traceback.format_exc() + "</pre>"
else:
raise
finally:
self.brain_running.clear()
# Otherwise, will show error
threading.Thread(target=wrapper).start() | Run a brain program in the background. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/simulation.py#L221-L242 |
Calysto/calysto | calysto/simulation.py | DiscreteSimulation.addCluster | def addCluster(self, cx, cy, item, count, lam_percent=.25):
"""
Add a Poisson cluster of count items around (x,y).
"""
dx, dy = map(lambda v: v * lam_percent, self.psize)
total = 0
while total < count:
points = np.random.poisson(lam=(dx, dy), size=(count, 2))
for x, y in points:
px, py = (int(x - dx + cx), int(y - dy + cy))
if self.getPatch(px, py) is None:
self.setPatch(px, py, item)
total += 1
if total == count:
break | python | def addCluster(self, cx, cy, item, count, lam_percent=.25):
"""
Add a Poisson cluster of count items around (x,y).
"""
dx, dy = map(lambda v: v * lam_percent, self.psize)
total = 0
while total < count:
points = np.random.poisson(lam=(dx, dy), size=(count, 2))
for x, y in points:
px, py = (int(x - dx + cx), int(y - dy + cy))
if self.getPatch(px, py) is None:
self.setPatch(px, py, item)
total += 1
if total == count:
break | Add a Poisson cluster of count items around (x,y). | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/simulation.py#L286-L300 |
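A hedged example call; it assumes `sim` is a DiscreteSimulation and that the string "food" is a valid patch item in that world (neither is shown in this row).

# `sim` and the "food" item are assumptions for illustration only
sim.addCluster(20, 30, "food", 15)   # scatter 15 items, Poisson-clustered around patch (20, 30)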
Calysto/calysto | calysto/simulation.py | Robot.forward | def forward(self, seconds, vx=5):
"""
Move continuously in simulator for seconds and velocity vx.
"""
self.vx = vx
self.sleep(seconds)
self.vx = 0 | python | def forward(self, seconds, vx=5):
"""
Move continuously in simulator for seconds and velocity vx.
"""
self.vx = vx
self.sleep(seconds)
self.vx = 0 | Move continuously in simulator for seconds and velocity vx. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/simulation.py#L393-L399 |
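A one-line usage sketch, assuming `robot` is a Robot inside a running simulation.

robot.forward(2, vx=5)   # drive for 2 simulated seconds at velocity 5, then stop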
Calysto/calysto | calysto/simulation.py | DNARobot.codon2weight | def codon2weight(self, codon):
"""
Turn a codon of "000" to "999" to a number between
-5.0 and 5.0.
"""
length = len(codon)
retval = int(codon)
return retval/(10 ** (length - 1)) - 5.0 | python | def codon2weight(self, codon):
"""
Turn a codon of "000" to "999" to a number between
-5.0 and 5.0.
"""
length = len(codon)
retval = int(codon)
return retval/(10 ** (length - 1)) - 5.0 | Turn a codon of "000" to "999" to a number between
-5.0 and 5.0. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/simulation.py#L1248-L1255 |
Calysto/calysto | calysto/simulation.py | DNARobot.weight2codon | def weight2codon(self, weight, length=None):
"""
Given a weight between -5 and 5, turn it into
a codon, eg "000" to "999"
"""
if length is None:
length = self.clen
retval = 0
weight = min(max(weight + 5.0, 0), 10.0) * (10 ** (length - 1))
for i in range(length):
if i == length - 1: # last one
d = int(round(weight / (10 ** (length - i - 1))))
else:
d = int(weight / (10 ** (length - i - 1)))
weight = weight % (10 ** (length - i - 1))
retval += d * (10 ** (length - i - 1))
return ("%0" + str(length) + "d") % retval | python | def weight2codon(self, weight, length=None):
"""
Given a weight between -5 and 5, turn it into
a codon, eg "000" to "999"
"""
if length is None:
length = self.clen
retval = 0
weight = min(max(weight + 5.0, 0), 10.0) * (10 ** (length - 1))
for i in range(length):
if i == length - 1: # last one
d = int(round(weight / (10 ** (length - i - 1))))
else:
d = int(weight / (10 ** (length - i - 1)))
weight = weight % (10 ** (length - i - 1))
retval += d * (10 ** (length - i - 1))
return ("%0" + str(length) + "d") % retval | Given a weight between -5 and 5, turn it into
a codon, eg "000" to "999" | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/simulation.py#L1257-L1273 |
marshallward/f90nml | f90nml/namelist.py | is_nullable_list | def is_nullable_list(val, vtype):
"""Return True if list contains either values of type `vtype` or None."""
return (isinstance(val, list) and
any(isinstance(v, vtype) for v in val) and
all((isinstance(v, vtype) or v is None) for v in val)) | python | def is_nullable_list(val, vtype):
"""Return True if list contains either values of type `vtype` or None."""
return (isinstance(val, list) and
any(isinstance(v, vtype) for v in val) and
all((isinstance(v, vtype) or v is None) for v in val)) | Return True if list contains either values of type `vtype` or None. | https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/namelist.py#L721-L725 |
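Two quick checks against the predicate above; the module path is taken from the URL in this row.

from f90nml.namelist import is_nullable_list

is_nullable_list([1.0, None, 2.5], float)   # True: at least one float, rest float or None
is_nullable_list([None, None], float)       # False: no value of the requested type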
marshallward/f90nml | f90nml/namelist.py | Namelist.column_width | def column_width(self, width):
"""Validate and set the column width."""
if isinstance(width, int):
if width >= 0:
self._column_width = width
else:
raise ValueError('Column width must be nonnegative.')
else:
raise TypeError('Column width must be a nonnegative integer.') | python | def column_width(self, width):
"""Validate and set the column width."""
if isinstance(width, int):
if width >= 0:
self._column_width = width
else:
raise ValueError('Column width must be nonnegative.')
else:
raise TypeError('Column width must be a nonnegative integer.') | Validate and set the column width. | https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/namelist.py#L158-L166 |
marshallward/f90nml | f90nml/namelist.py | Namelist.indent | def indent(self, value):
"""Validate and set the indent width."""
# Explicit indent setting
if isinstance(value, str):
if value.isspace() or len(value) == 0:
self._indent = value
else:
raise ValueError('String indentation can only contain '
'whitespace.')
# Set indent width
elif isinstance(value, int):
if value >= 0:
self._indent = value * ' '
else:
raise ValueError('Indentation spacing must be nonnegative.')
else:
raise TypeError('Indentation must be specified by string or space '
'width.') | python | def indent(self, value):
"""Validate and set the indent width."""
# Explicit indent setting
if isinstance(value, str):
if value.isspace() or len(value) == 0:
self._indent = value
else:
raise ValueError('String indentation can only contain '
'whitespace.')
# Set indent width
elif isinstance(value, int):
if value >= 0:
self._indent = value * ' '
else:
raise ValueError('Indentation spacing must be nonnegative.')
else:
raise TypeError('Indentation must be specified by string or space '
'width.') | Validate and set the indent width. | https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/namelist.py#L179-L198 |
marshallward/f90nml | f90nml/namelist.py | Namelist.end_comma | def end_comma(self, value):
"""Validate and set the comma termination flag."""
if not isinstance(value, bool):
raise TypeError('end_comma attribute must be a logical type.')
self._end_comma = value | python | def end_comma(self, value):
"""Validate and set the comma termination flag."""
if not isinstance(value, bool):
raise TypeError('end_comma attribute must be a logical type.')
self._end_comma = value | Validate and set the comma termination flag. | https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/namelist.py#L212-L216 |
marshallward/f90nml | f90nml/namelist.py | Namelist.index_spacing | def index_spacing(self, value):
"""Validate and set the index_spacing flag."""
if not isinstance(value, bool):
raise TypeError('index_spacing attribute must be a logical type.')
self._index_spacing = value | python | def index_spacing(self, value):
"""Validate and set the index_spacing flag."""
if not isinstance(value, bool):
raise TypeError('index_spacing attribute must be a logical type.')
self._index_spacing = value | Validate and set the index_spacing flag. | https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/namelist.py#L224-L228 |
marshallward/f90nml | f90nml/namelist.py | Namelist.uppercase | def uppercase(self, value):
"""Validate and set the uppercase flag."""
if not isinstance(value, bool):
raise TypeError('uppercase attribute must be a logical type.')
self._uppercase = value | python | def uppercase(self, value):
"""Validate and set the uppercase flag."""
if not isinstance(value, bool):
raise TypeError('uppercase attribute must be a logical type.')
self._uppercase = value | Validate and set the uppercase flag. | https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/namelist.py#L236-L240 |
marshallward/f90nml | f90nml/namelist.py | Namelist.float_format | def float_format(self, value):
"""Validate and set the upper case flag."""
if isinstance(value, str):
# Duck-test the format string; raise ValueError on fail
'{0:{1}}'.format(1.23, value)
self._float_format = value
else:
raise TypeError('Floating point format code must be a string.') | python | def float_format(self, value):
"""Validate and set the upper case flag."""
if isinstance(value, str):
# Duck-test the format string; raise ValueError on fail
'{0:{1}}'.format(1.23, value)
self._float_format = value
else:
raise TypeError('Floating point format code must be a string.') | Validate and set the floating point format string. | https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/namelist.py#L252-L260 |
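The setters in the surrounding rows combine naturally when tuning output; a short sketch, assuming an existing config.nml (file names are illustrative).

import f90nml

nml = f90nml.read('config.nml')
nml.column_width = 60
nml.indent = 2                    # two-space indentation
nml.uppercase = True
nml.end_comma = True
nml.float_format = '.3e'
nml.write('config_formatted.nml', force=True)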
marshallward/f90nml | f90nml/namelist.py | Namelist.logical_repr | def logical_repr(self, value):
"""Set the string representation of logical values."""
if not any(isinstance(value, t) for t in (list, tuple)):
raise TypeError("Logical representation must be a tuple with "
"a valid true and false value.")
if not len(value) == 2:
raise ValueError("List must contain two values.")
self.false_repr = value[0]
self.true_repr = value[1] | python | def logical_repr(self, value):
"""Set the string representation of logical values."""
if not any(isinstance(value, t) for t in (list, tuple)):
raise TypeError("Logical representation must be a tuple with "
"a valid true and false value.")
if not len(value) == 2:
raise ValueError("List must contain two values.")
self.false_repr = value[0]
self.true_repr = value[1] | Set the string representation of logical values. | https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/namelist.py#L278-L287 |
marshallward/f90nml | f90nml/namelist.py | Namelist.false_repr | def false_repr(self, value):
"""Validate and set the logical false representation."""
if isinstance(value, str):
if not (value.lower().startswith('f') or
value.lower().startswith('.f')):
raise ValueError("Logical false representation must start "
"with 'F' or '.F'.")
else:
self._logical_repr[0] = value
else:
raise TypeError('Logical false representation must be a string.') | python | def false_repr(self, value):
"""Validate and set the logical false representation."""
if isinstance(value, str):
if not (value.lower().startswith('f') or
value.lower().startswith('.f')):
raise ValueError("Logical false representation must start "
"with 'F' or '.F'.")
else:
self._logical_repr[0] = value
else:
raise TypeError('Logical false representation must be a string.') | Validate and set the logical false representation. | https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/namelist.py#L319-L329 |
marshallward/f90nml | f90nml/namelist.py | Namelist.start_index | def start_index(self, value):
"""Validate and set the vector start index."""
# TODO: Validate contents? (May want to set before adding the data.)
if not isinstance(value, dict):
raise TypeError('start_index attribute must be a dict.')
self._start_index = value | python | def start_index(self, value):
"""Validate and set the vector start index."""
# TODO: Validate contents? (May want to set before adding the data.)
if not isinstance(value, dict):
raise TypeError('start_index attribute must be a dict.')
self._start_index = value | Validate and set the vector start index. | https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/namelist.py#L364-L369 |
marshallward/f90nml | f90nml/namelist.py | Namelist.write | def write(self, nml_path, force=False, sort=False):
"""Write Namelist to a Fortran 90 namelist file.
>>> nml = f90nml.read('input.nml')
>>> nml.write('out.nml')
"""
nml_is_file = hasattr(nml_path, 'read')
if not force and not nml_is_file and os.path.isfile(nml_path):
raise IOError('File {0} already exists.'.format(nml_path))
nml_file = nml_path if nml_is_file else open(nml_path, 'w')
try:
self._writestream(nml_file, sort)
finally:
if not nml_is_file:
nml_file.close() | python | def write(self, nml_path, force=False, sort=False):
"""Write Namelist to a Fortran 90 namelist file.
>>> nml = f90nml.read('input.nml')
>>> nml.write('out.nml')
"""
nml_is_file = hasattr(nml_path, 'read')
if not force and not nml_is_file and os.path.isfile(nml_path):
raise IOError('File {0} already exists.'.format(nml_path))
nml_file = nml_path if nml_is_file else open(nml_path, 'w')
try:
self._writestream(nml_file, sort)
finally:
if not nml_is_file:
nml_file.close() | Write Namelist to a Fortran 90 namelist file.
>>> nml = f90nml.read('input.nml')
>>> nml.write('out.nml') | https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/namelist.py#L391-L406 |
marshallward/f90nml | f90nml/namelist.py | Namelist.patch | def patch(self, nml_patch):
"""Update the namelist from another partial or full namelist.
This is different from the intrinsic `update()` method, which replaces
a namelist section. Rather, it updates the values within a section.
"""
for sec in nml_patch:
if sec not in self:
self[sec] = Namelist()
self[sec].update(nml_patch[sec]) | python | def patch(self, nml_patch):
"""Update the namelist from another partial or full namelist.
This is different from the intrinsic `update()` method, which replaces
a namelist section. Rather, it updates the values within a section.
"""
for sec in nml_patch:
if sec not in self:
self[sec] = Namelist()
self[sec].update(nml_patch[sec]) | Update the namelist from another partial or full namelist.
This is different from the intrinsic `update()` method, which replaces
a namelist section. Rather, it updates the values within a section. | https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/namelist.py#L408-L417 |
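A brief sketch distinguishing patch() from update(): only dt changes, the rest of the group survives (group and file names are illustrative).

import f90nml

nml = f90nml.read('config.nml')
nml.patch({'config_nml': {'dt': 0.25}})   # updates values within the group, no replacement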
marshallward/f90nml | f90nml/namelist.py | Namelist.groups | def groups(self):
"""Return an iterator that spans values with group and variable names.
Elements of the iterator consist of a tuple containing two values. The
first is an internal tuple containing the current namelist group and its
variable name. The second element of the returned tuple is the value
associated with the current group and variable.
"""
for key, value in self.items():
for inner_key, inner_value in value.items():
yield (key, inner_key), inner_value | python | def groups(self):
"""Return an iterator that spans values with group and variable names.
Elements of the iterator consist of a tuple containing two values. The
first is an internal tuple containing the current namelist group and its
variable name. The second element of the returned tuple is the value
associated with the current group and variable.
"""
for key, value in self.items():
for inner_key, inner_value in value.items():
yield (key, inner_key), inner_value | Return an iterator that spans values with group and variable names.
Elements of the iterator consist of a tuple containing two values. The
first is an internal tuple containing the current namelist group and its
variable name. The second element of the returned tuple is the value
associated with the current group and variable. | https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/namelist.py#L419-L429 |
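A short iteration sketch over the flattened (group, variable) pairs; the input file name is illustrative.

import f90nml

nml = f90nml.read('config.nml')
for (group, name), value in nml.groups():
    print(group, name, value)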
marshallward/f90nml | f90nml/namelist.py | Namelist._write_nmlgrp | def _write_nmlgrp(self, grp_name, grp_vars, nml_file, sort=False):
"""Write namelist group to target file."""
if self._newline:
print(file=nml_file)
self._newline = True
if self.uppercase:
grp_name = grp_name.upper()
if sort:
grp_vars = Namelist(sorted(grp_vars.items(), key=lambda t: t[0]))
print('&{0}'.format(grp_name), file=nml_file)
for v_name, v_val in grp_vars.items():
v_start = grp_vars.start_index.get(v_name, None)
for v_str in self._var_strings(v_name, v_val, v_start=v_start):
nml_line = self.indent + '{0}'.format(v_str)
print(nml_line, file=nml_file)
print('/', file=nml_file) | python | def _write_nmlgrp(self, grp_name, grp_vars, nml_file, sort=False):
"""Write namelist group to target file."""
if self._newline:
print(file=nml_file)
self._newline = True
if self.uppercase:
grp_name = grp_name.upper()
if sort:
grp_vars = Namelist(sorted(grp_vars.items(), key=lambda t: t[0]))
print('&{0}'.format(grp_name), file=nml_file)
for v_name, v_val in grp_vars.items():
v_start = grp_vars.start_index.get(v_name, None)
for v_str in self._var_strings(v_name, v_val, v_start=v_start):
nml_line = self.indent + '{0}'.format(v_str)
print(nml_line, file=nml_file)
print('/', file=nml_file) | Write namelist group to target file. | https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/namelist.py#L449-L471 |
marshallward/f90nml | f90nml/namelist.py | Namelist._var_strings | def _var_strings(self, v_name, v_values, v_idx=None, v_start=None):
"""Convert namelist variable to list of fixed-width strings."""
if self.uppercase:
v_name = v_name.upper()
var_strs = []
# Parse a multidimensional array
if is_nullable_list(v_values, list):
if not v_idx:
v_idx = []
i_s = v_start[::-1][len(v_idx)] if v_start else None
# FIXME: We incorrectly assume 1-based indexing if it is
# unspecified. This is necessary because our output method always
# separates the outer axes to one per line. But we cannot do this
# if we don't know the first index (which we are no longer assuming
# to be 1-based elsewhere). Unfortunately, the solution needs a
# rethink of multidimensional output.
# NOTE: Fixing this would also clean up the output of todict(),
# which is now incorrectly documenting unspecified indices as 1.
# For now, we will assume 1-based indexing here, just to keep
# things working smoothly.
if i_s is None:
i_s = 1
for idx, val in enumerate(v_values, start=i_s):
v_idx_new = v_idx + [idx]
v_strs = self._var_strings(v_name, val, v_idx=v_idx_new,
v_start=v_start)
var_strs.extend(v_strs)
# Parse derived type contents
elif isinstance(v_values, Namelist):
for f_name, f_vals in v_values.items():
v_title = '%'.join([v_name, f_name])
v_start_new = v_values.start_index.get(f_name, None)
v_strs = self._var_strings(v_title, f_vals,
v_start=v_start_new)
var_strs.extend(v_strs)
# Parse an array of derived types
elif is_nullable_list(v_values, Namelist):
if not v_idx:
v_idx = []
i_s = v_start[::-1][len(v_idx)] if v_start else 1
for idx, val in enumerate(v_values, start=i_s):
# Skip any empty elements in a list of derived types
if val is None:
continue
v_title = v_name + '({0})'.format(idx)
v_strs = self._var_strings(v_title, val)
var_strs.extend(v_strs)
else:
use_default_start_index = False
if not isinstance(v_values, list):
v_values = [v_values]
use_default_start_index = False
else:
use_default_start_index = self.default_start_index is not None
# Print the index range
# TODO: Include a check for len(v_values) to determine if vector
if v_idx or v_start or use_default_start_index:
v_idx_repr = '('
if v_start or use_default_start_index:
if v_start:
i_s = v_start[0]
else:
i_s = self.default_start_index
if i_s is None:
v_idx_repr += ':'
else:
i_e = i_s + len(v_values) - 1
if i_s == i_e:
v_idx_repr += '{0}'.format(i_s)
else:
v_idx_repr += '{0}:{1}'.format(i_s, i_e)
else:
v_idx_repr += ':'
if v_idx:
idx_delim = ', ' if self._index_spacing else ','
v_idx_repr += idx_delim
v_idx_repr += idx_delim.join(str(i) for i in v_idx[::-1])
v_idx_repr += ')'
else:
v_idx_repr = ''
# Split output across multiple lines (if necessary)
val_strs = []
val_line = ''
for v_val in v_values:
v_header = v_name + v_idx_repr + ' = '
# Increase column width if the header exceeds this value
if len(self.indent + v_header) >= self.column_width:
column_width = len(self.indent + v_header) + 1
else:
column_width = self.column_width
v_width = column_width - len(self.indent + v_header)
if len(val_line) < v_width:
val_line += self._f90repr(v_val) + ', '
if len(val_line) >= v_width:
val_strs.append(val_line.rstrip())
val_line = ''
# Append any remaining values
if val_line:
val_strs.append(val_line.rstrip())
if val_strs:
if self.end_comma or v_values[-1] is None:
pass
else:
val_strs[-1] = val_strs[-1][:-1]
# Complete the set of values
if val_strs:
var_strs.append('{0}{1} = {2}'
''.format(v_name, v_idx_repr,
val_strs[0]).strip())
for v_str in val_strs[1:]:
var_strs.append(' ' * len(v_header) + v_str)
return var_strs | python | def _var_strings(self, v_name, v_values, v_idx=None, v_start=None):
"""Convert namelist variable to list of fixed-width strings."""
if self.uppercase:
v_name = v_name.upper()
var_strs = []
# Parse a multidimensional array
if is_nullable_list(v_values, list):
if not v_idx:
v_idx = []
i_s = v_start[::-1][len(v_idx)] if v_start else None
# FIXME: We incorrectly assume 1-based indexing if it is
# unspecified. This is necessary because our output method always
# separates the outer axes to one per line. But we cannot do this
# if we don't know the first index (which we are no longer assuming
# to be 1-based elsewhere). Unfortunately, the solution needs a
# rethink of multidimensional output.
# NOTE: Fixing this would also clean up the output of todict(),
# which is now incorrectly documenting unspecified indices as 1.
# For now, we will assume 1-based indexing here, just to keep
# things working smoothly.
if i_s is None:
i_s = 1
for idx, val in enumerate(v_values, start=i_s):
v_idx_new = v_idx + [idx]
v_strs = self._var_strings(v_name, val, v_idx=v_idx_new,
v_start=v_start)
var_strs.extend(v_strs)
# Parse derived type contents
elif isinstance(v_values, Namelist):
for f_name, f_vals in v_values.items():
v_title = '%'.join([v_name, f_name])
v_start_new = v_values.start_index.get(f_name, None)
v_strs = self._var_strings(v_title, f_vals,
v_start=v_start_new)
var_strs.extend(v_strs)
# Parse an array of derived types
elif is_nullable_list(v_values, Namelist):
if not v_idx:
v_idx = []
i_s = v_start[::-1][len(v_idx)] if v_start else 1
for idx, val in enumerate(v_values, start=i_s):
# Skip any empty elements in a list of derived types
if val is None:
continue
v_title = v_name + '({0})'.format(idx)
v_strs = self._var_strings(v_title, val)
var_strs.extend(v_strs)
else:
use_default_start_index = False
if not isinstance(v_values, list):
v_values = [v_values]
use_default_start_index = False
else:
use_default_start_index = self.default_start_index is not None
# Print the index range
# TODO: Include a check for len(v_values) to determine if vector
if v_idx or v_start or use_default_start_index:
v_idx_repr = '('
if v_start or use_default_start_index:
if v_start:
i_s = v_start[0]
else:
i_s = self.default_start_index
if i_s is None:
v_idx_repr += ':'
else:
i_e = i_s + len(v_values) - 1
if i_s == i_e:
v_idx_repr += '{0}'.format(i_s)
else:
v_idx_repr += '{0}:{1}'.format(i_s, i_e)
else:
v_idx_repr += ':'
if v_idx:
idx_delim = ', ' if self._index_spacing else ','
v_idx_repr += idx_delim
v_idx_repr += idx_delim.join(str(i) for i in v_idx[::-1])
v_idx_repr += ')'
else:
v_idx_repr = ''
# Split output across multiple lines (if necessary)
val_strs = []
val_line = ''
for v_val in v_values:
v_header = v_name + v_idx_repr + ' = '
# Increase column width if the header exceeds this value
if len(self.indent + v_header) >= self.column_width:
column_width = len(self.indent + v_header) + 1
else:
column_width = self.column_width
v_width = column_width - len(self.indent + v_header)
if len(val_line) < v_width:
val_line += self._f90repr(v_val) + ', '
if len(val_line) >= v_width:
val_strs.append(val_line.rstrip())
val_line = ''
# Append any remaining values
if val_line:
val_strs.append(val_line.rstrip())
if val_strs:
if self.end_comma or v_values[-1] is None:
pass
else:
val_strs[-1] = val_strs[-1][:-1]
# Complete the set of values
if val_strs:
var_strs.append('{0}{1} = {2}'
''.format(v_name, v_idx_repr,
val_strs[0]).strip())
for v_str in val_strs[1:]:
var_strs.append(' ' * len(v_header) + v_str)
return var_strs | Convert namelist variable to list of fixed-width strings. | https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/namelist.py#L473-L622 |
marshallward/f90nml | f90nml/namelist.py | Namelist.todict | def todict(self, complex_tuple=False):
"""Return a dict equivalent to the namelist.
Since Fortran variables and names cannot start with the ``_``
character, any keys starting with this token denote metadata, such as
starting index.
The ``complex_tuple`` flag is used to convert complex data into an
equivalent 2-tuple, with metadata stored to flag the variable as
complex. This is primarily used to facilitate the storage of the
namelist into an equivalent format which does not support complex
numbers, such as JSON or YAML.
"""
# TODO: Preserve ordering
nmldict = OrderedDict(self)
# Search for namelists within the namelist
# TODO: Move repeated stuff to new functions
for key, value in self.items():
if isinstance(value, Namelist):
nmldict[key] = value.todict(complex_tuple)
elif isinstance(value, complex) and complex_tuple:
nmldict[key] = [value.real, value.imag]
try:
nmldict['_complex'].append(key)
except KeyError:
nmldict['_complex'] = [key]
elif isinstance(value, list):
complex_list = False
for idx, entry in enumerate(value):
if isinstance(entry, Namelist):
nmldict[key][idx] = entry.todict(complex_tuple)
elif isinstance(entry, complex) and complex_tuple:
nmldict[key][idx] = [entry.real, entry.imag]
complex_list = True
if complex_list:
try:
nmldict['_complex'].append(key)
except KeyError:
nmldict['_complex'] = [key]
# Append the start index if present
if self.start_index:
nmldict['_start_index'] = self.start_index
return nmldict | python | def todict(self, complex_tuple=False):
"""Return a dict equivalent to the namelist.
Since Fortran variables and names cannot start with the ``_``
character, any keys starting with this token denote metadata, such as
starting index.
The ``complex_tuple`` flag is used to convert complex data into an
equivalent 2-tuple, with metadata stored to flag the variable as
complex. This is primarily used to facilitate the storage of the
namelist into an equivalent format which does not support complex
numbers, such as JSON or YAML.
"""
# TODO: Preserve ordering
nmldict = OrderedDict(self)
# Search for namelists within the namelist
# TODO: Move repeated stuff to new functions
for key, value in self.items():
if isinstance(value, Namelist):
nmldict[key] = value.todict(complex_tuple)
elif isinstance(value, complex) and complex_tuple:
nmldict[key] = [value.real, value.imag]
try:
nmldict['_complex'].append(key)
except KeyError:
nmldict['_complex'] = [key]
elif isinstance(value, list):
complex_list = False
for idx, entry in enumerate(value):
if isinstance(entry, Namelist):
nmldict[key][idx] = entry.todict(complex_tuple)
elif isinstance(entry, complex) and complex_tuple:
nmldict[key][idx] = [entry.real, entry.imag]
complex_list = True
if complex_list:
try:
nmldict['_complex'].append(key)
except KeyError:
nmldict['_complex'] = [key]
# Append the start index if present
if self.start_index:
nmldict['_start_index'] = self.start_index
return nmldict | Return a dict equivalent to the namelist.
Since Fortran variables and names cannot start with the ``_``
character, any keys starting with this token denote metadata, such as
starting index.
The ``complex_tuple`` flag is used to convert complex data into an
equivalent 2-tuple, with metadata stored to flag the variable as
complex. This is primarily used to facilitate the storage of the
namelist into an equivalent format which does not support complex
numbers, such as JSON or YAML. | https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/namelist.py#L624-L673 |
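A sketch of the JSON round trip this method is designed for; complex values become [real, imag] pairs and are recorded under the _complex key (input file name is illustrative).

import json
import f90nml

nml = f90nml.read('config.nml')
plain = nml.todict(complex_tuple=True)   # nested dicts with JSON-safe values
print(json.dumps(plain, indent=4))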
marshallward/f90nml | f90nml/namelist.py | Namelist._f90repr | def _f90repr(self, value):
"""Convert primitive Python types to equivalent Fortran strings."""
if isinstance(value, bool):
return self._f90bool(value)
elif isinstance(value, numbers.Integral):
return self._f90int(value)
elif isinstance(value, numbers.Real):
return self._f90float(value)
elif isinstance(value, numbers.Complex):
return self._f90complex(value)
elif isinstance(value, basestring):
return self._f90str(value)
elif value is None:
return ''
else:
raise ValueError('Type {0} of {1} cannot be converted to a Fortran'
' type.'.format(type(value), value)) | python | def _f90repr(self, value):
"""Convert primitive Python types to equivalent Fortran strings."""
if isinstance(value, bool):
return self._f90bool(value)
elif isinstance(value, numbers.Integral):
return self._f90int(value)
elif isinstance(value, numbers.Real):
return self._f90float(value)
elif isinstance(value, numbers.Complex):
return self._f90complex(value)
elif isinstance(value, basestring):
return self._f90str(value)
elif value is None:
return ''
else:
raise ValueError('Type {0} of {1} cannot be converted to a Fortran'
' type.'.format(type(value), value)) | Convert primitive Python types to equivalent Fortran strings. | https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/namelist.py#L675-L691 |
marshallward/f90nml | f90nml/namelist.py | Namelist._f90complex | def _f90complex(self, value):
"""Return a Fortran 90 representation of a complex number."""
return '({0:{fmt}}, {1:{fmt}})'.format(value.real, value.imag,
fmt=self.float_format) | python | def _f90complex(self, value):
"""Return a Fortran 90 representation of a complex number."""
return '({0:{fmt}}, {1:{fmt}})'.format(value.real, value.imag,
fmt=self.float_format) | Return a Fortran 90 representation of a complex number. | https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/namelist.py#L705-L708 |
marshallward/f90nml | f90nml/namelist.py | Namelist._f90str | def _f90str(self, value):
"""Return a Fortran 90 representation of a string."""
# Replace Python quote escape sequence with Fortran
result = repr(str(value)).replace("\\'", "''").replace('\\"', '""')
# Un-escape the Python backslash escape sequence
result = result.replace('\\\\', '\\')
return result | python | def _f90str(self, value):
"""Return a Fortran 90 representation of a string."""
# Replace Python quote escape sequence with Fortran
result = repr(str(value)).replace("\\'", "''").replace('\\"', '""')
# Un-escape the Python backslash escape sequence
result = result.replace('\\\\', '\\')
return result | Return a Fortran 90 representation of a string. | https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/namelist.py#L710-L718 |
numberly/appnexus-client | appnexus/cursor.py | Cursor.extract_data | def extract_data(self, page):
"""Extract the AppNexus object or list of objects from the response"""
response_keys = set(page.keys())
uncommon_keys = response_keys - self.common_keys
for possible_data_key in uncommon_keys:
element = page[possible_data_key]
if isinstance(element, dict):
return [self.representation(self.client, self.service_name,
element)]
if isinstance(element, list):
return [self.representation(self.client, self.service_name, x)
for x in element] | python | def extract_data(self, page):
"""Extract the AppNexus object or list of objects from the response"""
response_keys = set(page.keys())
uncommon_keys = response_keys - self.common_keys
for possible_data_key in uncommon_keys:
element = page[possible_data_key]
if isinstance(element, dict):
return [self.representation(self.client, self.service_name,
element)]
if isinstance(element, list):
return [self.representation(self.client, self.service_name, x)
for x in element] | Extract the AppNexus object or list of objects from the response | https://github.com/numberly/appnexus-client/blob/d6a813449ab6fd93bfbceaa937a168fa9a78b890/appnexus/cursor.py#L59-L71 |
numberly/appnexus-client | appnexus/cursor.py | Cursor.first | def first(self):
"""Extract the first AppNexus object present in the response"""
page = self.get_page(num_elements=1)
data = self.extract_data(page)
if data:
return data[0] | python | def first(self):
"""Extract the first AppNexus object present in the response"""
page = self.get_page(num_elements=1)
data = self.extract_data(page)
if data:
return data[0] | Extract the first AppNexus object present in the response | https://github.com/numberly/appnexus-client/blob/d6a813449ab6fd93bfbceaa937a168fa9a78b890/appnexus/cursor.py#L74-L79 |
numberly/appnexus-client | appnexus/cursor.py | Cursor.get_page | def get_page(self, start_element=0, num_elements=None):
"""Get a page (100 elements) starting from `start_element`"""
if num_elements is None:
num_elements = self.batch_size
specs = self.specs.copy()
specs.update(start_element=start_element, num_elements=num_elements)
return self.client.get(self.service_name, **specs) | python | def get_page(self, start_element=0, num_elements=None):
"""Get a page (100 elements) starting from `start_element`"""
if num_elements is None:
num_elements = self.batch_size
specs = self.specs.copy()
specs.update(start_element=start_element, num_elements=num_elements)
return self.client.get(self.service_name, **specs) | Get a page (100 elements) starting from `start_element` | https://github.com/numberly/appnexus-client/blob/d6a813449ab6fd93bfbceaa937a168fa9a78b890/appnexus/cursor.py#L81-L87 |
numberly/appnexus-client | appnexus/cursor.py | Cursor.size | def size(self):
"""Return the number of elements of the cursor with skip and limit"""
initial_count = self.count()
count_with_skip = max(0, initial_count - self._skip)
size = min(count_with_skip, self._limit)
return size | python | def size(self):
"""Return the number of elements of the cursor with skip and limit"""
initial_count = self.count()
count_with_skip = max(0, initial_count - self._skip)
size = min(count_with_skip, self._limit)
return size | Return the number of elements of the cursor with skip and limit | https://github.com/numberly/appnexus-client/blob/d6a813449ab6fd93bfbceaa937a168fa9a78b890/appnexus/cursor.py#L115-L120 |
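A hedged sketch that only touches the Cursor methods listed in these rows; how the cursor is obtained from the AppNexus client is not shown here, so `campaigns` is assumed to be such a Cursor.

# `campaigns` is assumed to be a Cursor returned by a service query
first_campaign = campaigns.first()                             # just the first matching object
total = campaigns.size()                                       # element count after skip/limit
page = campaigns.get_page(start_element=0, num_elements=50)    # one raw response page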
Calysto/calysto | calysto/ai/conx.py | pad | def pad(s, n, p = " ", sep = "|", align = "left"):
"""
Returns a padded string.
s = string to pad
n = width of string to return
sep = separator (on end of string)
align = text alignment, "left", "center", or "right"
"""
if align == "left":
return (s + (p * n))[:n] + sep
elif align == "center":
pos = n + len(s)//2 - n//2  # integer division keeps the slice indices as ints
return ((p * n) + s + (p * n))[pos:pos + n] + sep
elif align == "right":
return ((p * n) + s)[-n:] + sep | python | def pad(s, n, p = " ", sep = "|", align = "left"):
"""
Returns a padded string.
s = string to pad
n = width of string to return
sep = separator (on end of string)
align = text alignment, "left", "center", or "right"
"""
if align == "left":
return (s + (p * n))[:n] + sep
elif align == "center":
pos = n + len(s)/2 - n/2
return ((p * n) + s + (p * n))[pos:pos + n] + sep
elif align == "right":
return ((p * n) + s)[-n:] + sep | Returns a padded string.
s = string to pad
n = width of string to return
sep = separator (on end of string)
align = text alignment, "left", "center", or "right" | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L33-L47 |
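A brief usage sketch for pad; the widths and strings are made up, and the import path is assumed from the repository layout (calysto/ai/conx.py).

from calysto.ai.conx import pad   # import path assumed, not taken from the record above

print(pad("abc", 10))                   # 'abc       |'  left-aligned to width 10, plus separator
print(pad("abc", 10, align="right"))    # '       abc|'
print(pad("abc", 10, sep=""))           # 'abc       '   same padding, no trailing separator

Only the left and right alignments are shown: the "center" branch computes pos with / division, which yields a float index under Python 3.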
Calysto/calysto | calysto/ai/conx.py | sumMerge | def sumMerge(dict1, dict2):
"""
Adds two dictionaries together, and merges into the first, dict1.
Returns first dict.
"""
for key in dict2:
dict1[key] = list(map(lambda a,b: a + b, dict1.get(key, [0,0,0,0]), dict2[key]))
return dict1 | python | def sumMerge(dict1, dict2):
"""
Adds two dictionaries together, and merges into the first, dict1.
Returns first dict.
"""
for key in dict2:
dict1[key] = list(map(lambda a,b: a + b, dict1.get(key, [0,0,0,0]), dict2[key]))
return dict1 | Adds two dictionaries together, and merges into the first, dict1.
Returns first dict. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L49-L56 |
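A short usage sketch for sumMerge with made-up four-element count lists; keys missing from the first dict default to [0, 0, 0, 0], and the import path is again an assumption.

from calysto.ai.conx import sumMerge   # import path assumed from the repository layout

totals = {"cat": [1, 2, 3, 4]}                          # hypothetical running totals
batch  = {"cat": [10, 0, 0, 1], "dog": [5, 5, 5, 5]}    # hypothetical new counts
sumMerge(totals, batch)                # merges in place and also returns totals
print(totals)                          # {'cat': [11, 2, 3, 5], 'dog': [5, 5, 5, 5]}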
Calysto/calysto | calysto/ai/conx.py | loadNetworkFromFile | def loadNetworkFromFile(filename, mode = 'pickle'):
"""
Deprecated. Use loadNetwork instead.
"""
if mode == 'pickle':
import pickle
fp = open(filename)
network = pickle.load(fp)
fp.close()
return network
elif mode in ['plain', 'conx']:
fp = open(filename, "r")
line = fp.readline()
network = None
while line:
if line.startswith("layer,"):
# layer, name, size
temp, name, sizeStr = line.split(",")
name = name.strip()
size = int(sizeStr)
network.addLayer(name, size)
line = fp.readline()
weights = [float(f) for f in line.split()]
for i in range(network[name].size):
network[name].weight[i] = weights[i]
elif line.startswith("connection,"):
# connection, fromLayer, toLayer
temp, nameFrom, nameTo = line.split(",")
nameFrom, nameTo = nameFrom.strip(), nameTo.strip()
network.connect(nameFrom, nameTo)
for i in range(network[nameFrom].size):
line = fp.readline()
weights = [float(f) for f in line.split()]
for j in range(network[nameTo].size):
network[nameFrom, nameTo].weight[i][j] = weights[j]
elif line.startswith("parameter,"):
temp, exp = line.split(",")
exec(exp) # network is the neural network object
elif line.startswith("network,"):
temp, netType = line.split(",")
netType = netType.strip().lower()
if netType == "cascornetwork":
from pyrobot.brain.cascor import CascorNetwork
network = CascorNetwork()
elif netType == "network":
network = Network()
elif netType == "srn":
network = SRN()
else:
raise AttributeError("unknown network type: '%s'" % netType)
line = fp.readline()
return network | python | def loadNetworkFromFile(filename, mode = 'pickle'):
"""
Deprecated. Use loadNetwork instead.
"""
if mode == 'pickle':
import pickle
fp = open(filename)
network = pickle.load(fp)
fp.close()
return network
elif mode in ['plain', 'conx']:
fp = open(filename, "r")
line = fp.readline()
network = None
while line:
if line.startswith("layer,"):
# layer, name, size
temp, name, sizeStr = line.split(",")
name = name.strip()
size = int(sizeStr)
network.addLayer(name, size)
line = fp.readline()
weights = [float(f) for f in line.split()]
for i in range(network[name].size):
network[name].weight[i] = weights[i]
elif line.startswith("connection,"):
# connection, fromLayer, toLayer
temp, nameFrom, nameTo = line.split(",")
nameFrom, nameTo = nameFrom.strip(), nameTo.strip()
network.connect(nameFrom, nameTo)
for i in range(network[nameFrom].size):
line = fp.readline()
weights = [float(f) for f in line.split()]
for j in range(network[nameTo].size):
network[nameFrom, nameTo].weight[i][j] = weights[j]
elif line.startswith("parameter,"):
temp, exp = line.split(",")
exec(exp) # network is the neural network object
elif line.startswith("network,"):
temp, netType = line.split(",")
netType = netType.strip().lower()
if netType == "cascornetwork":
from pyrobot.brain.cascor import CascorNetwork
network = CascorNetwork()
elif netType == "network":
network = Network()
elif netType == "srn":
network = SRN()
else:
raise AttributeError("unknown network type: '%s'" % netType)
line = fp.readline()
return network | Deprecated. Use loadNetwork instead. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L64-L115 |
Calysto/calysto | calysto/ai/conx.py | ndim | def ndim(n, *args, **kwargs):
"""
Makes a multi-dimensional array of random floats. (Replaces RandomArray).
"""
thunk = kwargs.get("thunk", lambda: random.random())
if not args:
return [thunk() for i in range(n)]
A = []
for i in range(n):
A.append( ndim(*args, thunk=thunk) )
return A | python | def ndim(n, *args, **kwargs):
"""
Makes a multi-dimensional array of random floats. (Replaces RandomArray).
"""
thunk = kwargs.get("thunk", lambda: random.random())
if not args:
return [thunk() for i in range(n)]
A = []
for i in range(n):
A.append( ndim(*args, thunk=thunk) )
return A | Makes a multi-dimensional array of random floats. (Replaces RandomArray). | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L117-L127 |
Calysto/calysto | calysto/ai/conx.py | randomArray2 | def randomArray2(size, bound):
"""
Returns an array initialized to random values between -bound and
bound distributed in a gaussian probability distribution more
appropriate for a Tanh activation function.
"""
if type(size) == type(1):
size = (size,)
temp = Numeric.array( ndim(*size), thunk=lambda: random.gauss(0, 1)) * (2.0 * bound)
return temp - bound | python | def randomArray2(size, bound):
"""
Returns an array initialized to random values between -bound and
bound distributed in a gaussian probability distribution more
appropriate for a Tanh activation function.
"""
if type(size) == type(1):
size = (size,)
temp = Numeric.array( ndim(*size), thunk=lambda: random.gauss(0, 1)) * (2.0 * bound)
return temp - bound | Returns an array initialized to random values between -bound and
bound distributed in a gaussian probability distribution more
appropriate for a Tanh activation function. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L129-L138 |
Calysto/calysto | calysto/ai/conx.py | randomArray | def randomArray(size, bound):
"""
Returns an array initialized to random values between -max and max.
"""
if type(size) == type(1):
size = (size,)
temp = Numeric.array( ndim(*size) ) * (2.0 * bound)
return temp - bound | python | def randomArray(size, bound):
"""
Returns an array initialized to random values between -max and max.
"""
if type(size) == type(1):
size = (size,)
temp = Numeric.array( ndim(*size) ) * (2.0 * bound)
return temp - bound | Returns an array initialized to random values between -max and max. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L140-L147 |
Calysto/calysto | calysto/ai/conx.py | displayArray | def displayArray(name, a, width = 0):
"""
Prints an array (any sequence of floats, really) to the screen.
"""
print(name + ": ", end=" ")
cnt = 0
for i in a:
print("%4.2f" % i, end=" ")
if width > 0 and (cnt + 1) % width == 0:
print('')
cnt += 1 | python | def displayArray(name, a, width = 0):
"""
Prints an array (any sequence of floats, really) to the screen.
"""
print(name + ": ", end=" ")
cnt = 0
for i in a:
print("%4.2f" % i, end=" ")
if width > 0 and (cnt + 1) % width == 0:
print('')
cnt += 1 | Prints an array (any sequence of floats, really) to the screen. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L149-L159 |
Calysto/calysto | calysto/ai/conx.py | toStringArray | def toStringArray(name, a, width = 0):
"""
Returns an array (any sequence of floats, really) as a string.
"""
string = name + ": "
cnt = 0
for i in a:
string += "%4.2f " % i
if width > 0 and (cnt + 1) % width == 0:
string += '\n'
cnt += 1
return string | python | def toStringArray(name, a, width = 0):
"""
Returns an array (any sequence of floats, really) as a string.
"""
string = name + ": "
cnt = 0
for i in a:
string += "%4.2f " % i
if width > 0 and (cnt + 1) % width == 0:
string += '\n'
cnt += 1
return string | Returns an array (any sequence of floats, really) as a string. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L161-L172 |
Calysto/calysto | calysto/ai/conx.py | writeArray | def writeArray(fp, a, delim = " ", nl = 1):
"""
Writes a sequence a of floats to file pointed to by file pointer.
"""
for i in a:
fp.write("%f%s" % (i, delim))
if nl:
fp.write("\n") | python | def writeArray(fp, a, delim = " ", nl = 1):
"""
Writes a sequence a of floats to file pointed to by file pointer.
"""
for i in a:
fp.write("%f%s" % (i, delim))
if nl:
fp.write("\n") | Writes a sequence a of floats to file pointed to by file pointer. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L174-L181 |
Calysto/calysto | calysto/ai/conx.py | Layer.initialize | def initialize(self):
"""
Initializes important node values to zero for each node in the
layer (target, error, activation, dbias, delta, netinput, bed).
"""
self.randomize()
self.dweight = Numeric.zeros(self.size, 'f')
self.delta = Numeric.zeros(self.size, 'f')
self.wed = Numeric.zeros(self.size, 'f')
self.wedLast = Numeric.zeros(self.size, 'f')
self.target = Numeric.zeros(self.size, 'f')
self.error = Numeric.zeros(self.size, 'f')
self.activation = Numeric.zeros(self.size, 'f')
self.netinput = Numeric.zeros(self.size, 'f')
self.targetSet = 0
self.activationSet = 0
self.verify = 1
# layer report of stats:
self.pcorrect = 0
self.ptotal = 0
self.correct = 0
# misc:
self.minTarget = 0.0
self.maxTarget = 1.0
self.minActivation = 0.0
self.maxActivation = 1.0 | python | def initialize(self):
"""
Initializes important node values to zero for each node in the
layer (target, error, activation, dbias, delta, netinput, bed).
"""
self.randomize()
self.dweight = Numeric.zeros(self.size, 'f')
self.delta = Numeric.zeros(self.size, 'f')
self.wed = Numeric.zeros(self.size, 'f')
self.wedLast = Numeric.zeros(self.size, 'f')
self.target = Numeric.zeros(self.size, 'f')
self.error = Numeric.zeros(self.size, 'f')
self.activation = Numeric.zeros(self.size, 'f')
self.netinput = Numeric.zeros(self.size, 'f')
self.targetSet = 0
self.activationSet = 0
self.verify = 1
# layer report of stats:
self.pcorrect = 0
self.ptotal = 0
self.correct = 0
# misc:
self.minTarget = 0.0
self.maxTarget = 1.0
self.minActivation = 0.0
self.maxActivation = 1.0 | Initializes important node values to zero for each node in the
layer (target, error, activation, dbias, delta, netinput, bed). | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L266-L291 |
Calysto/calysto | calysto/ai/conx.py | Layer.randomize | def randomize(self, force = 0):
"""
Initialize node biases to random values in the range [-max, max].
"""
if force or not self.frozen:
self.weight = randomArray(self.size, self._maxRandom) | python | def randomize(self, force = 0):
"""
Initialize node biases to random values in the range [-max, max].
"""
if force or not self.frozen:
self.weight = randomArray(self.size, self._maxRandom) | Initialize node biases to random values in the range [-max, max]. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L293-L298 |
Calysto/calysto | calysto/ai/conx.py | Layer.changeSize | def changeSize(self, newsize):
"""
Changes the size of the layer. Should only be called through
Network.changeLayerSize().
"""
# overwrites current data
if newsize <= 0:
raise LayerError('Layer size changed to zero.', newsize)
minSize = min(self.size, newsize)
bias = randomArray(newsize, self._maxRandom)
Numeric.put(bias, Numeric.arange(minSize), self.weight)
self.weight = bias
self.size = newsize
self.displayWidth = newsize
self.targetSet = 0
self.activationSet = 0
self.target = Numeric.zeros(self.size, 'f')
self.error = Numeric.zeros(self.size, 'f')
self.activation = Numeric.zeros(self.size, 'f')
self.dweight = Numeric.zeros(self.size, 'f')
self.delta = Numeric.zeros(self.size, 'f')
self.netinput = Numeric.zeros(self.size, 'f')
self.wed = Numeric.zeros(self.size, 'f')
self.wedLast = Numeric.zeros(self.size, 'f') | python | def changeSize(self, newsize):
"""
Changes the size of the layer. Should only be called through
Network.changeLayerSize().
"""
# overwrites current data
if newsize <= 0:
raise LayerError('Layer size changed to zero.', newsize)
minSize = min(self.size, newsize)
bias = randomArray(newsize, self._maxRandom)
Numeric.put(bias, Numeric.arange(minSize), self.weight)
self.weight = bias
self.size = newsize
self.displayWidth = newsize
self.targetSet = 0
self.activationSet = 0
self.target = Numeric.zeros(self.size, 'f')
self.error = Numeric.zeros(self.size, 'f')
self.activation = Numeric.zeros(self.size, 'f')
self.dweight = Numeric.zeros(self.size, 'f')
self.delta = Numeric.zeros(self.size, 'f')
self.netinput = Numeric.zeros(self.size, 'f')
self.wed = Numeric.zeros(self.size, 'f')
self.wedLast = Numeric.zeros(self.size, 'f') | Changes the size of the layer. Should only be called through
Network.changeLayerSize(). | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L335-L358 |
Calysto/calysto | calysto/ai/conx.py | Layer.RMSError | def RMSError(self):
"""
Returns Root Mean Squared Error for this layer's pattern.
"""
tss = self.TSSError()
return math.sqrt(tss / self.size) | python | def RMSError(self):
"""
Returns Root Mean Squared Error for this layer's pattern.
"""
tss = self.TSSError()
return math.sqrt(tss / self.size) | Returns Root Mean Squared Error for this layer's pattern. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L366-L371 |
Calysto/calysto | calysto/ai/conx.py | Layer.getCorrect | def getCorrect(self, tolerance):
"""
Returns the number of nodes within tolerance of the target.
"""
return Numeric.add.reduce(Numeric.fabs(self.target - self.activation) < tolerance) | python | def getCorrect(self, tolerance):
"""
Returns the number of nodes within tolerance of the target.
"""
return Numeric.add.reduce(Numeric.fabs(self.target - self.activation) < tolerance) | Returns the number of nodes within tolerance of the target. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L372-L376 |
Calysto/calysto | calysto/ai/conx.py | Layer.getWinner | def getWinner(self, type = 'activation'):
"""
Returns the winner of the type specified {'activation' or
'target'}.
"""
maxvalue = -10000
maxpos = -1
ttlvalue = 0
if type == 'activation':
ttlvalue = Numeric.add.reduce(self.activation)
maxpos = Numeric.argmax(self.activation)
maxvalue = self.activation[maxpos]
elif type == 'target':
# note that backprop() resets self.targetSet flag
if self.verify and self.targetSet == 0:
raise LayerError('getWinner() called with \'target\' but target has not been set.', \
self.targetSet)
ttlvalue = Numeric.add.reduce(self.target)
maxpos = Numeric.argmax(self.target)
maxvalue = self.target[maxpos]
else:
raise LayerError('getWinner() called with unknown layer attribute.', \
type)
if self.size > 0:
avgvalue = ttlvalue / float(self.size)
else:
raise LayerError('getWinner() called for layer of size zero.', \
self.size)
return maxpos, maxvalue, avgvalue | python | def getWinner(self, type = 'activation'):
"""
Returns the winner of the type specified {'activation' or
'target'}.
"""
maxvalue = -10000
maxpos = -1
ttlvalue = 0
if type == 'activation':
ttlvalue = Numeric.add.reduce(self.activation)
maxpos = Numeric.argmax(self.activation)
maxvalue = self.activation[maxpos]
elif type == 'target':
# note that backprop() resets self.targetSet flag
if self.verify and self.targetSet == 0:
raise LayerError('getWinner() called with \'target\' but target has not been set.', \
self.targetSet)
ttlvalue = Numeric.add.reduce(self.target)
maxpos = Numeric.argmax(self.target)
maxvalue = self.target[maxpos]
else:
raise LayerError('getWinner() called with unknown layer attribute.', \
type)
if self.size > 0:
avgvalue = ttlvalue / float(self.size)
else:
raise LayerError('getWinner() called for layer of size zero.', \
self.size)
return maxpos, maxvalue, avgvalue | Returns the winner of the type specified {'activation' or
'target'}. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L377-L405 |
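What getWinner returns for the 'activation' case, illustrated with numpy in place of the module's Numeric wrapper and an arbitrary activation vector.

import numpy as np   # stand-in for the module's Numeric wrapper

activation = np.array([0.1, 0.7, 0.2, 0.4])              # made-up activations
maxpos   = int(np.argmax(activation))                    # 1
maxvalue = activation[maxpos]                            # 0.7
avgvalue = np.add.reduce(activation) / len(activation)   # 0.35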
Calysto/calysto | calysto/ai/conx.py | Layer.setLog | def setLog(self, fileName, writeName=False):
"""
Opens a log file with name fileName.
"""
self.log = 1
self.logFile = fileName
self._logPtr = open(fileName, "w")
if writeName:
self._namePtr = open(fileName + ".name", "w") | python | def setLog(self, fileName, writeName=False):
"""
Opens a log file with name fileName.
"""
self.log = 1
self.logFile = fileName
self._logPtr = open(fileName, "w")
if writeName:
self._namePtr = open(fileName + ".name", "w") | Opens a log file with name fileName. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L420-L428 |
Calysto/calysto | calysto/ai/conx.py | Layer.closeLog | def closeLog(self):
"""
Closes the log file.
"""
self._logPtr.close()
if self._namePtr:
self._namePtr.close()
self.log = 0 | python | def closeLog(self):
"""
Closes the log file.
"""
self._logPtr.close()
if self._namePtr:
self._namePtr.close()
self.log = 0 | Closes the log file. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L434-L441 |
Calysto/calysto | calysto/ai/conx.py | Layer.writeLog | def writeLog(self, network):
"""
Writes to the log file.
"""
if self.log:
writeArray(self._logPtr, self.activation)
if self._namePtr:
self._namePtr.write(network.getWord(self.activation))
self._namePtr.write("\n") | python | def writeLog(self, network):
"""
Writes to the log file.
"""
if self.log:
writeArray(self._logPtr, self.activation)
if self._namePtr:
self._namePtr.write(network.getWord(self.activation))
self._namePtr.write("\n") | Writes to the log file. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L442-L450 |
Calysto/calysto | calysto/ai/conx.py | Layer.toString | def toString(self):
"""
Returns a string representation of Layer instance.
"""
string = "Layer '%s': (Kind: %s, Size: %d, Active: %d, Frozen: %d)\n" % (
self.name, self.kind, self.size, self.active, self.frozen)
if (self.type == 'Output'):
string += toStringArray('Target ', self.target, self.displayWidth)
string += toStringArray('Activation', self.activation, self.displayWidth)
if (self.type != 'Input' and self._verbosity > 1):
string += toStringArray('Error ', self.error, self.displayWidth)
if (self._verbosity > 4 and self.type != 'Input'):
string += toStringArray('weight ', self.weight, self.displayWidth)
string += toStringArray('dweight ', self.dweight, self.displayWidth)
string += toStringArray('delta ', self.delta, self.displayWidth)
string += toStringArray('netinput ', self.netinput, self.displayWidth)
string += toStringArray('wed ', self.wed, self.displayWidth)
return string | python | def toString(self):
"""
Returns a string representation of Layer instance.
"""
string = "Layer '%s': (Kind: %s, Size: %d, Active: %d, Frozen: %d)\n" % (
self.name, self.kind, self.size, self.active, self.frozen)
if (self.type == 'Output'):
string += toStringArray('Target ', self.target, self.displayWidth)
string += toStringArray('Activation', self.activation, self.displayWidth)
if (self.type != 'Input' and self._verbosity > 1):
string += toStringArray('Error ', self.error, self.displayWidth)
if (self._verbosity > 4 and self.type != 'Input'):
string += toStringArray('weight ', self.weight, self.displayWidth)
string += toStringArray('dweight ', self.dweight, self.displayWidth)
string += toStringArray('delta ', self.delta, self.displayWidth)
string += toStringArray('netinput ', self.netinput, self.displayWidth)
string += toStringArray('wed ', self.wed, self.displayWidth)
return string | Returns a string representation of Layer instance. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L458-L475 |
Calysto/calysto | calysto/ai/conx.py | Layer.display | def display(self):
"""
Displays the Layer instance to the screen.
"""
if self.displayWidth == 0: return
print("=============================")
print("Layer '%s': (Kind: %s, Size: %d, Active: %d, Frozen: %d)" % (
self.name, self.kind, self.size, self.active, self.frozen))
if (self.type == 'Output'):
displayArray('Target ', self.target, self.displayWidth)
displayArray('Activation', self.activation, self.displayWidth)
if (self.type != 'Input' and self._verbosity > 1):
displayArray('Error ', self.error, self.displayWidth)
if (self._verbosity > 4 and self.type != 'Input'):
print(" ", end=" "); displayArray('weight', self.weight)
print(" ", end=" "); displayArray('dweight', self.dweight)
print(" ", end=" "); displayArray('delta', self.delta)
print(" ", end=" "); displayArray('netinput', self.netinput)
print(" ", end=" "); displayArray('wed', self.wed) | python | def display(self):
"""
Displays the Layer instance to the screen.
"""
if self.displayWidth == 0: return
print("=============================")
print("Layer '%s': (Kind: %s, Size: %d, Active: %d, Frozen: %d)" % (
self.name, self.kind, self.size, self.active, self.frozen))
if (self.type == 'Output'):
displayArray('Target ', self.target, self.displayWidth)
displayArray('Activation', self.activation, self.displayWidth)
if (self.type != 'Input' and self._verbosity > 1):
displayArray('Error ', self.error, self.displayWidth)
if (self._verbosity > 4 and self.type != 'Input'):
print(" ", end=" "); displayArray('weight', self.weight)
print(" ", end=" "); displayArray('dweight', self.dweight)
print(" ", end=" "); displayArray('delta', self.delta)
print(" ", end=" "); displayArray('netinput', self.netinput)
print(" ", end=" "); displayArray('wed', self.wed) | Displays the Layer instance to the screen. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L476-L494 |
Calysto/calysto | calysto/ai/conx.py | Layer.setActivations | def setActivations(self, value):
"""
Sets all activations to the value of the argument. Value should be in the range [0,1].
"""
#if self.verify and not self.activationSet == 0:
# raise LayerError, \
# ('Activation flag not reset. Activations may have been set multiple times without any intervening call to propagate().', self.activationSet)
Numeric.put(self.activation, Numeric.arange(len(self.activation)), value)
self.activationSet = 1 | python | def setActivations(self, value):
"""
Sets all activations to the value of the argument. Value should be in the range [0,1].
"""
#if self.verify and not self.activationSet == 0:
# raise LayerError, \
# ('Activation flag not reset. Activations may have been set multiple times without any intervening call to propagate().', self.activationSet)
Numeric.put(self.activation, Numeric.arange(len(self.activation)), value)
self.activationSet = 1 | Sets all activations to the value of the argument. Value should be in the range [0,1]. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L507-L515 |
Calysto/calysto | calysto/ai/conx.py | Layer.copyActivations | def copyActivations(self, arr, reckless = 0):
"""
Copies activations from the argument array into
layer activations.
"""
array = Numeric.array(arr)
if not len(array) == self.size:
raise LayerError('Mismatched activation size and layer size in call to copyActivations()', \
(len(array), self.size))
if self.verify and not self.activationSet == 0:
if not reckless:
raise LayerError('Activation flag not reset before call to copyActivations()', \
self.activationSet)
self.activation = array
self.activationSet = 1 | python | def copyActivations(self, arr, reckless = 0):
"""
Copies activations from the argument array into
layer activations.
"""
array = Numeric.array(arr)
if not len(array) == self.size:
raise LayerError('Mismatched activation size and layer size in call to copyActivations()', \
(len(array), self.size))
if self.verify and not self.activationSet == 0:
if not reckless:
raise LayerError('Activation flag not reset before call to copyActivations()', \
self.activationSet)
self.activation = array
self.activationSet = 1 | Copies activations from the argument array into
layer activations. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L516-L530 |
Calysto/calysto | calysto/ai/conx.py | Layer.setTargets | def setTargets(self, value):
"""
Sets all targets to the value of the argument. This value must be in the range [min,max].
"""
# Removed this because both propagate and backprop (via compute_error) set targets
#if self.verify and not self.targetSet == 0:
# if not self.warningIssued:
# print 'Warning! Targets have already been set and no intervening backprop() was called.', \
# (self.name, self.targetSet)
# print "(Warning will not be issued again)"
# self.warningIssued = 1
if value > self.maxActivation or value < self.minActivation:
raise LayerError('Targets for this layer are out of the proper interval.', (self.name, value))
Numeric.put(self.target, Numeric.arange(len(self.target)), value)
self.targetSet = 1 | python | def setTargets(self, value):
"""
Sets all targets to the value of the argument. This value must be in the range [min,max].
"""
# Removed this because both propagate and backprop (via compute_error) set targets
#if self.verify and not self.targetSet == 0:
# if not self.warningIssued:
# print 'Warning! Targets have already been set and no intervening backprop() was called.', \
# (self.name, self.targetSet)
# print "(Warning will not be issued again)"
# self.warningIssued = 1
if value > self.maxActivation or value < self.minActivation:
raise LayerError('Targets for this layer are out of the proper interval.', (self.name, value))
Numeric.put(self.target, Numeric.arange(len(self.target)), value)
self.targetSet = 1 | Sets all targets to the value of the argument. This value must be in the range [min,max]. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L543-L557
Calysto/calysto | calysto/ai/conx.py | Layer.copyTargets | def copyTargets(self, arr):
"""
Copies the targets of the argument array into the self.target attribute.
"""
array = Numeric.array(arr)
if not len(array) == self.size:
raise LayerError('Mismatched target size and layer size in call to copyTargets()', \
(len(array), self.size))
# Removed this because both propagate and backprop (via compute_error) set targets
#if self.verify and not self.targetSet == 0:
# if not self.warningIssued:
# print 'Warning! Targets have already been set and no intervening backprop() was called.', \
# (self.name, self.targetSet)
# print "(Warning will not be issued again)"
# self.warningIssued = 1
if Numeric.add.reduce(array < self.minTarget) or Numeric.add.reduce(array > self.maxTarget):
print(self.name, self.minTarget, self.maxTarget)
raise LayerError('Targets for this layer are out of range.', (self.name, array))
self.target = array
self.targetSet = 1 | python | def copyTargets(self, arr):
"""
Copies the targets of the argument array into the self.target attribute.
"""
array = Numeric.array(arr)
if not len(array) == self.size:
raise LayerError('Mismatched target size and layer size in call to copyTargets()', \
(len(array), self.size))
# Removed this because both propagate and backprop (via compute_error) set targets
#if self.verify and not self.targetSet == 0:
# if not self.warningIssued:
# print 'Warning! Targets have already been set and no intervening backprop() was called.', \
# (self.name, self.targetSet)
# print "(Warning will not be issued again)"
# self.warningIssued = 1
if Numeric.add.reduce(array < self.minTarget) or Numeric.add.reduce(array > self.maxTarget):
print(self.name, self.minTarget, self.maxTarget)
raise LayerError('Targets for this layer are out of range.', (self.name, array))
self.target = array
self.targetSet = 1 | Copies the targets of the argument array into the self.target attribute. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L558-L577 |
Calysto/calysto | calysto/ai/conx.py | Connection.initialize | def initialize(self):
"""
Initializes self.dweight and self.wed to zero matrices.
"""
self.randomize()
self.dweight = Numeric.zeros((self.fromLayer.size, \
self.toLayer.size), 'f')
self.wed = Numeric.zeros((self.fromLayer.size, \
self.toLayer.size), 'f')
self.wedLast = Numeric.zeros((self.fromLayer.size, \
self.toLayer.size), 'f') | python | def initialize(self):
"""
Initializes self.dweight and self.wed to zero matrices.
"""
self.randomize()
self.dweight = Numeric.zeros((self.fromLayer.size, \
self.toLayer.size), 'f')
self.wed = Numeric.zeros((self.fromLayer.size, \
self.toLayer.size), 'f')
self.wedLast = Numeric.zeros((self.fromLayer.size, \
self.toLayer.size), 'f') | Initializes self.dweight and self.wed to zero matrices. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L615-L625 |
Calysto/calysto | calysto/ai/conx.py | Connection.changeSize | def changeSize(self, fromLayerSize, toLayerSize):
"""
Changes the size of the connection depending on the size
change of either source or destination layer. Should only be
called through Network.changeLayerSize().
"""
if toLayerSize <= 0 or fromLayerSize <= 0:
raise LayerError('changeSize() called with invalid layer size.', \
(fromLayerSize, toLayerSize))
dweight = Numeric.zeros((fromLayerSize, toLayerSize), 'f')
wed = Numeric.zeros((fromLayerSize, toLayerSize), 'f')
wedLast = Numeric.zeros((fromLayerSize, toLayerSize), 'f')
weight = randomArray((fromLayerSize, toLayerSize),
self.toLayer._maxRandom)
# copy from old to new, considering one is smaller
minFromLayerSize = min( fromLayerSize, self.fromLayer.size)
minToLayerSize = min( toLayerSize, self.toLayer.size)
for i in range(minFromLayerSize):
for j in range(minToLayerSize):
wed[i][j] = self.wed[i][j]
wedLast[i][j] = self.wedLast[i][j]
dweight[i][j] = self.dweight[i][j]
weight[i][j] = self.weight[i][j]
self.dweight = dweight
self.wed = wed
self.wedLast = wedLast
self.weight = weight | python | def changeSize(self, fromLayerSize, toLayerSize):
"""
Changes the size of the connection depending on the size
change of either source or destination layer. Should only be
called through Network.changeLayerSize().
"""
if toLayerSize <= 0 or fromLayerSize <= 0:
raise LayerError('changeSize() called with invalid layer size.', \
(fromLayerSize, toLayerSize))
dweight = Numeric.zeros((fromLayerSize, toLayerSize), 'f')
wed = Numeric.zeros((fromLayerSize, toLayerSize), 'f')
wedLast = Numeric.zeros((fromLayerSize, toLayerSize), 'f')
weight = randomArray((fromLayerSize, toLayerSize),
self.toLayer._maxRandom)
# copy from old to new, considering one is smaller
minFromLayerSize = min( fromLayerSize, self.fromLayer.size)
minToLayerSize = min( toLayerSize, self.toLayer.size)
for i in range(minFromLayerSize):
for j in range(minToLayerSize):
wed[i][j] = self.wed[i][j]
wedLast[i][j] = self.wedLast[i][j]
dweight[i][j] = self.dweight[i][j]
weight[i][j] = self.weight[i][j]
self.dweight = dweight
self.wed = wed
self.wedLast = wedLast
self.weight = weight | Changes the size of the connection depending on the size
change of either source or destination layer. Should only be
called through Network.changeLayerSize(). | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L641-L667 |
Calysto/calysto | calysto/ai/conx.py | Connection.display | def display(self):
"""
Displays connection information to the screen.
"""
if self.toLayer._verbosity > 4:
print("wed: from '" + self.fromLayer.name + "' to '" + self.toLayer.name +"'")
for j in range(self.toLayer.size):
print(self.toLayer.name, "[", j, "]", end=" ")
print('')
for i in range(self.fromLayer.size):
print(self.fromLayer.name, "[", i, "]", ": ", end=" ")
for j in range(self.toLayer.size):
print(self.wed[i][j], end=" ")
print('')
print('')
print("dweight: from '" + self.fromLayer.name + "' to '" + self.toLayer.name +"'")
for j in range(self.toLayer.size):
print(self.toLayer.name, "[", j, "]", end=" ")
print('')
for i in range(self.fromLayer.size):
print(self.fromLayer.name, "[", i, "]", ": ", end=" ")
for j in range(self.toLayer.size):
print(self.dweight[i][j], end=" ")
print('')
print('')
if self.toLayer._verbosity > 2:
print("Weights: from '" + self.fromLayer.name + "' to '" + self.toLayer.name +"'")
print(" ", end=" ")
for j in range(self.toLayer.size):
print(self.toLayer.name, "[", j, "]", end=" ")
print('')
for i in range(self.fromLayer.size):
print(self.fromLayer.name, "[", i, "]", ": ", end=" ")
for j in range(self.toLayer.size):
print(self.weight[i][j], end=" ")
print('')
print('') | python | def display(self):
"""
Displays connection information to the screen.
"""
if self.toLayer._verbosity > 4:
print("wed: from '" + self.fromLayer.name + "' to '" + self.toLayer.name +"'")
for j in range(self.toLayer.size):
print(self.toLayer.name, "[", j, "]", end=" ")
print('')
for i in range(self.fromLayer.size):
print(self.fromLayer.name, "[", i, "]", ": ", end=" ")
for j in range(self.toLayer.size):
print(self.wed[i][j], end=" ")
print('')
print('')
print("dweight: from '" + self.fromLayer.name + "' to '" + self.toLayer.name +"'")
for j in range(self.toLayer.size):
print(self.toLayer.name, "[", j, "]", end=" ")
print('')
for i in range(self.fromLayer.size):
print(self.fromLayer.name, "[", i, "]", ": ", end=" ")
for j in range(self.toLayer.size):
print(self.dweight[i][j], end=" ")
print('')
print('')
if self.toLayer._verbosity > 2:
print("Weights: from '" + self.fromLayer.name + "' to '" + self.toLayer.name +"'")
print(" ", end=" ")
for j in range(self.toLayer.size):
print(self.toLayer.name, "[", j, "]", end=" ")
print('')
for i in range(self.fromLayer.size):
print(self.fromLayer.name, "[", i, "]", ": ", end=" ")
for j in range(self.toLayer.size):
print(self.weight[i][j], end=" ")
print('')
print('') | Displays connection information to the screen. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L670-L706 |
Calysto/calysto | calysto/ai/conx.py | Connection.toString | def toString(self):
"""
Connection information as a string.
"""
string = ""
if self.toLayer._verbosity > 4:
string += "wed: from '" + self.fromLayer.name + "' to '" + self.toLayer.name +"'\n"
string += " "
for j in range(self.toLayer.size):
string += " " + self.toLayer.name + "[" + str(j) + "]"
string += '\n'
for i in range(self.fromLayer.size):
string += self.fromLayer.name+ "["+ str(i)+ "]"+ ": "
for j in range(self.toLayer.size):
string += " " + str(self.wed[i][j])
string += '\n'
string += '\n'
string += "dweight: from '" + self.fromLayer.name + "' to '" + self.toLayer.name +"'\n"
string += " "
for j in range(self.toLayer.size):
string += " " + self.toLayer.name+ "["+ str(j)+ "]"
string += '\n'
for i in range(self.fromLayer.size):
string += self.fromLayer.name+ "["+ str(i)+ "]"+ ": "
for j in range(self.toLayer.size):
string += " " + str(self.dweight[i][j])
string += '\n'
string += '\n'
if self.toLayer._verbosity > 2:
string += "Weights: from '" + self.fromLayer.name + "' to '" + self.toLayer.name +"'\n"
string += " "
for j in range(self.toLayer.size):
string += " " + self.toLayer.name+ "["+ str(j)+ "]"
string += '\n'
for i in range(self.fromLayer.size):
string += self.fromLayer.name+ "["+ str(i)+ "]"+ ": "
for j in range(self.toLayer.size):
string += " " + str(self.weight[i][j])
string += '\n'
string += '\n'
return string | python | def toString(self):
"""
Connection information as a string.
"""
string = ""
if self.toLayer._verbosity > 4:
string += "wed: from '" + self.fromLayer.name + "' to '" + self.toLayer.name +"'\n"
string += " "
for j in range(self.toLayer.size):
string += " " + self.toLayer.name + "[" + str(j) + "]"
string += '\n'
for i in range(self.fromLayer.size):
string += self.fromLayer.name+ "["+ str(i)+ "]"+ ": "
for j in range(self.toLayer.size):
string += " " + str(self.wed[i][j])
string += '\n'
string += '\n'
string += "dweight: from '" + self.fromLayer.name + "' to '" + self.toLayer.name +"'\n"
string += " "
for j in range(self.toLayer.size):
string += " " + self.toLayer.name+ "["+ str(j)+ "]"
string += '\n'
for i in range(self.fromLayer.size):
string += self.fromLayer.name+ "["+ str(i)+ "]"+ ": "
for j in range(self.toLayer.size):
string += " " + str(self.dweight[i][j])
string += '\n'
string += '\n'
if self.toLayer._verbosity > 2:
string += "Weights: from '" + self.fromLayer.name + "' to '" + self.toLayer.name +"'\n"
string += " "
for j in range(self.toLayer.size):
string += " " + self.toLayer.name+ "["+ str(j)+ "]"
string += '\n'
for i in range(self.fromLayer.size):
string += self.fromLayer.name+ "["+ str(i)+ "]"+ ": "
for j in range(self.toLayer.size):
string += " " + str(self.weight[i][j])
string += '\n'
string += '\n'
return string | Connection information as a string. | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L708-L748 |
Calysto/calysto | calysto/ai/conx.py | Network.setCache | def setCache(self, val = 1):
""" Sets cache on (or updates), or turns off """
# first clear the old cached values
self.cacheConnections = []
self.cacheLayers = []
if val:
for layer in self.layers:
if layer.active and not layer.frozen:
self.cacheLayers.append( layer )
for connection in self.connections:
if connection.active and not connection.frozen:
self.cacheConnections.append( connection ) | python | def setCache(self, val = 1):
""" Sets cache on (or updates), or turns off """
# first clear the old cached values
self.cacheConnections = []
self.cacheLayers = []
if val:
for layer in self.layers:
if layer.active and not layer.frozen:
self.cacheLayers.append( layer )
for connection in self.connections:
if connection.active and not connection.frozen:
self.cacheConnections.append( connection ) | Sets cache on (or updates), or turns off | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L829-L840 |
Calysto/calysto | calysto/ai/conx.py | Network.path | def path(self, startLayer, endLayer):
"""
Used in error checking with verifyArchitecture() and in prop_from().
"""
next = {startLayer.name : startLayer}
visited = {}
while next != {}:
for item in list(next.items()):
# item[0] : name, item[1] : layer reference
# add layer to visited dict and del from next
visited[item[0]] = item[1]
del next[item[0]]
for connection in self.connections:
if connection.fromLayer.name == item[0]:
if connection.toLayer.name == endLayer.name:
return 1 # a path!
elif connection.toLayer.name in next:
pass # already in the list to be traversed
elif connection.toLayer.name in visited:
pass # already been there
else:
# add to next
next[connection.toLayer.name] = connection.toLayer
return 0 # didn't find it and ran out of places to go | python | def path(self, startLayer, endLayer):
"""
Used in error checking with verifyArchitecture() and in prop_from().
"""
next = {startLayer.name : startLayer}
visited = {}
while next != {}:
for item in list(next.items()):
# item[0] : name, item[1] : layer reference
# add layer to visited dict and del from next
visited[item[0]] = item[1]
del next[item[0]]
for connection in self.connections:
if connection.fromLayer.name == item[0]:
if connection.toLayer.name == endLayer.name:
return 1 # a path!
elif connection.toLayer.name in next:
pass # already in the list to be traversed
elif connection.toLayer.name in visited:
pass # already been there
else:
# add to next
next[connection.toLayer.name] = connection.toLayer
return 0 # didn't find it and ran out of places to go | Used in error checking with verifyArchitecture() and in prop_from(). | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L860-L883 |