the-stack_106_22367 | import pytest
from tests.actions.support.keys import Keys
from tests.actions.support.refine import filter_dict, get_keys, get_events
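# Helper semantics, inferred from their use below: filter_dict(event, reference)
# projects an event dict onto the keys of the reference dict, get_keys() returns
# the text recorded by the key reporter element, and get_events() returns the
# DOM key events recorded for the session.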
def test_null_response_value(session, key_chain):
value = key_chain.key_up("a").perform()
assert value is None
value = session.actions.release()
assert value is None
def test_lone_keyup_sends_no_events(session, key_reporter, key_chain):
key_chain.key_up("a").perform()
assert len(get_keys(key_reporter)) == 0
assert len(get_events(session)) == 0
session.actions.release()
assert len(get_keys(key_reporter)) == 0
assert len(get_events(session)) == 0
@pytest.mark.parametrize("value,code", [
(u"a", "KeyA",),
("a", "KeyA",),
(u"\"", "Quote"),
(u",", "Comma"),
(u"\u00E0", ""),
(u"\u0416", ""),
(u"@", "Digit2"),
(u"\u2603", ""),
(u"\uF6C2", ""), # PUA
])
def test_single_printable_key_sends_correct_events(session,
key_reporter,
key_chain,
value,
code):
key_chain \
.key_down(value) \
.key_up(value) \
.perform()
expected = [
{"code": code, "key": value, "type": "keydown"},
{"code": code, "key": value, "type": "keypress"},
{"code": code, "key": value, "type": "keyup"},
]
all_events = get_events(session)
events = [filter_dict(e, expected[0]) for e in all_events]
    if len(events) > 0 and events[0]["code"] is None:
# Remove 'code' entry if browser doesn't support it
expected = [filter_dict(e, {"key": "", "type": ""}) for e in expected]
events = [filter_dict(e, expected[0]) for e in events]
assert events == expected
assert get_keys(key_reporter) == value
@pytest.mark.parametrize("value", [
(u"\U0001F604"),
(u"\U0001F60D"),
])
def test_single_emoji_records_correct_key(session, key_reporter, key_chain, value):
# Not using key_chain.send_keys() because we always want to treat value as
# one character here. `len(value)` varies by platform for non-BMP characters,
# so we don't want to iterate over value.
key_chain \
.key_down(value) \
.key_up(value) \
.perform()
# events sent by major browsers are inconsistent so only check key value
assert get_keys(key_reporter) == value
@pytest.mark.parametrize("value,code,key", [
(u"\uE050", "ShiftRight", "Shift"),
(u"\uE053", "OSRight", "Meta"),
(Keys.CONTROL, "ControlLeft", "Control"),
])
def test_single_modifier_key_sends_correct_events(session,
key_reporter,
key_chain,
value,
code,
key):
key_chain \
.key_down(value) \
.key_up(value) \
.perform()
all_events = get_events(session)
expected = [
{"code": code, "key": key, "type": "keydown"},
{"code": code, "key": key, "type": "keyup"},
]
events = [filter_dict(e, expected[0]) for e in all_events]
    if len(events) > 0 and events[0]["code"] is None:
# Remove 'code' entry if browser doesn't support it
expected = [filter_dict(e, {"key": "", "type": ""}) for e in expected]
events = [filter_dict(e, expected[0]) for e in events]
assert events == expected
assert len(get_keys(key_reporter)) == 0
@pytest.mark.parametrize("value,code,key", [
(Keys.ESCAPE, "Escape", "Escape"),
(Keys.RIGHT, "ArrowRight", "ArrowRight"),
])
def test_single_nonprintable_key_sends_events(session,
key_reporter,
key_chain,
value,
code,
key):
key_chain \
.key_down(value) \
.key_up(value) \
.perform()
expected = [
{"code": code, "key": key, "type": "keydown"},
{"code": code, "key": key, "type": "keypress"},
{"code": code, "key": key, "type": "keyup"},
]
all_events = get_events(session)
events = [filter_dict(e, expected[0]) for e in all_events]
    if len(events) > 0 and events[0]["code"] is None:
# Remove 'code' entry if browser doesn't support it
expected = [filter_dict(e, {"key": "", "type": ""}) for e in expected]
events = [filter_dict(e, expected[0]) for e in events]
if len(events) == 2:
# most browsers don't send a keypress for non-printable keys
assert events == [expected[0], expected[2]]
else:
assert events == expected
assert len(get_keys(key_reporter)) == 0
def test_sequence_of_keydown_printable_keys_sends_events(session,
key_reporter,
key_chain):
key_chain \
.key_down("a") \
.key_down("b") \
.perform()
expected = [
{"code": "KeyA", "key": "a", "type": "keydown"},
{"code": "KeyA", "key": "a", "type": "keypress"},
{"code": "KeyB", "key": "b", "type": "keydown"},
{"code": "KeyB", "key": "b", "type": "keypress"},
]
all_events = get_events(session)
events = [filter_dict(e, expected[0]) for e in all_events]
    if len(events) > 0 and events[0]["code"] is None:
# Remove 'code' entry if browser doesn't support it
expected = [filter_dict(e, {"key": "", "type": ""}) for e in expected]
events = [filter_dict(e, expected[0]) for e in events]
assert events == expected
assert get_keys(key_reporter) == "ab"
def test_sequence_of_keydown_character_keys(session, key_reporter, key_chain):
key_chain.send_keys("ef").perform()
expected = [
{"code": "KeyE", "key": "e", "type": "keydown"},
{"code": "KeyE", "key": "e", "type": "keypress"},
{"code": "KeyE", "key": "e", "type": "keyup"},
{"code": "KeyF", "key": "f", "type": "keydown"},
{"code": "KeyF", "key": "f", "type": "keypress"},
{"code": "KeyF", "key": "f", "type": "keyup"},
]
all_events = get_events(session)
events = [filter_dict(e, expected[0]) for e in all_events]
    if len(events) > 0 and events[0]["code"] is None:
# Remove 'code' entry if browser doesn't support it
expected = [filter_dict(e, {"key": "", "type": ""}) for e in expected]
events = [filter_dict(e, expected[0]) for e in events]
assert events == expected
assert get_keys(key_reporter) == "ef"
def test_backspace_erases_keys(session, key_reporter, key_chain):
key_chain \
.send_keys("efcd") \
.send_keys([Keys.BACKSPACE, Keys.BACKSPACE]) \
.perform()
assert get_keys(key_reporter) == "ef"
|
the-stack_106_22371 | # -*- coding: utf-8 -*-
from django.forms import ValidationError
from django.utils.translation import ugettext_lazy as _
# models
from comments.models import Comment
from comments.models import CommentLike
from comments.models import CommentImage
# forms
from base.forms import BaseModelForm
class CommentForm(BaseModelForm):
class Meta:
model = Comment
fields = (
'text', 'event', 'activity', 'parent', 'images', 'user_mentions',
'methodology', 'tool',
)
def clean(self):
cleaned_data = super(CommentForm, self).clean()
text = cleaned_data.get('text', '')
users = cleaned_data['user_mentions']
cleaned_data['text'] = Comment.get_parsed_content(text, users)
return cleaned_data
class CommentUpdateForm(BaseModelForm):
class Meta:
model = Comment
fields = ('text', 'event', 'activity', 'parent')
class CommentImageForm(BaseModelForm):
class Meta:
model = CommentImage
fields = ('image',)
class CommentLikeForm(BaseModelForm):
already_liked = _('you have already liked this comment')
class Meta:
model = CommentLike
fields = ('comment',)
def clean(self):
likes = CommentLike.objects.filter(user=self.instance.user)
likes = likes.filter(comment=self.cleaned_data['comment'])
if likes.exists():
raise ValidationError(self.already_liked)
return super(CommentLikeForm, self).clean()
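# Plausible usage sketch (not part of this module). CommentLikeForm.clean()
# reads self.instance.user, so the caller is expected to attach the requesting
# user to the form instance before validation; assuming BaseModelForm behaves
# like a standard Django ModelForm:
#
#     form = CommentLikeForm(data={'comment': comment.pk})
#     form.instance.user = request.user
#     if form.is_valid():
#         form.save()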
|
the-stack_106_22372 | # Webhooks for external integrations.
from typing import Any, Dict, Optional
from django.http import HttpRequest, HttpResponse
from zerver.decorator import webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
@webhook_view("Canarytokens")
@has_request_variables
def api_canarytoken_webhook(
request: HttpRequest,
user_profile: UserProfile,
message: Dict[str, Any] = REQ(argument_type="body"),
user_specified_topic: Optional[str] = REQ("topic", default=None),
) -> HttpResponse:
"""
Construct a response to a webhook event from a Thinkst canarytoken from
canarytokens.org. Canarytokens from Thinkst's paid product have a different
schema and should use the "thinkst" integration. See linked documentation
below for a schema:
https://help.canary.tools/hc/en-gb/articles/360002426577-How-do-I-configure-notifications-for-a-Generic-Webhook-
"""
topic = "canarytoken alert"
body = (
f"**:alert: Canarytoken has been triggered on {message['time']}!**\n\n"
f"{message['memo']} \n\n"
f"[Manage this canarytoken]({message['manage_url']})"
)
if user_specified_topic:
topic = user_specified_topic
check_send_webhook_message(request, user_profile, topic, body)
return json_success()
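# Illustrative payload for the generic canarytokens.org webhook, limited to the
# fields this handler actually reads (the values below are made up):
#
#     {
#         "time": "2023-01-17 18:15:42 (UTC)",
#         "memo": "Canarytoken on the finance share was opened",
#         "manage_url": "https://canarytokens.org/manage?token=...&auth=..."
#     }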
|
the-stack_106_22374 | """
UP42 authentication mechanism and base requests functionality
"""
import json
from pathlib import Path
from typing import Dict, Optional, Union
import requests
import requests.exceptions
from requests.auth import HTTPBasicAuth
from requests_oauthlib import OAuth2Session
from oauthlib.oauth2 import BackendApplicationClient, MissingTokenError
from tenacity import (
Retrying,
wait_fixed,
wait_random_exponential,
stop_after_attempt,
retry_if_exception,
retry_if_exception_type,
retry,
)
from up42.utils import get_logger
logger = get_logger(__name__)
class retry_if_401_invalid_token(retry_if_exception):
"""
Custom tenacity error response that enables separate retry strategy for
    401 errors (unauthorized response, unable to decode JWT) due to an invalid or timed-out UP42 token.
Adapted from https://github.com/alexwlchan/handling-http-429-with-tenacity
"""
def __init__(self):
def is_http_401_error(exception):
return (
isinstance(
exception,
(
requests.exceptions.HTTPError,
requests.exceptions.RequestException,
),
)
and exception.response.status_code == 401
)
super().__init__(predicate=is_http_401_error)
class retry_if_429_rate_limit(retry_if_exception):
"""
Custom tenacity error response that enables separate retry strategy for
429 HTTPError (too many requests) due to UP42 rate limitation.
Also see https://docs.up42.com/developers/api#section/API-Usage-Constraints/Rate-limiting
Adapted from https://github.com/alexwlchan/handling-http-429-with-tenacity
"""
def __init__(self):
def is_http_429_error(exception):
return (
isinstance(exception, requests.exceptions.HTTPError)
and exception.response.status_code == 429
)
super().__init__(predicate=is_http_429_error)
class Auth:
def __init__(
self,
cfg_file: Union[str, Path, None] = None,
project_id: Optional[str] = None,
project_api_key: Optional[str] = None,
**kwargs,
):
"""
The Auth class handles the authentication with UP42.
Info:
Authentication is possible via the credentials of a specific project (project_id &
project_api_key). To get your **project id** and **project api key**, follow
the instructions in the docs authentication chapter.
Args:
cfg_file: File path to the cfg.json with {project_id: "...", project_api_key: "..."}.
project_id: The unique identifier of the project.
project_api_key: The project-specific API key.
"""
self.cfg_file = cfg_file
self.project_id = project_id
self.project_api_key = project_api_key
self.workspace_id: Optional[str] = None
self.token: Optional[str] = None
try:
self.env: str = kwargs["env"]
except KeyError:
self.env = "com"
try:
self.authenticate: bool = kwargs["authenticate"]
except KeyError:
self.authenticate = True
if self.authenticate:
self._find_credentials()
self._get_token()
self._get_workspace()
logger.info("Authentication with UP42 successful!")
def __repr__(self):
return f"UP42ProjectAuth(project_id={self.project_id}, env={self.env})"
def _find_credentials(self) -> None:
"""
Sources the project credentials from a provided config file, error handling
if no credentials are provided in arguments or config file.
"""
if self.project_id is None or self.project_api_key is None:
if self.cfg_file is None:
raise ValueError(
"Provide project_id and project_api_key via arguments or config file!"
)
# Source credentials from config file.
try:
with open(self.cfg_file) as src:
config = json.load(src)
try:
self.project_id = config["project_id"]
self.project_api_key = config["project_api_key"]
except KeyError as e:
raise ValueError(
"Provided config file does not contain project_id and "
"project_api_key!"
) from e
logger.info("Got credentials from config file.")
except FileNotFoundError as e:
raise ValueError("Selected config file does not exist!") from e
elif all(
v is not None
for v in [self.cfg_file, self.project_id, self.project_api_key]
):
logger.info(
"Credentials are provided via arguments and config file, "
"now using the argument credentials."
)
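    # Expected cfg.json layout for the file-based branch above (placeholder values):
    #
    #     {
    #         "project_id": "your-project-id",
    #         "project_api_key": "your-project-api-key"
    #     }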
def _endpoint(self) -> str:
"""Gets the endpoint."""
return f"https://api.up42.{self.env}"
def _get_token(self):
"""Project specific authentication via project id and project api key."""
try:
client = BackendApplicationClient(
client_id=self.project_id, client_secret=self.project_api_key
)
auth = HTTPBasicAuth(self.project_id, self.project_api_key)
get_token_session = OAuth2Session(client=client)
token_response = get_token_session.fetch_token(
token_url=self._endpoint() + "/oauth/token", auth=auth
)
except MissingTokenError as err:
raise ValueError(
"Authentication was not successful, check the provided project credentials."
) from err
self.token = token_response["data"]["accessToken"]
def _get_workspace(self) -> None:
"""Get workspace id belonging to authenticated project."""
url = f"https://api.up42.{self.env}/projects/{self.project_id}"
resp = self._request("GET", url)
self.workspace_id = resp["data"]["workspaceId"] # type: ignore
@staticmethod
def _generate_headers(token: str) -> Dict[str, str]:
version = (
Path(__file__)
.resolve()
.parent.joinpath("_version.txt")
.read_text(encoding="utf-8")
)
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {token}",
"cache-control": "no-cache",
"X-UP42-info": f"python/{version}",
}
return headers
# pylint: disable=dangerous-default-value
@retry(
retry=retry_if_429_rate_limit(),
wait=wait_random_exponential(multiplier=0.5, max=180),
reraise=True,
)
def _request_helper(
self, request_type: str, url: str, data: dict = {}, querystring: dict = {}
) -> requests.Response:
"""
Helper function for the request, running the actual request with the correct headers.
Args:
request_type: 'GET', 'POST', 'PUT', 'PATCH', 'DELETE'
url: The requests url.
data: The payload, e.g. dictionary with job parameters etc.
querystring: The querystring.
Returns:
The request response.
"""
headers = self._generate_headers(self.token) # type: ignore
if querystring == {}:
response = requests.request(
method=request_type, url=url, data=json.dumps(data), headers=headers
)
else:
response = requests.request(
method=request_type,
url=url,
data=json.dumps(data),
headers=headers,
params=querystring,
)
logger.debug(response)
logger.debug(data)
response.raise_for_status()
return response
def _request(
self,
request_type: str,
url: str,
data: Union[dict, list] = {},
querystring: dict = {},
return_text: bool = True,
): # Union[str, dict, requests.Response]:
"""
Handles retrying the request and automatically retries and gets a new token if
the old is invalid.
        Retry is enabled by default; it can be set to False via a kwarg of Auth.
In addition to this retry mechanic, 429-errors (too many requests) are retried
more extensively in _request_helper.
Args:
request_type: 'GET', 'POST', 'PUT', 'PATCH', 'DELETE'
url: The url to request.
data: The payload, e.g. dictionary with job parameters etc.
querystring: The querystring.
return_text: If true returns response text/json, false returns response.
            retry: If False, an expired or invalid token is not refreshed, so requests
                made after the token expires (about 5 minutes) will return 401 errors.
Returns:
The API response.
"""
retryer_token = Retrying(
stop=stop_after_attempt(2), # Original attempt + one retry
wait=wait_fixed(0.5),
retry=(
retry_if_401_invalid_token()
| retry_if_exception_type(requests.exceptions.ConnectionError)
),
after=lambda retry_state: self._get_token(), # type:ignore
reraise=True,
# after final failed attempt, raises last attempt's exception instead of RetryError.
)
try:
response = retryer_token(
self._request_helper, request_type, url, data, querystring
)
except requests.exceptions.RequestException as err: # Base error class
err_message = json.loads(err.response.text)["error"]
if "code" in err_message:
err_message = f"{err_message['code']} Error - {err_message['message']}!"
logger.error(err_message)
raise requests.exceptions.RequestException(err_message) from err
# Handle response text.
if return_text:
try:
response_text = json.loads(response.text)
except json.JSONDecodeError: # e.g. JobTask logs are str format.
response_text = response.text
# Handle api error messages here before handling it in every single function.
try:
if response_text["error"] is not None and response_text["data"] is None:
raise ValueError(response_text["error"])
return response_text
except (
KeyError,
TypeError,
): # Catalog search, JobTask logs etc. does not have the usual {"data":"",
# "error":""} format.
return response_text
else: # E.g. for DELETE
return response
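# Minimal usage sketch (illustrative only, credential values are placeholders):
#
#     auth = Auth(project_id="your-project-id", project_api_key="your-api-key")
#     # or: auth = Auth(cfg_file="config.json")
#     project = auth._request("GET", f"{auth._endpoint()}/projects/{auth.project_id}")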
|
the-stack_106_22375 | # -*- coding: utf-8 -*-
'''
Operations on regular files, special files, directories, and symlinks
=====================================================================
Salt States can aggressively manipulate files on a system. There are a number
of ways in which files can be managed.
Regular files can be enforced with the :mod:`file.managed
<salt.states.file.managed>` state. This state downloads files from the salt
master and places them on the target system. Managed files can be rendered as a
jinja, mako, or wempy template, adding a dynamic component to file management.
An example of :mod:`file.managed <salt.states.file.managed>` which makes use of
the jinja templating system would look like this:
.. code-block:: yaml
/etc/http/conf/http.conf:
file.managed:
- source: salt://apache/http.conf
- user: root
- group: root
- mode: 644
- template: jinja
- defaults:
custom_var: "default value"
other_var: 123
{% if grains['os'] == 'Ubuntu' %}
- context:
custom_var: "override"
{% endif %}
It is also possible to use the :mod:`py renderer <salt.renderers.py>` as a
templating option. The template would be a Python script which would need to
contain a function called ``run()``, which returns a string. All arguments
to the state will be made available to the Python script as globals. The
returned string will be the contents of the managed file. For example:
.. code-block:: python
def run():
lines = ['foo', 'bar', 'baz']
lines.extend([source, name, user, context]) # Arguments as globals
return '\\n\\n'.join(lines)
.. note::
The ``defaults`` and ``context`` arguments require extra indentation (four
spaces instead of the normal two) in order to create a nested dictionary.
:ref:`More information <nested-dict-indentation>`.
If using a template, any user-defined template variables in the file defined in
``source`` must be passed in using the ``defaults`` and/or ``context``
arguments. The general best practice is to place default values in
``defaults``, with conditional overrides going into ``context``, as seen above.
The template will receive a variable ``custom_var``, which would be accessed in
the template using ``{{ custom_var }}``. If the operating system is Ubuntu, the
value of the variable ``custom_var`` would be *override*, otherwise it is the
default *default value*
The ``source`` parameter can be specified as a list. If this is done, then the
first file to be matched will be the one that is used. This allows you to have
a default file on which to fall back if the desired file does not exist on the
salt fileserver. Here's an example:
.. code-block:: yaml
/etc/foo.conf:
file.managed:
- source:
- salt://foo.conf.{{ grains['fqdn'] }}
- salt://foo.conf.fallback
- user: foo
- group: users
- mode: 644
- backup: minion
.. note::
Salt supports backing up managed files via the backup option. For more
details on this functionality please review the
:doc:`backup_mode documentation </ref/states/backup_mode>`.
The ``source`` parameter can also specify a file in another Salt environment.
In this example ``foo.conf`` in the ``dev`` environment will be used instead.
.. code-block:: yaml
/etc/foo.conf:
file.managed:
- source:
- salt://foo.conf?saltenv=dev
- user: foo
- group: users
- mode: '0644'
.. warning::
When using a mode that includes a leading zero you must wrap the
value in single quotes. If the value is not wrapped in quotes it
will be read by YAML as an integer and evaluated as an octal.
Special files can be managed via the ``mknod`` function. This function will
create and enforce the permissions on a special file. The function supports the
creation of character devices, block devices, and fifo pipes. The function will
create the directory structure up to the special file if it is needed on the
minion. The function will not overwrite or operate on (change major/minor
numbers) existing special files with the exception of user, group, and
permissions. In most cases the creation of some special files requires root
permissions on the minion. This would require the minion to be run as the
root user. Here is an example of a character device:
.. code-block:: yaml
/var/named/chroot/dev/random:
file.mknod:
- ntype: c
- major: 1
- minor: 8
- user: named
- group: named
- mode: 660
Here is an example of a block device:
.. code-block:: yaml
/var/named/chroot/dev/loop0:
file.mknod:
- ntype: b
- major: 7
- minor: 0
- user: named
- group: named
- mode: 660
Here is an example of a fifo pipe:
.. code-block:: yaml
/var/named/chroot/var/log/logfifo:
file.mknod:
- ntype: p
- user: named
- group: named
- mode: 660
Directories can be managed via the ``directory`` function. This function can
create and enforce the permissions on a directory. A directory statement will
look like this:
.. code-block:: yaml
/srv/stuff/substuf:
file.directory:
- user: fred
- group: users
- mode: 755
- makedirs: True
If you need to enforce user and/or group ownership or permissions recursively
on the directory's contents, you can do so by adding a ``recurse`` directive:
.. code-block:: yaml
/srv/stuff/substuf:
file.directory:
- user: fred
- group: users
- mode: 755
- makedirs: True
- recurse:
- user
- group
- mode
As a default, ``mode`` will resolve to ``dir_mode`` and ``file_mode``, to
specify both directory and file permissions, use this form:
.. code-block:: yaml
/srv/stuff/substuf:
file.directory:
- user: fred
- group: users
- file_mode: 744
- dir_mode: 755
- makedirs: True
- recurse:
- user
- group
- mode
Symlinks can be easily created; the symlink function is very simple and only
takes a few arguments:
.. code-block:: yaml
/etc/grub.conf:
file.symlink:
- target: /boot/grub/grub.conf
Recursive directory management can also be set via the ``recurse``
function. Recursive directory management allows for a directory on the salt
master to be recursively copied down to the minion. This is a great tool for
deploying large code and configuration systems. A state using ``recurse``
would look something like this:
.. code-block:: yaml
/opt/code/flask:
file.recurse:
- source: salt://code/flask
- include_empty: True
A more complex ``recurse`` example:
.. code-block:: yaml
{% set site_user = 'testuser' %}
{% set site_name = 'test_site' %}
{% set project_name = 'test_proj' %}
{% set sites_dir = 'test_dir' %}
django-project:
file.recurse:
- name: {{ sites_dir }}/{{ site_name }}/{{ project_name }}
- user: {{ site_user }}
- dir_mode: 2775
- file_mode: '0644'
- template: jinja
- source: salt://project/templates_dir
- include_empty: True
'''
# Import python libs
from __future__ import absolute_import
import difflib
import itertools
import logging
import os
import shutil
import traceback
from collections import Iterable, Mapping
# Import salt libs
import salt.loader
import salt.payload
import salt.utils
import salt.utils.templates
import salt.utils.url
from salt.utils.locales import sdecode
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext.six.moves import zip_longest
log = logging.getLogger(__name__)
COMMENT_REGEX = r'^([[:space:]]*){0}[[:space:]]?'
__NOT_FOUND = object()
def _get_accumulator_filepath():
'''
Return accumulator data path.
'''
return os.path.join(salt.utils.get_accumulator_dir(__opts__['cachedir']),
__instance_id__)
def _load_accumulators():
def _deserialize(path):
serial = salt.payload.Serial(__opts__)
ret = {'accumulators': {}, 'accumulators_deps': {}}
try:
with salt.utils.fopen(path, 'rb') as f:
loaded = serial.load(f)
return loaded if loaded else ret
except (IOError, NameError):
# NameError is a msgpack error from salt-ssh
return ret
loaded = _deserialize(_get_accumulator_filepath())
return loaded['accumulators'], loaded['accumulators_deps']
def _persist_accummulators(accumulators, accumulators_deps):
accumm_data = {'accumulators': accumulators,
'accumulators_deps': accumulators_deps}
serial = salt.payload.Serial(__opts__)
try:
with salt.utils.fopen(_get_accumulator_filepath(), 'w+b') as f:
serial.dump(accumm_data, f)
except NameError:
# msgpack error from salt-ssh
pass
def _check_user(user, group):
'''
Checks if the named user and group are present on the minion
'''
err = ''
if user:
uid = __salt__['file.user_to_uid'](user)
if uid == '':
err += 'User {0} is not available '.format(user)
if group:
gid = __salt__['file.group_to_gid'](group)
if gid == '':
err += 'Group {0} is not available'.format(group)
return err
def _gen_keep_files(name, require, walk_d=None):
'''
Generate the list of files that need to be kept when a dir based function
like directory or recurse has a clean.
'''
def _is_child(path, directory):
'''
Check whether ``path`` is child of ``directory``
'''
path = os.path.abspath(path)
directory = os.path.abspath(directory)
relative = os.path.relpath(path, directory)
return not relative.startswith(os.pardir)
def _add_current_path(path):
_ret = set()
if os.path.isdir(path):
dirs, files = walk_d.get(path, ((), ()))
_ret.add(path)
for _name in files:
_ret.add(os.path.join(path, _name))
for _name in dirs:
_ret.add(os.path.join(path, _name))
return _ret
def _process_by_walk_d(name, ret):
if os.path.isdir(name):
walk_ret.update(_add_current_path(name))
dirs, _ = walk_d.get(name, ((), ()))
for _d in dirs:
p = os.path.join(name, _d)
walk_ret.update(_add_current_path(p))
_process_by_walk_d(p, ret)
def _process(name):
ret = set()
if os.path.isdir(name):
for root, dirs, files in os.walk(name):
ret.add(name)
for name in files:
ret.add(os.path.join(root, name))
for name in dirs:
ret.add(os.path.join(root, name))
return ret
keep = set()
if isinstance(require, list):
required_files = [comp for comp in require if 'file' in comp]
for comp in required_files:
for low in __lowstate__:
# A requirement should match either the ID and the name of
# another state.
if low['name'] == comp['file'] or low['__id__'] == comp['file']:
fn = low['name']
if os.path.isdir(fn):
if _is_child(fn, name):
if walk_d:
walk_ret = set()
_process_by_walk_d(fn, walk_ret)
keep.update(walk_ret)
else:
keep.update(_process(fn))
else:
keep.add(fn)
return list(keep)
def _check_file(name):
ret = True
msg = ''
if not os.path.isabs(name):
ret = False
msg = 'Specified file {0} is not an absolute path'.format(name)
elif not os.path.exists(name):
ret = False
msg = '{0}: file not found'.format(name)
return ret, msg
def _clean_dir(root, keep, exclude_pat):
'''
Clean out all of the files and directories in a directory (root) while
    preserving the files in a list (keep) and anything matching exclude_pat
'''
removed = set()
real_keep = set()
real_keep.add(root)
if isinstance(keep, list):
for fn_ in keep:
if not os.path.isabs(fn_):
continue
real_keep.add(fn_)
while True:
fn_ = os.path.dirname(fn_)
real_keep.add(fn_)
if fn_ in ['/', ''.join([os.path.splitdrive(fn_)[0], '\\'])]:
break
def _delete_not_kept(nfn):
if nfn not in real_keep:
# -- check if this is a part of exclude_pat(only). No need to
# check include_pat
if not salt.utils.check_include_exclude(
os.path.relpath(nfn, root), None, exclude_pat):
return
removed.add(nfn)
if not __opts__['test']:
try:
os.remove(nfn)
except OSError:
__salt__['file.remove'](nfn)
for roots, dirs, files in os.walk(root):
for name in itertools.chain(dirs, files):
_delete_not_kept(os.path.join(roots, name))
return list(removed)
def _error(ret, err_msg):
ret['result'] = False
ret['comment'] = err_msg
return ret
def _check_directory(name,
user,
group,
recurse,
mode,
clean,
require,
exclude_pat):
'''
Check what changes need to be made on a directory
'''
changes = {}
if recurse or clean:
walk_l = list(os.walk(name)) # walk path only once and store the result
# root: (dirs, files) structure, compatible for python2.6
walk_d = {}
for i in walk_l:
walk_d[i[0]] = (i[1], i[2])
if recurse:
try:
recurse_set = _get_recurse_set(recurse)
except (TypeError, ValueError) as exc:
return False, '{0}'.format(exc), changes
if 'user' not in recurse_set:
user = None
if 'group' not in recurse_set:
group = None
if 'mode' not in recurse_set:
mode = None
check_files = 'ignore_files' not in recurse_set
check_dirs = 'ignore_dirs' not in recurse_set
for root, dirs, files in walk_l:
if check_files:
for fname in files:
fchange = {}
path = os.path.join(root, fname)
stats = __salt__['file.stats'](
path, None, follow_symlinks=False
)
if user is not None and user != stats.get('user'):
fchange['user'] = user
if group is not None and group != stats.get('group'):
fchange['group'] = group
if fchange:
changes[path] = fchange
if check_dirs:
for name_ in dirs:
path = os.path.join(root, name_)
fchange = _check_dir_meta(path, user, group, mode)
if fchange:
changes[path] = fchange
# Recurse skips root (we always do dirs, not root), so always check root:
fchange = _check_dir_meta(name, user, group, mode)
if fchange:
changes[name] = fchange
if clean:
keep = _gen_keep_files(name, require, walk_d)
def _check_changes(fname):
path = os.path.join(root, fname)
if path in keep:
return {}
else:
if not salt.utils.check_include_exclude(
os.path.relpath(path, name), None, exclude_pat):
return {}
else:
return {path: {'removed': 'Removed due to clean'}}
for root, dirs, files in walk_l:
for fname in files:
changes.update(_check_changes(fname))
for name_ in dirs:
changes.update(_check_changes(name_))
if not os.path.isdir(name):
changes[name] = {'directory': 'new'}
if changes:
comments = ['The following files will be changed:\n']
for fn_ in changes:
for key, val in six.iteritems(changes[fn_]):
comments.append('{0}: {1} - {2}\n'.format(fn_, key, val))
return None, ''.join(comments), changes
return True, 'The directory {0} is in the correct state'.format(name), changes
def _check_dir_meta(name,
user,
group,
mode):
'''
Check the changes in directory metadata
'''
stats = __salt__['file.stats'](name, follow_symlinks=False)
changes = {}
if not stats:
changes['directory'] = 'new'
return changes
if (user is not None
and user != stats['user']
and user != stats.get('uid')):
changes['user'] = user
if (group is not None
and group != stats['group']
and group != stats.get('gid')):
changes['group'] = group
# Normalize the dir mode
smode = __salt__['config.manage_mode'](stats['mode'])
mode = __salt__['config.manage_mode'](mode)
if mode is not None and mode != smode:
changes['mode'] = mode
return changes
def _check_touch(name, atime, mtime):
'''
Check to see if a file needs to be updated or created
'''
if not os.path.exists(name):
return None, 'File {0} is set to be created'.format(name)
stats = __salt__['file.stats'](name, follow_symlinks=False)
if atime is not None:
if str(atime) != str(stats['atime']):
return None, 'Times set to be updated on file {0}'.format(name)
if mtime is not None:
if str(mtime) != str(stats['mtime']):
return None, 'Times set to be updated on file {0}'.format(name)
return True, 'File {0} exists and has the correct times'.format(name)
def _get_symlink_ownership(path):
return (
__salt__['file.get_user'](path, follow_symlinks=False),
__salt__['file.get_group'](path, follow_symlinks=False)
)
def _check_symlink_ownership(path, user, group):
'''
Check if the symlink ownership matches the specified user and group
'''
cur_user, cur_group = _get_symlink_ownership(path)
return (cur_user == user) and (cur_group == group)
def _set_symlink_ownership(path, user, group):
'''
Set the ownership of a symlink and return a boolean indicating
success/failure
'''
try:
__salt__['file.lchown'](path, user, group)
except OSError:
pass
return _check_symlink_ownership(path, user, group)
def _symlink_check(name, target, force, user, group):
'''
Check the symlink function
'''
pchanges = {}
if not os.path.exists(name) and not __salt__['file.is_link'](name):
pchanges['new'] = name
return None, 'Symlink {0} to {1} is set for creation'.format(
name, target
), pchanges
if __salt__['file.is_link'](name):
if __salt__['file.readlink'](name) != target:
pchanges['change'] = name
return None, 'Link {0} target is set to be changed to {1}'.format(
name, target
), pchanges
else:
result = True
msg = 'The symlink {0} is present'.format(name)
if not _check_symlink_ownership(name, user, group):
result = None
pchanges['ownership'] = '{0}:{1}'.format(*_get_symlink_ownership(name))
msg += (
', but the ownership of the symlink would be changed '
'from {2}:{3} to {0}:{1}'
).format(user, group, *_get_symlink_ownership(name))
return result, msg, pchanges
else:
if force:
return None, ('The file or directory {0} is set for removal to '
'make way for a new symlink targeting {1}'
.format(name, target)), pchanges
return False, ('File or directory exists where the symlink {0} '
'should be. Did you mean to use force?'.format(name)), pchanges
def _test_owner(kwargs, user=None):
'''
Convert owner to user, since other config management tools use owner,
no need to punish people coming from other systems.
PLEASE DO NOT DOCUMENT THIS! WE USE USER, NOT OWNER!!!!
'''
if user:
return user
if 'owner' in kwargs:
log.warning(
'Use of argument owner found, "owner" is invalid, please '
'use "user"'
)
return kwargs['owner']
return user
def _unify_sources_and_hashes(source=None, source_hash=None,
sources=None, source_hashes=None):
'''
Silly little function to give us a standard tuple list for sources and
source_hashes
'''
if sources is None:
sources = []
if source_hashes is None:
source_hashes = []
if source and sources:
return (False,
"source and sources are mutually exclusive", [])
if source_hash and source_hashes:
return (False,
"source_hash and source_hashes are mutually exclusive", [])
if source:
return (True, '', [(source, source_hash)])
# Make a nice neat list of tuples exactly len(sources) long..
return True, '', list(zip_longest(sources, source_hashes[:len(sources)]))
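# Illustrative behaviour of the helper above, derived from its logic:
#   _unify_sources_and_hashes(source='salt://a', source_hash='abc123')
#       -> (True, '', [('salt://a', 'abc123')])
#   _unify_sources_and_hashes(sources=['salt://a', 'salt://b'], source_hashes=['abc123'])
#       -> (True, '', [('salt://a', 'abc123'), ('salt://b', None)])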
def _get_template_texts(source_list=None,
template='jinja',
defaults=None,
context=None,
**kwargs):
'''
Iterate a list of sources and process them as templates.
Returns a list of 'chunks' containing the rendered templates.
'''
ret = {'name': '_get_template_texts',
'changes': {},
'result': True,
'comment': '',
'data': []}
if source_list is None:
return _error(ret,
'_get_template_texts called with empty source_list')
txtl = []
for (source, source_hash) in source_list:
tmpctx = defaults if defaults else {}
if context:
tmpctx.update(context)
rndrd_templ_fn = __salt__['cp.get_template'](
source,
'',
template=template,
saltenv=__env__,
context=tmpctx,
**kwargs
)
msg = 'cp.get_template returned {0} (Called with: {1})'
log.debug(msg.format(rndrd_templ_fn, source))
if rndrd_templ_fn:
tmplines = None
with salt.utils.fopen(rndrd_templ_fn, 'rb') as fp_:
tmplines = fp_.readlines()
if not tmplines:
msg = 'Failed to read rendered template file {0} ({1})'
log.debug(msg.format(rndrd_templ_fn, source))
ret['name'] = source
return _error(ret, msg.format(rndrd_templ_fn, source))
txtl.append(''.join(tmplines))
else:
msg = 'Failed to load template file {0}'.format(source)
log.debug(msg)
ret['name'] = source
return _error(ret, msg)
ret['data'] = txtl
return ret
def _validate_str_list(arg):
'''
ensure ``arg`` is a list of strings
'''
if isinstance(arg, six.string_types):
ret = [arg]
elif isinstance(arg, Iterable) and not isinstance(arg, Mapping):
ret = []
for item in arg:
if isinstance(item, six.string_types):
ret.append(item)
else:
ret.append(str(item))
else:
ret = [str(arg)]
return ret
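# Illustrative behaviour of the helper above, derived from its branches:
#   _validate_str_list('foo')       -> ['foo']
#   _validate_str_list(['foo', 1])  -> ['foo', '1']
#   _validate_str_list(42)          -> ['42']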
def symlink(
name,
target,
force=False,
backupname=None,
makedirs=False,
user=None,
group=None,
mode=None,
**kwargs):
'''
Create a symbolic link (symlink, soft link)
If the file already exists and is a symlink pointing to any location other
than the specified target, the symlink will be replaced. If the symlink is
a regular file or directory then the state will return False. If the
    regular file or directory is desired to be replaced with a symlink, pass
    force: True; if it is to be renamed, pass a backupname.
name
The location of the symlink to create
target
The location that the symlink points to
force
If the name of the symlink exists and is not a symlink and
force is set to False, the state will fail. If force is set to
True, the file or directory in the way of the symlink file
will be deleted to make room for the symlink, unless
backupname is set, when it will be renamed
backupname
If the name of the symlink exists and is not a symlink, it will be
renamed to the backupname. If the backupname already
exists and force is False, the state will fail. Otherwise, the
backupname will be removed first.
makedirs
If the location of the symlink does not already have a parent directory
then the state will fail, setting makedirs to True will allow Salt to
create the parent directory
user
The user to own the file, this defaults to the user salt is running as
on the minion
group
The group ownership set for the file, this defaults to the group salt
is running as on the minion. On Windows, this is ignored
mode
The permissions to set on this file, aka 644, 0775, 4664. Not supported
on Windows
'''
name = os.path.expanduser(name)
# Make sure that leading zeros stripped by YAML loader are added back
mode = __salt__['config.manage_mode'](mode)
user = _test_owner(kwargs, user=user)
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.symlink')
if user is None:
user = __opts__['user']
if salt.utils.is_windows():
# Make sure the user exists in Windows
# Salt default is 'root'
if not __salt__['user.info'](user):
# User not found, use the account salt is running under
# If username not found, use System
user = __salt__['user.current']()
if not user:
user = 'SYSTEM'
if group is not None:
log.warning(
'The group argument for {0} has been ignored as this '
'is a Windows system.'.format(name)
)
group = user
if group is None:
group = __salt__['file.gid_to_group'](
__salt__['user.info'](user).get('gid', 0)
)
preflight_errors = []
uid = __salt__['file.user_to_uid'](user)
gid = __salt__['file.group_to_gid'](group)
if uid == '':
preflight_errors.append('User {0} does not exist'.format(user))
if gid == '':
preflight_errors.append('Group {0} does not exist'.format(group))
if not os.path.isabs(name):
preflight_errors.append(
'Specified file {0} is not an absolute path'.format(name)
)
if preflight_errors:
msg = '. '.join(preflight_errors)
if len(preflight_errors) > 1:
msg += '.'
return _error(ret, msg)
presult, pcomment, ret['pchanges'] = _symlink_check(name,
target,
force,
user,
group)
if __opts__['test']:
ret['result'] = presult
ret['comment'] = pcomment
return ret
if not os.path.isdir(os.path.dirname(name)):
if makedirs:
__salt__['file.makedirs'](
name,
user=user,
group=group,
mode=mode)
else:
return _error(
ret,
'Directory {0} for symlink is not present'.format(
os.path.dirname(name)
)
)
if __salt__['file.is_link'](name):
# The link exists, verify that it matches the target
if __salt__['file.readlink'](name) != target:
# The target is wrong, delete the link
os.remove(name)
else:
if _check_symlink_ownership(name, user, group):
# The link looks good!
ret['comment'] = ('Symlink {0} is present and owned by '
'{1}:{2}'.format(name, user, group))
else:
if _set_symlink_ownership(name, user, group):
ret['comment'] = ('Set ownership of symlink {0} to '
'{1}:{2}'.format(name, user, group))
ret['changes']['ownership'] = '{0}:{1}'.format(user, group)
else:
ret['result'] = False
ret['comment'] += (
'Failed to set ownership of symlink {0} to '
'{1}:{2}'.format(name, user, group)
)
return ret
elif os.path.isfile(name) or os.path.isdir(name):
# It is not a link, but a file or dir
if backupname is not None:
# Make a backup first
if os.path.lexists(backupname):
if not force:
return _error(ret, ((
'File exists where the backup target {0} should go'
).format(backupname)))
else:
__salt__['file.remove'](backupname)
os.rename(name, backupname)
elif force:
# Remove whatever is in the way
if __salt__['file.is_link'](name):
__salt__['file.remove'](name)
ret['changes']['forced'] = 'Symlink was forcibly replaced'
else:
__salt__['file.remove'](name)
else:
# Otherwise throw an error
if os.path.isfile(name):
return _error(ret,
('File exists where the symlink {0} should be'
.format(name)))
else:
return _error(ret, ((
'Directory exists where the symlink {0} should be'
).format(name)))
if not os.path.exists(name):
# The link is not present, make it
try:
__salt__['file.symlink'](target, name)
except OSError as exc:
ret['result'] = False
ret['comment'] = ('Unable to create new symlink {0} -> '
'{1}: {2}'.format(name, target, exc))
return ret
else:
ret['comment'] = ('Created new symlink {0} -> '
'{1}'.format(name, target))
ret['changes']['new'] = name
if not _check_symlink_ownership(name, user, group):
if not _set_symlink_ownership(name, user, group):
ret['result'] = False
ret['comment'] += (', but was unable to set ownership to '
'{0}:{1}'.format(user, group))
return ret
def absent(name):
'''
Make sure that the named file or directory is absent. If it exists, it will
be deleted. This will work to reverse any of the functions in the file
state module. If a directory is supplied, it will be recursively deleted.
name
The path which should be deleted
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'pchanges': {},
'result': True,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.absent')
if not os.path.isabs(name):
return _error(
ret, 'Specified file {0} is not an absolute path'.format(name)
)
if name == '/':
return _error(ret, 'Refusing to make "/" absent')
if os.path.isfile(name) or os.path.islink(name):
ret['pchanges']['removed'] = name
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'File {0} is set for removal'.format(name)
return ret
try:
__salt__['file.remove'](name)
ret['comment'] = 'Removed file {0}'.format(name)
ret['changes']['removed'] = name
return ret
except CommandExecutionError as exc:
return _error(ret, '{0}'.format(exc))
elif os.path.isdir(name):
ret['pchanges']['removed'] = name
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Directory {0} is set for removal'.format(name)
return ret
try:
__salt__['file.remove'](name)
ret['comment'] = 'Removed directory {0}'.format(name)
ret['changes']['removed'] = name
return ret
except (OSError, IOError):
return _error(ret, 'Failed to remove directory {0}'.format(name))
ret['comment'] = 'File {0} is not present'.format(name)
return ret
def exists(name):
'''
Verify that the named file or directory is present or exists.
Ensures pre-requisites outside of Salt's purview
(e.g., keytabs, private keys, etc.) have been previously satisfied before
deployment.
name
Absolute path which must exist
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'pchanges': {},
'result': True,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.exists')
if not os.path.exists(name):
return _error(ret, 'Specified path {0} does not exist'.format(name))
ret['comment'] = 'Path {0} exists'.format(name)
return ret
def missing(name):
'''
    Verify that the named file or directory is missing. This returns True only
    if the named file is missing, but it does not remove the file if it is present.
name
Absolute path which must NOT exist
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'pchanges': {},
'result': True,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.missing')
if os.path.exists(name):
return _error(ret, 'Specified path {0} exists'.format(name))
ret['comment'] = 'Path {0} is missing'.format(name)
return ret
def managed(name,
source=None,
source_hash='',
user=None,
group=None,
mode=None,
template=None,
makedirs=False,
dir_mode=None,
context=None,
replace=True,
defaults=None,
backup='',
show_changes=True,
create=True,
contents=None,
contents_pillar=None,
contents_grains=None,
contents_newline=True,
contents_delimiter=':',
allow_empty=True,
follow_symlinks=True,
check_cmd=None,
skip_verify=False,
**kwargs):
'''
    Manage a given file. This function allows for a file to be downloaded from
the salt master and potentially run through a templating system.
name
The location of the file to manage
source
The source file to download to the minion, this source file can be
hosted on either the salt master server, or on an HTTP or FTP server.
Both HTTPS and HTTP are supported as well as downloading directly
from Amazon S3 compatible URLs with both pre-configured and automatic
IAM credentials. (see s3.get state documentation)
File retrieval from Openstack Swift object storage is supported via
swift://container/object_path URLs, see swift.get documentation.
For files hosted on the salt file server, if the file is located on
the master in the directory named spam, and is called eggs, the source
string is salt://spam/eggs. If source is left blank or None
(use ~ in YAML), the file will be created as an empty file and
the content will not be managed. This is also the case when a file
already exists and the source is undefined; the contents of the file
will not be changed or managed.
If the file is hosted on a HTTP or FTP server then the source_hash
argument is also required
A list of sources can also be passed in to provide a default source and
a set of fallbacks. The first source in the list that is found to exist
will be used and subsequent entries in the list will be ignored. Source
list functionality only supports local files and remote files hosted on
the salt master server or retrievable via HTTP, HTTPS, or FTP.
.. code-block:: yaml
file_override_example:
file.managed:
- source:
- salt://file_that_does_not_exist
- salt://file_that_exists
source_hash
This can be one of the following:
1. a source hash string
2. the URI of a file that contains source hash strings
The function accepts the first encountered long unbroken alphanumeric
string of correct length as a valid hash, in order from most secure to
least secure:
.. code-block:: text
Type Length
====== ======
sha512 128
sha384 96
sha256 64
sha224 56
sha1 40
md5 32
**Using a Source Hash File**
The file can contain several checksums for several files. Each line
must contain both the file name and the hash. If no file name is
matched, the first hash encountered will be used, otherwise the most
secure hash with the correct source file name will be used.
        When using a source hash file, the source_hash argument needs to be a
        URL; the standard download URLs (ftp, http, salt, etc.) are supported:
Example:
.. code-block:: yaml
tomdroid-src-0.7.3.tar.gz:
file.managed:
- name: /tmp/tomdroid-src-0.7.3.tar.gz
- source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz
- source_hash: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.hash
The following is an example of the supported source_hash format:
.. code-block:: text
/etc/rc.conf ef6e82e4006dee563d98ada2a2a80a27
sha254c8525aee419eb649f0233be91c151178b30f0dff8ebbdcc8de71b1d5c8bcc06a /etc/resolv.conf
ead48423703509d37c4a90e6a0d53e143b6fc268
Debian file type ``*.dsc`` files are also supported.
**Inserting the Source Hash in the sls Data**
Examples:
.. code-block:: yaml
tomdroid-src-0.7.3.tar.gz:
file.managed:
- name: /tmp/tomdroid-src-0.7.3.tar.gz
- source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz
- source_hash: md5=79eef25f9b0b2c642c62b7f737d4f53f
Known issues:
If the remote server URL has the hash file as an apparent
sub-directory of the source file, the module will discover that it
has already cached a directory where a file should be cached. For
example:
.. code-block:: yaml
tomdroid-src-0.7.3.tar.gz:
file.managed:
- name: /tmp/tomdroid-src-0.7.3.tar.gz
- source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz
- source_hash: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz/+md5
user
The user to own the file, this defaults to the user salt is running as
on the minion
group
The group ownership set for the file, this defaults to the group salt
        is running as on the minion. On Windows, this is ignored
mode
The permissions to set on this file, aka 644, 0775, 4664. Not supported
on Windows
template
If this setting is applied then the named templating engine will be
used to render the downloaded file, currently jinja, mako, and wempy
are supported
makedirs : False
If set to ``True``, then the parent directories will be created to
facilitate the creation of the named file. If ``False``, and the parent
directory of the destination file doesn't exist, the state will fail.
dir_mode
If directories are to be created, passing this option specifies the
permissions for those directories. If this is not set, directories
will be assigned permissions from the 'mode' argument.
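        For example (illustrative path and modes):
        .. code-block:: yaml
            /var/www/app/config.ini:
              file.managed:
                - source: salt://app/config.ini
                - makedirs: True
                - dir_mode: 755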
replace : True
If set to ``False`` and the file already exists, the file will not be
modified even if changes would otherwise be made. Permissions and
ownership will still be enforced, however.
context
Overrides default context variables passed to the template.
defaults
Default context passed to the template.
backup
Overrides the default backup mode for this specific file.
show_changes
        Output a unified diff of the old file and the new file. If ``False``,
        return a boolean indicating whether any changes were made.
create : True
If set to ``False``, then the file will only be managed if the file
already exists on the system.
contents
Specify the contents of the file. Cannot be used in combination with
``source``. Ignores hashes and does not use a templating engine.
This value can be either a single string, a multiline YAML string or a
list of strings. If a list of strings, then the strings will be joined
together with newlines in the resulting file. For example, the below
two example states would result in identical file contents:
.. code-block:: yaml
/path/to/file1:
file.managed:
- contents:
- This is line 1
- This is line 2
/path/to/file2:
file.managed:
- contents: |
This is line 1
This is line 2
contents_pillar
.. versionadded:: 0.17.0
Operates like ``contents``, but draws from a value stored in pillar,
using the pillar path syntax used in :mod:`pillar.get
<salt.modules.pillar.get>`. This is useful when the pillar value
contains newlines, as referencing a pillar variable using a jinja/mako
template can result in YAML formatting issues due to the newlines
causing indentation mismatches.
For example, the following could be used to deploy an SSH private key:
.. code-block:: yaml
/home/deployer/.ssh/id_rsa:
file.managed:
- user: deployer
- group: deployer
- mode: 600
- contents_pillar: userdata:deployer:id_rsa
This would populate ``/home/deployer/.ssh/id_rsa`` with the contents of
``pillar['userdata']['deployer']['id_rsa']``. An example of this pillar
setup would be like so:
.. code-block:: yaml
userdata:
deployer:
id_rsa: |
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAoQiwO3JhBquPAalQF9qP1lLZNXVjYMIswrMe2HcWUVBgh+vY
U7sCwx/dH6+VvNwmCoqmNnP+8gTPKGl1vgAObJAnMT623dMXjVKwnEagZPRJIxDy
B/HaAre9euNiY3LvIzBTWRSeMfT+rWvIKVBpvwlgGrfgz70m0pqxu+UyFbAGLin+
GpxzZAMaFpZw4sSbIlRuissXZj/sHpQb8p9M5IeO4Z3rjkCP1cxI
-----END RSA PRIVATE KEY-----
.. note::
The private key above is shortened to keep the example brief, but
            shows how to write a multiline string in YAML. The key is followed by a
            pipe character, and the multiline string is indented two more
            spaces.
To avoid the hassle of creating an indented multiline YAML string,
the :mod:`file_tree external pillar <salt.pillar.file_tree>` can
be used instead. However, this will not work for binary files in
Salt releases before 2015.8.4.
contents_grains
.. versionadded:: 2014.7.0
Same as ``contents_pillar``, but with grains
contents_newline : True
.. versionadded:: 2014.7.0
.. versionchanged:: 2015.8.4
This option is now ignored if the contents being deployed contain
binary data.
If ``True``, files managed using ``contents``, ``contents_pillar``, or
``contents_grains`` will have a newline added to the end of the file if
one is not present. Setting this option to ``False`` will omit this
final newline.
contents_delimiter
.. versionadded:: 2015.8.4
Can be used to specify an alternate delimiter for ``contents_pillar``
or ``contents_grains``. This delimiter will be passed through to
:py:func:`pillar.get <salt.modules.pillar.get>` or :py:func:`grains.get
<salt.modules.grains.get>` when retrieving the contents.
allow_empty : True
.. versionadded:: 2015.8.4
If set to ``False``, then the state will fail if the contents specified
by ``contents_pillar`` or ``contents_grains`` are empty.
follow_symlinks : True
.. versionadded:: 2014.7.0
If the desired path is a symlink follow it and make changes to the
file to which the symlink points.
check_cmd
.. versionadded:: 2014.7.0
The specified command will be run with an appended argument of a
*temporary* file containing the new managed contents. If the command
exits with a zero status the new managed contents will be written to
the managed destination. If the command exits with a nonzero exit
code, the state will fail and no changes will be made to the file.
For example, the following could be used to verify sudoers before making
changes:
.. code-block:: yaml
/etc/sudoers:
file.managed:
- user: root
- group: root
- mode: 0440
- source: salt://sudoers/files/sudoers.jinja
- template: jinja
- check_cmd: /usr/sbin/visudo -c -f
**NOTE**: This ``check_cmd`` functions differently than the requisite
``check_cmd``.
skip_verify : False
If ``True``, hash verification of remote file sources (``http://``,
``https://``, ``ftp://``) will be skipped, and the ``source_hash``
argument will be ignored.
.. versionadded:: 2016.3.0
'''
if 'env' in kwargs:
salt.utils.warn_until(
'Oxygen',
'Parameter \'env\' has been detected in the argument list. This '
'parameter is no longer used and has been replaced by \'saltenv\' '
'as of Salt Carbon. This warning will be removed in Salt Oxygen.'
)
kwargs.pop('env')
name = os.path.expanduser(name)
ret = {'changes': {},
'pchanges': {},
'comment': '',
'name': name,
'result': True}
content_sources = (contents, contents_pillar, contents_grains)
contents_count = len(
[x for x in content_sources if x is not None]
)
if source and contents_count > 0:
return _error(
ret,
'\'source\' cannot be used in combination with \'contents\', '
'\'contents_pillar\', or \'contents_grains\''
)
elif contents_count > 1:
return _error(
ret,
'Only one of \'contents\', \'contents_pillar\', and '
'\'contents_grains\' is permitted'
)
# If no source is specified, set replace to False, as there is nothing
# with which to replace the file.
if not source and contents_count == 0 and replace:
replace = False
log.warning(
'Neither \'source\' nor \'contents\' nor \'contents_pillar\' nor '
'\'contents_grains\' was defined, yet \'replace\' was set to '
'\'True\'. As there is no source to replace the file with, '
'\'replace\' has been set to \'False\' to avoid reading the file '
'unnecessarily.'
)
# Use this below to avoid multiple '\0' checks and save some CPU cycles
if contents_pillar is not None:
use_contents = __salt__['pillar.get'](contents_pillar, __NOT_FOUND)
if use_contents is __NOT_FOUND:
return _error(
ret,
'Pillar {0} does not exist'.format(contents_pillar)
)
elif contents_grains is not None:
use_contents = __salt__['grains.get'](contents_grains, __NOT_FOUND)
if use_contents is __NOT_FOUND:
return _error(
ret,
'Grain {0} does not exist'.format(contents_grains)
)
elif contents is not None:
use_contents = contents
else:
use_contents = None
if use_contents is not None:
if not allow_empty and not use_contents:
if contents_pillar:
contents_id = 'contents_pillar {0}'.format(contents_pillar)
elif contents_grains:
contents_id = 'contents_grains {0}'.format(contents_grains)
else:
contents_id = '\'contents\''
return _error(
ret,
'{0} value would result in empty contents. Set allow_empty '
'to True to allow the managed file to be empty.'
.format(contents_id)
)
contents_are_binary = \
isinstance(use_contents, six.string_types) and '\0' in use_contents
if contents_are_binary:
contents = use_contents
else:
validated_contents = _validate_str_list(use_contents)
if not validated_contents:
return _error(
ret,
'Contents specified by contents/contents_pillar/'
'contents_grains is not a string or list of strings, and '
'is not binary data. SLS is likely malformed.'
)
contents = os.linesep.join(validated_contents)
if contents_newline and not contents.endswith(os.linesep):
contents += os.linesep
# Make sure that leading zeros stripped by YAML loader are added back
mode = __salt__['config.manage_mode'](mode)
if not name:
return _error(ret, 'Must provide name to file.exists')
user = _test_owner(kwargs, user=user)
if salt.utils.is_windows():
if group is not None:
log.warning(
'The group argument for {0} has been ignored as this '
'is a Windows system.'.format(name)
)
group = user
if not create:
if not os.path.isfile(name):
# Don't create a file that is not already present
ret['comment'] = ('File {0} is not present and is not set for '
'creation').format(name)
return ret
u_check = _check_user(user, group)
if u_check:
# The specified user or group do not exist
return _error(ret, u_check)
if not os.path.isabs(name):
return _error(
ret, 'Specified file {0} is not an absolute path'.format(name))
if os.path.isdir(name):
ret['comment'] = 'Specified target {0} is a directory'.format(name)
ret['result'] = False
return ret
if context is None:
context = {}
elif not isinstance(context, dict):
return _error(
ret, 'Context must be formed as a dict')
if defaults and not isinstance(defaults, dict):
return _error(
ret, 'Defaults must be formed as a dict')
if not replace and os.path.exists(name):
# Check and set the permissions if necessary
ret, _ = __salt__['file.check_perms'](name, ret, user, group, mode,
follow_symlinks)
if __opts__['test']:
ret['comment'] = 'File {0} not updated'.format(name)
elif not ret['changes'] and ret['result']:
ret['comment'] = ('File {0} exists with proper permissions. '
'No changes made.'.format(name))
return ret
accum_data, _ = _load_accumulators()
if name in accum_data:
if not context:
context = {}
context['accumulator'] = accum_data[name]
if 'file.check_managed_changes' in __salt__:
ret['pchanges'] = __salt__['file.check_managed_changes'](
name,
source,
source_hash,
user,
group,
mode,
template,
context,
defaults,
__env__,
contents,
skip_verify,
**kwargs
)
else:
ret['pchanges'] = {}
try:
if __opts__['test']:
if ret['pchanges']:
ret['result'] = None
ret['comment'] = 'The file {0} is set to be changed'.format(name)
if show_changes and 'diff' in ret['pchanges']:
ret['changes']['diff'] = ret['pchanges']['diff']
if not show_changes:
ret['changes']['diff'] = '<show_changes=False>'
else:
ret['result'] = True
ret['comment'] = 'The file {0} is in the correct state'.format(name)
return ret
# If the source is a list then find which file exists
source, source_hash = __salt__['file.source_list'](
source,
source_hash,
__env__
)
except CommandExecutionError as exc:
ret['result'] = False
ret['comment'] = 'Unable to manage file: {0}'.format(exc)
return ret
# Gather the source file from the server
try:
sfn, source_sum, comment_ = __salt__['file.get_managed'](
name,
template,
source,
source_hash,
user,
group,
mode,
__env__,
context,
defaults,
skip_verify,
**kwargs
)
except Exception as exc:
ret['changes'] = {}
log.debug(traceback.format_exc())
return _error(ret, 'Unable to manage file: {0}'.format(exc))
tmp_filename = None
if check_cmd:
tmp_filename = salt.utils.mkstemp()
# if exists copy existing file to tmp to compare
if __salt__['file.file_exists'](name):
try:
__salt__['file.copy'](name, tmp_filename)
except Exception as exc:
return _error(
ret,
'Unable to copy file {0} to {1}: {2}'.format(
name, tmp_filename, exc
)
)
try:
ret = __salt__['file.manage_file'](
tmp_filename,
sfn,
ret,
source,
source_sum,
user,
group,
mode,
__env__,
backup,
makedirs,
template,
show_changes,
contents,
dir_mode,
follow_symlinks,
skip_verify)
except Exception as exc:
ret['changes'] = {}
log.debug(traceback.format_exc())
if os.path.isfile(tmp_filename):
os.remove(tmp_filename)
return _error(ret, 'Unable to check_cmd file: {0}'.format(exc))
# If the file would be updated, verify the new contents with check_cmd
if ret['changes']:
# Reset ret
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
check_cmd_opts = {}
if 'shell' in __grains__:
check_cmd_opts['shell'] = __grains__['shell']
cret = mod_run_check_cmd(check_cmd, tmp_filename, **check_cmd_opts)
if isinstance(cret, dict):
ret.update(cret)
if os.path.isfile(tmp_filename):
os.remove(tmp_filename)
return ret
# Since we generated a new tempfile and we are not returning here,
# let's change the original sfn to the new tempfile, or else we will
# get a file-not-found error
sfn = tmp_filename
else:
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
if comment_ and contents is None:
return _error(ret, comment_)
else:
try:
return __salt__['file.manage_file'](
name,
sfn,
ret,
source,
source_sum,
user,
group,
mode,
__env__,
backup,
makedirs,
template,
show_changes,
contents,
dir_mode,
follow_symlinks,
skip_verify)
except Exception as exc:
ret['changes'] = {}
log.debug(traceback.format_exc())
return _error(ret, 'Unable to manage file: {0}'.format(exc))
finally:
if tmp_filename and os.path.isfile(tmp_filename):
os.remove(tmp_filename)
_RECURSE_TYPES = ['user', 'group', 'mode', 'ignore_files', 'ignore_dirs']
def _get_recurse_set(recurse):
'''
Convert a *recurse* definition to a set of strings.
Raises TypeError or ValueError when *recurse* has the wrong structure.
'''
if not recurse:
return set()
if not isinstance(recurse, list):
raise TypeError('"recurse" must be formed as a list of strings')
try:
recurse_set = set(recurse)
except TypeError: # non-hashable elements
recurse_set = None
if recurse_set is None or not set(_RECURSE_TYPES) >= recurse_set:
raise ValueError('Types for "recurse" limited to {0}.'.format(
', '.join('"{0}"'.format(rtype) for rtype in _RECURSE_TYPES)))
if 'ignore_files' in recurse_set and 'ignore_dirs' in recurse_set:
raise ValueError('Must not specify "recurse" options "ignore_files"'
' and "ignore_dirs" at the same time.')
return recurse_set
def directory(name,
user=None,
group=None,
recurse=None,
dir_mode=None,
file_mode=None,
makedirs=False,
clean=False,
require=None,
exclude_pat=None,
follow_symlinks=False,
force=False,
backupname=None,
allow_symlink=True,
**kwargs):
'''
Ensure that a named directory is present and has the right perms
name
The location to create or manage a directory
user
The user to own the directory; this defaults to the user salt is
running as on the minion
group
The group ownership set for the directory; this defaults to the group
salt is running as on the minion. On Windows, this is ignored
recurse
Enforce user/group ownership and mode of directory recursively. Accepts
a list of strings representing what you would like to recurse. If
``mode`` is defined, will recurse on both ``file_mode`` and ``dir_mode`` if
they are defined. If ``ignore_files`` or ``ignore_dirs`` is included, files or
directories will be left unchanged respectively.
Example:
.. code-block:: yaml
/var/log/httpd:
file.directory:
- user: root
- group: root
- dir_mode: 755
- file_mode: 644
- recurse:
- user
- group
- mode
Leave files or directories unchanged:
.. code-block:: yaml
/var/log/httpd:
file.directory:
- user: root
- group: root
- dir_mode: 755
- file_mode: 644
- recurse:
- user
- group
- mode
- ignore_dirs
.. versionadded:: 2015.5.0
dir_mode / mode
The permissions mode to set on any directories created. Not supported on
Windows
file_mode
The permissions mode to set on any files created if 'mode' is included in
'recurse'. This defaults to dir_mode. Not supported on Windows
makedirs
If the directory is located in a path without a parent directory, then
the state will fail. If makedirs is set to True, then the parent
directories will be created to facilitate the creation of the named
directory.
clean
Make sure that only files that are set up by salt and required by this
function are kept. If this option is set then everything in this
directory will be deleted unless it is required.
require
Require other resources such as packages or files
exclude_pat
When 'clean' is set to True, exclude this pattern from removal list
and preserve in the destination.
follow_symlinks : False
If the desired path is a symlink (or ``recurse`` is defined and a
symlink is encountered while recursing), follow it and check the
permissions of the directory/file to which the symlink points.
.. versionadded:: 2014.1.4
force
If the name of the directory exists and is not a directory and
force is set to False, the state will fail. If force is set to
True, the file in the way of the directory will be deleted to
make room for the directory, unless backupname is set,
then it will be renamed.
.. versionadded:: 2014.7.0
backupname
If the name of the directory exists and is not a directory, it will be
renamed to the backupname. If the backupname already
exists and force is False, the state will fail. Otherwise, the
backupname will be removed first.
.. versionadded:: 2014.7.0
allow_symlink : True
If allow_symlink is True and the specified path is a symlink, it will be
allowed to remain if it points to a directory. If allow_symlink is False
then the state will fail, unless force is also set to True, in which case
it will be removed or renamed, depending on the value of the backupname
argument.
.. versionadded:: 2014.7.0
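For example (paths are illustrative), a directory can replace an obstructing
file while keeping a backup of it:

.. code-block:: yaml

    /srv/app/data:
      file.directory:
        - user: root
        - group: root
        - dir_mode: 750
        - makedirs: True
        - force: True
        - backupname: /srv/app/data.bak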
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'pchanges': {},
'result': True,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.directory')
# Remove trailing slash, if present and we're not working on "/" itself
if name[-1] == '/' and name != '/':
name = name[:-1]
user = _test_owner(kwargs, user=user)
if salt.utils.is_windows():
if group is not None:
log.warning(
'The group argument for {0} has been ignored as this is '
'a Windows system.'.format(name)
)
group = user
if 'mode' in kwargs and not dir_mode:
dir_mode = kwargs.get('mode', [])
if not file_mode:
file_mode = dir_mode
# Make sure that leading zeros stripped by YAML loader are added back
dir_mode = __salt__['config.manage_mode'](dir_mode)
file_mode = __salt__['config.manage_mode'](file_mode)
u_check = _check_user(user, group)
if u_check:
# The specified user or group do not exist
return _error(ret, u_check)
if not os.path.isabs(name):
return _error(
ret, 'Specified file {0} is not an absolute path'.format(name))
if os.path.isfile(name) or (not allow_symlink and os.path.islink(name)):
if backupname is not None:
# Make a backup first
if os.path.lexists(backupname):
if not force:
return _error(ret, ((
'File exists where the backup target {0} should go'
).format(backupname)))
else:
__salt__['file.remove'](backupname)
os.rename(name, backupname)
elif force:
# Remove whatever is in the way
if os.path.isfile(name):
os.remove(name)
ret['changes']['forced'] = 'File was forcibly replaced'
elif __salt__['file.is_link'](name):
__salt__['file.remove'](name)
ret['changes']['forced'] = 'Symlink was forcibly replaced'
else:
__salt__['file.remove'](name)
else:
if os.path.isfile(name):
return _error(
ret,
'Specified location {0} exists and is a file'.format(name))
elif os.path.islink(name):
return _error(
ret,
'Specified location {0} exists and is a symlink'.format(name))
presult, pcomment, ret['pchanges'] = _check_directory(
name,
user,
group,
recurse or [],
dir_mode,
clean,
require,
exclude_pat)
if __opts__['test']:
ret['result'] = presult
ret['comment'] = pcomment
return ret
if not os.path.isdir(name):
# The dir does not exist, make it
if not os.path.isdir(os.path.dirname(name)):
# The parent directories do not exist, create them
if makedirs:
# Make sure the drive is mapped before trying to create the
# path in windows
if salt.utils.is_windows():
drive, path = os.path.splitdrive(name)
if not os.path.isdir(drive):
return _error(
ret, 'Drive {0} is not mapped'.format(drive))
# Everything's good, create the path
__salt__['file.makedirs'](
name, user=user, group=group, mode=dir_mode
)
else:
return _error(
ret, 'No directory to create {0} in'.format(name))
__salt__['file.mkdir'](
name, user=user, group=group, mode=dir_mode
)
ret['changes'][name] = 'New Dir'
if not os.path.isdir(name):
return _error(ret, 'Failed to create directory {0}'.format(name))
# Check permissions
ret, perms = __salt__['file.check_perms'](name,
ret,
user,
group,
dir_mode,
follow_symlinks)
if recurse or clean:
walk_l = list(os.walk(name)) # walk path only once and store the result
# root: (dirs, files) structure, compatible with python2.6
walk_d = {}
for i in walk_l:
walk_d[i[0]] = (i[1], i[2])
recurse_set = None
if recurse:
try:
recurse_set = _get_recurse_set(recurse)
except (TypeError, ValueError) as exc:
ret['result'] = False
ret['comment'] = '{0}'.format(exc)
# NOTE: Should this be enough to stop the whole check altogether?
if recurse_set:
if 'user' in recurse_set:
if user:
uid = __salt__['file.user_to_uid'](user)
# file.user_to_uid returns '' if user does not exist. Above
# check for user is not fatal, so we need to be sure user
# exists.
if isinstance(uid, six.string_types):
ret['result'] = False
ret['comment'] = 'Failed to enforce ownership for ' \
'user {0} (user does not ' \
'exist)'.format(user)
else:
ret['result'] = False
ret['comment'] = 'user not specified, but configured as ' \
'a target for recursive ownership ' \
'management'
else:
user = None
if 'group' in recurse_set:
if group:
gid = __salt__['file.group_to_gid'](group)
# As above with user, we need to make sure group exists.
if isinstance(gid, six.string_types):
ret['result'] = False
ret['comment'] = 'Failed to enforce group ownership ' \
'for group {0}'.format(group)
else:
ret['result'] = False
ret['comment'] = 'group not specified, but configured ' \
'as a target for recursive ownership ' \
'management'
else:
group = None
if 'mode' not in recurse_set:
file_mode = None
dir_mode = None
check_files = 'ignore_files' not in recurse_set
check_dirs = 'ignore_dirs' not in recurse_set
for root, dirs, files in walk_l:
if check_files:
for fn_ in files:
full = os.path.join(root, fn_)
ret, perms = __salt__['file.check_perms'](
full,
ret,
user,
group,
file_mode,
follow_symlinks)
if check_dirs:
for dir_ in dirs:
full = os.path.join(root, dir_)
ret, perms = __salt__['file.check_perms'](
full,
ret,
user,
group,
dir_mode,
follow_symlinks)
if clean:
keep = _gen_keep_files(name, require, walk_d)
log.debug('List of kept files when use file.directory with clean: %s',
keep)
removed = _clean_dir(name, list(keep), exclude_pat)
if removed:
ret['changes']['removed'] = removed
ret['comment'] = 'Files cleaned from directory {0}'.format(name)
if not ret['comment']:
ret['comment'] = 'Directory {0} updated'.format(name)
if __opts__['test']:
ret['comment'] = 'Directory {0} not updated'.format(name)
elif not ret['changes'] and ret['result']:
ret['comment'] = 'Directory {0} is in the correct state'.format(name)
return ret
def recurse(name,
source,
clean=False,
require=None,
user=None,
group=None,
dir_mode=None,
file_mode=None,
sym_mode=None,
template=None,
context=None,
defaults=None,
include_empty=False,
backup='',
include_pat=None,
exclude_pat=None,
maxdepth=None,
keep_symlinks=False,
force_symlinks=False,
**kwargs):
'''
Recurse through a subdirectory on the master and copy said subdirectory
over to the specified path.
name
The directory to set the recursion in
source
The source directory, this directory is located on the salt master file
server and is specified with the salt:// protocol. If the directory is
located on the master in the directory named spam, and is called eggs,
the source string is salt://spam/eggs
clean
Make sure that only files that are set up by salt and required by this
function are kept. If this option is set then everything in this
directory will be deleted unless it is required.
require
Require other resources such as packages or files
user
The user to own the directory. This defaults to the user salt is
running as on the minion
group
The group ownership set for the directory. This defaults to the group
salt is running as on the minion. On Windows, this is ignored
dir_mode
The permissions mode to set on any directories created. Not supported on
Windows
file_mode
The permissions mode to set on any files created. Not supported on
Windows
sym_mode
The permissions mode to set on any symlink created. Not supported on
Windows
template
If this setting is applied then the named templating engine will be
used to render the downloaded file. Supported templates are:
`jinja`, `mako` and `wempy`.
.. note::
The template option is required when recursively applying templates.
context
Overrides default context variables passed to the template.
defaults
Default context passed to the template.
include_empty
Set this to True if empty directories should also be created
(default is False)
include_pat
When copying, include only this pattern from the source. Default
is glob match; if prefixed with 'E@', then regexp match.
Example:
.. code-block:: yaml
- include_pat: hello* :: glob matches 'hello01', 'hello02'
... but not 'otherhello'
- include_pat: E@hello :: regexp matches 'otherhello',
'hello01' ...
exclude_pat
Exclude this pattern from the source when copying. If both
`include_pat` and `exclude_pat` are supplied, then it will apply
conditions cumulatively. i.e. first select based on include_pat, and
then within that result apply exclude_pat.
Also, when 'clean=True', exclude this pattern from the removal
list and preserve in the destination.
Example:
.. code-block:: yaml
- exclude_pat: APPDATA* :: glob matches APPDATA.01,
APPDATA.02,.. for exclusion
- exclude_pat: E@(APPDATA)|(TEMPDATA) :: regexp matches APPDATA
or TEMPDATA for exclusion
maxdepth
When copying, only copy paths which are of depth `maxdepth` from the
source path.
Example:
.. code-block:: yaml
- maxdepth: 0 :: Only include files located in the source
directory
- maxdepth: 1 :: Only include files located in the source
or immediate subdirectories
keep_symlinks
Keep symlinks when copying from the source. This option will cause
the copy operation to terminate at the symlink. If behavior similar to
rsync is desired, set this to True.
force_symlinks
Force symlink creation.
If a file or directory is obstructing symlink creation it will be
recursively removed so that symlink creation can proceed. This
option is usually not needed except in special circumstances.
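A typical usage sketch (source and target paths are illustrative):

.. code-block:: yaml

    /opt/code/flask:
      file.recurse:
        - source: salt://code/flask
        - user: root
        - group: root
        - dir_mode: 755
        - file_mode: 644
        - include_empty: True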
'''
if 'env' in kwargs:
salt.utils.warn_until(
'Oxygen',
'Parameter \'env\' has been detected in the argument list. This '
'parameter is no longer used and has been replaced by \'saltenv\' '
'as of Salt Carbon. This warning will be removed in Salt Oxygen.'
)
kwargs.pop('env')
name = os.path.expanduser(sdecode(name))
user = _test_owner(kwargs, user=user)
if salt.utils.is_windows():
if group is not None:
log.warning(
'The group argument for {0} has been ignored as this '
'is a Windows system.'.format(name)
)
group = user
ret = {
'name': name,
'changes': {},
'pchanges': {},
'result': True,
'comment': {} # { path: [comment, ...] }
}
if 'mode' in kwargs:
ret['result'] = False
ret['comment'] = (
'\'mode\' is not allowed in \'file.recurse\'. Please use '
'\'file_mode\' and \'dir_mode\'.'
)
return ret
# Make sure that leading zeros stripped by YAML loader are added back
dir_mode = __salt__['config.manage_mode'](dir_mode)
file_mode = __salt__['config.manage_mode'](file_mode)
u_check = _check_user(user, group)
if u_check:
# The specified user or group do not exist
return _error(ret, u_check)
if not os.path.isabs(name):
return _error(
ret, 'Specified file {0} is not an absolute path'.format(name))
# expand source into source_list
source_list = _validate_str_list(source)
for idx, val in enumerate(source_list):
source_list[idx] = val.rstrip('/')
for precheck in source_list:
if not precheck.startswith('salt://'):
return _error(ret, ('Invalid source {0!r} '
'(must be a salt:// URI)'.format(precheck)))
# Select the first source in source_list that exists
try:
source, source_hash = __salt__['file.source_list'](source_list, '', __env__)
except CommandExecutionError as exc:
ret['result'] = False
ret['comment'] = 'Recurse failed: {0}'.format(exc)
return ret
# Check source path relative to fileserver root, make sure it is a
# directory
source_rel = source.partition('://')[2]
master_dirs = __salt__['cp.list_master_dirs'](__env__)
if source_rel not in master_dirs \
and not any((x for x in master_dirs
if x.startswith(source_rel + '/'))):
ret['result'] = False
ret['comment'] = (
'The directory {0!r} does not exist on the salt fileserver '
'in saltenv {1!r}'.format(source, __env__)
)
return ret
# Verify the target directory
if not os.path.isdir(name):
if os.path.exists(name):
# it is not a dir, but it exists - fail out
return _error(
ret, 'The path {0} exists and is not a directory'.format(name))
if not __opts__['test']:
__salt__['file.makedirs_perms'](
name, user, group, int(str(dir_mode), 8) if dir_mode else None)
def add_comment(path, comment):
comments = ret['comment'].setdefault(path, [])
if isinstance(comment, six.string_types):
comments.append(comment)
else:
comments.extend(comment)
def merge_ret(path, _ret):
# Use the most "negative" result code (out of True, None, False)
if _ret['result'] is False or ret['result'] is True:
ret['result'] = _ret['result']
# Only include comments about files that changed
if _ret['result'] is not True and _ret['comment']:
add_comment(path, _ret['comment'])
if _ret['changes']:
ret['changes'][path] = _ret['changes']
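# Manage a single destination file by delegating to the ``managed`` state
# defined above; results are folded into the overall return via merge_ret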
def manage_file(path, source):
source = salt.utils.url.escape(source)
if clean and os.path.exists(path) and os.path.isdir(path):
_ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
if __opts__['test']:
_ret['comment'] = 'Replacing directory {0} with a ' \
'file'.format(path)
_ret['result'] = None
merge_ret(path, _ret)
return
else:
__salt__['file.remove'](path)
_ret['changes'] = {'diff': 'Replaced directory with a '
'new file'}
merge_ret(path, _ret)
# Conflicts can occur if some kwargs are passed in here
pass_kwargs = {}
faults = ['mode', 'makedirs']
for key in kwargs:
if key not in faults:
pass_kwargs[key] = kwargs[key]
_ret = managed(
path,
source=source,
user=user,
group=group,
mode=file_mode,
template=template,
makedirs=True,
context=context,
defaults=defaults,
backup=backup,
**pass_kwargs)
merge_ret(path, _ret)
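# Manage a single destination directory by delegating to the ``directory``
# state defined above; results are folded in via merge_ret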
def manage_directory(path):
if os.path.basename(path) == '..':
return
if clean and os.path.exists(path) and not os.path.isdir(path):
_ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
if __opts__['test']:
_ret['comment'] = 'Replacing {0} with a directory'.format(path)
_ret['result'] = None
merge_ret(path, _ret)
return
else:
os.remove(path)
_ret['changes'] = {'diff': 'Replaced file with a directory'}
merge_ret(path, _ret)
_ret = directory(
path,
user=user,
group=group,
recurse=[],
dir_mode=dir_mode,
file_mode=None,
makedirs=True,
clean=False,
require=None)
merge_ret(path, _ret)
# Process symlinks and return the updated filenames list
def process_symlinks(filenames, symlinks):
for lname, ltarget in six.iteritems(symlinks):
if not salt.utils.check_include_exclude(
os.path.relpath(lname, srcpath), include_pat, exclude_pat):
continue
srelpath = os.path.relpath(lname, srcpath)
# Check for max depth
if maxdepth is not None:
srelpieces = srelpath.split('/')
if not srelpieces[-1]:
srelpieces = srelpieces[:-1]
if len(srelpieces) > maxdepth + 1:
continue
# Check for all paths that begin with the symlink
# and axe it leaving only the dirs/files below it.
# This needs to use list() otherwise they reference
# the same list.
_filenames = list(filenames)
for filename in _filenames:
if filename.startswith(lname):
log.debug('** skipping file ** {0}, it intersects a '
'symlink'.format(filename))
filenames.remove(filename)
# Create the symlink along with the necessary dirs.
# The dir perms/ownership will be adjusted later
# if needed
_ret = symlink(os.path.join(name, srelpath),
ltarget,
makedirs=True,
force=force_symlinks,
user=user,
group=group,
mode=sym_mode)
if not _ret:
continue
merge_ret(os.path.join(name, srelpath), _ret)
# Add the path to the keep set in case clean is set to True
keep.add(os.path.join(name, srelpath))
vdir.update(keep)
return filenames
keep = set()
vdir = set()
srcpath = salt.utils.url.parse(source)[0]
if not srcpath.endswith('/'):
# we're searching for things that start with this *directory*.
# use '/' since the master only runs on POSIX
srcpath = srcpath + '/'
fns_ = __salt__['cp.list_master'](__env__, srcpath)
# If we are instructed to keep symlinks, then process them.
if keep_symlinks:
# Make this global so that emptydirs can use it if needed.
symlinks = __salt__['cp.list_master_symlinks'](__env__, srcpath)
fns_ = process_symlinks(fns_, symlinks)
for fn_ in fns_:
if not fn_.strip():
continue
# fn_ here is the absolute (from file_roots) source path of
# the file to copy from; it is either a normal file or an
# empty dir (if include_empty==true).
relname = sdecode(os.path.relpath(fn_, srcpath))
if relname.startswith('..'):
continue
# Check for maxdepth of the relative path
if maxdepth is not None:
# Since paths are all master, just use POSIX separator
relpieces = relname.split('/')
# Handle empty directories (include_empty==true) by removing the
# last piece if it is an empty string
if not relpieces[-1]:
relpieces = relpieces[:-1]
if len(relpieces) > maxdepth + 1:
continue
# Check if it is to be excluded. Match only part of the path
# relative to the target directory
if not salt.utils.check_include_exclude(
relname, include_pat, exclude_pat):
continue
dest = os.path.join(name, relname)
dirname = os.path.dirname(dest)
keep.add(dest)
if dirname not in vdir:
# verify the directory perms if they are set
manage_directory(dirname)
vdir.add(dirname)
src = salt.utils.url.create(fn_)
manage_file(dest, src)
if include_empty:
mdirs = __salt__['cp.list_master_dirs'](__env__, srcpath)
for mdir in mdirs:
if not salt.utils.check_include_exclude(
os.path.relpath(mdir, srcpath), include_pat, exclude_pat):
continue
mdest = os.path.join(name, os.path.relpath(mdir, srcpath))
# Check for symlinks that happen to point to an empty dir.
if keep_symlinks:
islink = False
for link in symlinks:
if mdir.startswith(link, 0):
log.debug('** skipping empty dir ** {0}, it intersects'
' a symlink'.format(mdir))
islink = True
break
if islink:
continue
manage_directory(mdest)
keep.add(mdest)
keep = list(keep)
if clean:
# TODO: Use directory(clean=True) instead
keep += _gen_keep_files(name, require)
removed = _clean_dir(name, list(keep), exclude_pat)
if removed:
if __opts__['test']:
if ret['result']:
ret['result'] = None
add_comment('removed', removed)
else:
ret['changes']['removed'] = removed
# Flatten comments until salt command line client learns
# to display structured comments in a readable fashion
ret['comment'] = '\n'.join(u'\n#### {0} ####\n{1}'.format(
k, v if isinstance(v, six.string_types) else '\n'.join(v)
) for (k, v) in six.iteritems(ret['comment'])).strip()
if not ret['comment']:
ret['comment'] = 'Recursively updated {0}'.format(name)
if not ret['changes'] and ret['result']:
ret['comment'] = 'The directory {0} is in the correct state'.format(
name
)
return ret
def line(name, content, match=None, mode=None, location=None,
before=None, after=None, show_changes=True, backup=False,
quiet=False, indent=True, create=False, user=None,
group=None, file_mode=None):
'''
Line-based editing of a file.
.. versionadded:: 2015.8.0
:param name:
Filesystem path to the file to be edited.
:param content:
Content of the line.
:param match:
Match the target line for an action by
a fragment of a string or regular expression.
:param mode:
:Ensure:
If line does not exist, it will be added.
:Replace:
If line already exists, it will be replaced.
:Delete:
Delete the line, once found.
:Insert:
Insert a line.
:param location:
:start:
Place the content at the beginning of the file.
:end:
Place the content at the end of the file.
:param before:
Regular expression or an exact case-sensitive fragment of the string.
:param after:
Regular expression or an exact case-sensitive fragment of the string.
:param show_changes:
Output a unified diff of the old file and the new file.
If ``False``, return a boolean indicating whether any changes were made.
Default is ``True``
.. note::
Using this option will store two copies of the file in-memory
(the original version and the edited version) in order to generate the diff.
:param backup:
Create a backup of the original file with the extension:
"Year-Month-Day-Hour-Minutes-Seconds".
:param quiet:
Do not raise any exceptions, e.g. ignore the fact that the file to be
edited does not exist and that nothing really happened.
:param indent:
Keep indentation with the previous line.
:param create:
Create an empty file if it doesn't exist.
.. versionadded:: Carbon
:param user:
The user to own the file, this defaults to the user salt is running as
on the minion.
.. versionadded:: Carbon
:param group:
The group ownership set for the file, this defaults to the group salt
is running as on the minion. On Windows, this is ignored.
.. versionadded:: Carbon
:param file_mode:
The permissions to set on this file, e.g. 644, 0775, or 4664. Not supported
on Windows.
.. versionadded:: Carbon
If an equal sign (``=``) appears in an argument to a Salt command, it is
interpreted as a keyword argument in the format of ``key=val``. That
processing can be bypassed in order to pass an equal sign through to the
remote shell command by manually specifying the kwarg:
.. code-block:: yaml
update_config:
file.line:
- name: /etc/myconfig.conf
- mode: ensure
- content: my key = my value
- before: somekey.*?
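As a further illustration (file name and values are hypothetical), an
obsolete line can be removed with ``mode: delete``:

.. code-block:: yaml

    remove_old_setting:
      file.line:
        - name: /etc/myconfig.conf
        - mode: delete
        - content: obsolete_key = obsolete_value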
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'pchanges': {},
'result': True,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.line')
if create and not os.path.isfile(name):
managed(name, create=create, user=user, group=group, mode=file_mode)
check_res, check_msg = _check_file(name)
if not check_res:
return _error(ret, check_msg)
changes = __salt__['file.line'](
name, content, match=match, mode=mode, location=location,
before=before, after=after, show_changes=show_changes,
backup=backup, quiet=quiet, indent=indent)
if changes:
ret['pchanges']['diff'] = changes
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Changes would be made:\ndiff:\n{0}'.format(changes)
else:
ret['result'] = True
ret['comment'] = 'Changes were made'
ret['changes'] = {'diff': changes}
else:
ret['result'] = True
ret['comment'] = 'No changes needed to be made'
return ret
def replace(name,
pattern,
repl,
count=0,
flags=8,
bufsize=1,
append_if_not_found=False,
prepend_if_not_found=False,
not_found_content=None,
backup='.bak',
show_changes=True):
r'''
Maintain an edit in a file.
.. versionadded:: 0.17.0
name
Filesystem path to the file to be edited.
pattern
A regular expression, to be matched using Python's
:py:func:`~re.search`.
repl
The replacement text
count
Maximum number of pattern occurrences to be replaced. Defaults to 0.
If count is a positive integer n, no more than n occurrences will be
replaced, otherwise all occurrences will be replaced.
flags
A list of flags defined in the :ref:`re module documentation
<contents-of-module-re>`. Each list item should be a string that will
correlate to the human-friendly flag name. E.g., ``['IGNORECASE',
'MULTILINE']``. Optionally, ``flags`` may be an int, with a value
corresponding to the bitwise OR (``|``) of all the desired flags. Defaults to
8 (which supports 'MULTILINE').
bufsize
How much of the file to buffer into memory at once. The default value
``1`` processes one line at a time. The special value ``file`` may be
specified which will read the entire file into memory before
processing.
append_if_not_found : False
If set to ``True``, and pattern is not found, then the content will be
appended to the file.
.. versionadded:: 2014.7.0
prepend_if_not_found : False
If set to ``True`` and pattern is not found, then the content will be
prepended to the file.
.. versionadded:: 2014.7.0
not_found_content
Content to use for append/prepend if not found. If ``None`` (default),
uses ``repl``. Useful when ``repl`` uses references to groups in the
pattern.
.. versionadded:: 2014.7.0
backup
The file extension to use for a backup of the file before editing. Set
to ``False`` to skip making a backup.
show_changes : True
Output a unified diff of the old file and the new file. If ``False``,
return a boolean indicating whether any changes were made. Returns a
boolean or a string.
.. note:
Using this option will store two copies of the file in memory (the
original version and the edited version) in order to generate the
diff. This may not normally be a concern, but could impact
performance if used with large files.
For complex regex patterns, it can be useful to avoid the need for complex
quoting and escape sequences by making use of YAML's multiline string
syntax.
.. code-block:: yaml
complex_search_and_replace:
file.replace:
# <...snip...>
- pattern: |
CentOS \(2.6.32[^\n]+\n\s+root[^\n]+\n\)+
.. note::
When using YAML multiline string syntax in ``pattern:``, make sure to
also use that syntax in the ``repl:`` part, or you might lose line
feeds.
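A simple usage sketch (path, pattern, and replacement text are illustrative):

.. code-block:: yaml

    update_listen_address:
      file.replace:
        - name: /etc/myapp/myapp.conf
        - pattern: '^listen_address = .*'
        - repl: 'listen_address = 0.0.0.0'
        - append_if_not_found: True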
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'pchanges': {},
'result': True,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.replace')
check_res, check_msg = _check_file(name)
if not check_res:
return _error(ret, check_msg)
changes = __salt__['file.replace'](name,
pattern,
repl,
count=count,
flags=flags,
bufsize=bufsize,
append_if_not_found=append_if_not_found,
prepend_if_not_found=prepend_if_not_found,
not_found_content=not_found_content,
backup=backup,
dry_run=__opts__['test'],
show_changes=show_changes)
if changes:
ret['pchanges']['diff'] = changes
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Changes would have been made:\ndiff:\n{0}'.format(changes)
else:
ret['result'] = True
ret['comment'] = 'Changes were made'
ret['changes'] = {'diff': changes}
else:
ret['result'] = True
ret['comment'] = 'No changes needed to be made'
return ret
def blockreplace(
name,
marker_start='#-- start managed zone --',
marker_end='#-- end managed zone --',
source=None,
source_hash=None,
template='jinja',
sources=None,
source_hashes=None,
defaults=None,
context=None,
content='',
append_if_not_found=False,
prepend_if_not_found=False,
backup='.bak',
show_changes=True):
'''
Maintain an edit in a file in a zone delimited by two line markers
.. versionadded:: 2014.1.0
A block of content delimited by comments can help you manage several
entries without worrying about removing old ones. This can help you
maintain an otherwise unmanaged file that contains manual edits.
Note: this function will store two copies of the file in-memory
(the original version and the edited version) in order to detect changes
and only edit the targeted file if necessary.
name
Filesystem path to the file to be edited
marker_start
The line content identifying a line as the start of the content block.
Note that the whole line containing this marker will be considered, so
whitespace or extra content before or after the marker is included in
final output
marker_end
The line content identifying a line as the end of the content block.
Note that the whole line containing this marker will be considered, so
whitespace or extra content before or after the marker is included in
final output. Note: you can use file.accumulated and target this state.
All accumulated data dictionaries' content will be added as new lines in
the content
content
The content to be used between the two lines identified by
``marker_start`` and ``marker_end``
source
The source file to download to the minion, this source file can be
hosted on either the salt master server, or on an HTTP or FTP server.
Both HTTPS and HTTP are supported as well as downloading directly
from Amazon S3 compatible URLs with both pre-configured and automatic
IAM credentials. (see s3.get state documentation)
File retrieval from Openstack Swift object storage is supported via
swift://container/object_path URLs, see swift.get documentation.
For files hosted on the salt file server, if the file is located on
the master in the directory named spam, and is called eggs, the source
string is salt://spam/eggs. If source is left blank or None
(use ~ in YAML), the file will be created as an empty file and
the content will not be managed. This is also the case when a file
already exists and the source is undefined; the contents of the file
will not be changed or managed.
If the file is hosted on a HTTP or FTP server then the source_hash
argument is also required
A list of sources can also be passed in to provide a default source and
a set of fallbacks. The first source in the list that is found to exist
will be used and subsequent entries in the list will be ignored.
.. code-block:: yaml
file_override_example:
file.managed:
- source:
- salt://file_that_does_not_exist
- salt://file_that_exists
source_hash
This can be one of the following:
1. a source hash string
2. the URI of a file that contains source hash strings
The function accepts the first encountered long unbroken alphanumeric
string of correct length as a valid hash, in order from most secure to
least secure:
.. code-block:: text
Type Length
====== ======
sha512 128
sha384 96
sha256 64
sha224 56
sha1 40
md5 32
**Using a Source Hash File**
The file can contain several checksums for several files. Each line
must contain both the file name and the hash. If no file name is
matched, the first hash encountered will be used, otherwise the most
secure hash with the correct source file name will be used.
When using a source hash file, the source_hash argument needs to be a
URL; the standard download URLs are supported (ftp, http, salt, etc.):
Example:
.. code-block:: yaml
tomdroid-src-0.7.3.tar.gz:
file.managed:
- name: /tmp/tomdroid-src-0.7.3.tar.gz
- source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz
- source_hash: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.hash
The following is an example of the supported source_hash format:
.. code-block:: text
/etc/rc.conf ef6e82e4006dee563d98ada2a2a80a27
sha254c8525aee419eb649f0233be91c151178b30f0dff8ebbdcc8de71b1d5c8bcc06a /etc/resolv.conf
ead48423703509d37c4a90e6a0d53e143b6fc268
Debian file type ``*.dsc`` files are also supported.
**Inserting the Source Hash in the sls Data**
Examples:
.. code-block:: yaml
tomdroid-src-0.7.3.tar.gz:
file.managed:
- name: /tmp/tomdroid-src-0.7.3.tar.gz
- source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz
- source_hash: md5=79eef25f9b0b2c642c62b7f737d4f53f
template
If this setting is applied then the named templating engine will be
used to render the downloaded file, currently jinja, mako, and wempy
are supported
context
Overrides default context variables passed to the template.
defaults
Default context passed to the template.
append_if_not_found
If markers are not found and set to True then the markers and content
will be appended to the file. Default is ``False``
prepend_if_not_found
If markers are not found and set to True then the markers and content
will be prepended to the file. Default is ``False``
backup
The file extension to use for a backup of the file if any edit is made.
Set this to ``False`` to skip making a backup.
dry_run
Don't make any edits to the file
show_changes
Output a unified diff of the old file and the new file. If ``False``
return a boolean if any changes were made
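A minimal sketch without accumulators (path, markers, and content are
illustrative):

.. code-block:: yaml

    motd_managed_block:
      file.blockreplace:
        - name: /etc/motd
        - marker_start: '# BEGIN managed by Salt'
        - marker_end: '# END managed by Salt'
        - content: 'This system is managed by Salt.'
        - append_if_not_found: True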
Example of usage with an accumulator and with a variable:
.. code-block:: yaml
{% set myvar = 42 %}
hosts-config-block-{{ myvar }}:
file.blockreplace:
- name: /etc/hosts
- marker_start: "# START managed zone {{ myvar }} -DO-NOT-EDIT-"
- marker_end: "# END managed zone {{ myvar }} --"
- content: 'First line of content'
- append_if_not_found: True
- backup: '.bak'
- show_changes: True
hosts-config-block-{{ myvar }}-accumulated1:
file.accumulated:
- filename: /etc/hosts
- name: my-accumulator-{{ myvar }}
- text: "text 2"
- require_in:
- file: hosts-config-block-{{ myvar }}
hosts-config-block-{{ myvar }}-accumulated2:
file.accumulated:
- filename: /etc/hosts
- name: my-accumulator-{{ myvar }}
- text: |
text 3
text 4
- require_in:
- file: hosts-config-block-{{ myvar }}
will generate and maintain a block of content in ``/etc/hosts``:
.. code-block:: text
# START managed zone 42 -DO-NOT-EDIT-
First line of content
text 2
text 3
text 4
# END managed zone 42 --
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'pchanges': {},
'result': False,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.blockreplace')
if sources is None:
sources = []
if source_hashes is None:
source_hashes = []
(ok_, err, sl_) = _unify_sources_and_hashes(source=source,
source_hash=source_hash,
sources=sources,
source_hashes=source_hashes)
if not ok_:
return _error(ret, err)
check_res, check_msg = _check_file(name)
if not check_res:
return _error(ret, check_msg)
accum_data, accum_deps = _load_accumulators()
if name in accum_data:
accumulator = accum_data[name]
# if we have multiple accumulators for a file, only apply the one
# required at a time
deps = accum_deps.get(name, [])
filtered = [a for a in deps if
__low__['__id__'] in deps[a] and a in accumulator]
if not filtered:
filtered = [a for a in accumulator]
for acc in filtered:
acc_content = accumulator[acc]
for line in acc_content:
if content == '':
content = line
else:
content += "\n" + line
if sl_:
tmpret = _get_template_texts(source_list=sl_,
template=template,
defaults=defaults,
context=context)
if not tmpret['result']:
return tmpret
text = tmpret['data']
for index, item in enumerate(text):
content += str(item)
changes = __salt__['file.blockreplace'](
name,
marker_start,
marker_end,
content=content,
append_if_not_found=append_if_not_found,
prepend_if_not_found=prepend_if_not_found,
backup=backup,
dry_run=__opts__['test'],
show_changes=show_changes
)
if changes:
ret['pchanges'] = {'diff': changes}
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Changes would be made'
else:
ret['changes'] = {'diff': changes}
ret['result'] = True
ret['comment'] = 'Changes were made'
else:
ret['result'] = True
ret['comment'] = 'No changes needed to be made'
return ret
def comment(name, regex, char='#', backup='.bak'):
'''
Comment out specified lines in a file.
name
The full path to the file to be edited
regex
A regular expression used to find the lines that are to be commented;
this pattern will be wrapped in parentheses and will move any
preceding/trailing ``^`` or ``$`` characters outside the parentheses
(e.g., the pattern ``^foo$`` will be rewritten as ``^(foo)$``)
Note that you _need_ the leading ^, otherwise each time you run
highstate, another comment char will be inserted.
char : ``#``
The character to be inserted at the beginning of a line in order to
comment it out
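For example (path and pattern are illustrative), a semicolon can be used
as the comment character for INI-style files:

.. code-block:: yaml

    /etc/php.ini:
      file.comment:
        - regex: ^display_errors
        - char: ';'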
backup : ``.bak``
The file will be backed up before edit with this file extension
.. warning::
This backup will be overwritten each time ``sed`` / ``comment`` /
``uncomment`` is called. Meaning the backup will only be useful
after the first invocation.
Usage:
.. code-block:: yaml
/etc/fstab:
file.comment:
- regex: ^bind 127.0.0.1
.. versionadded:: 0.9.5
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'pchanges': {},
'result': False,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.comment')
check_res, check_msg = _check_file(name)
if not check_res:
return _error(ret, check_msg)
unanchor_regex = regex.lstrip('^').rstrip('$')
# Make sure the pattern appears in the file before continuing
if not __salt__['file.search'](name, regex, multiline=True):
if __salt__['file.search'](name, unanchor_regex, multiline=True):
ret['comment'] = 'Pattern already commented'
ret['result'] = True
return ret
else:
return _error(ret, '{0}: Pattern not found'.format(unanchor_regex))
ret['pchanges'][name] = 'updated'
if __opts__['test']:
ret['comment'] = 'File {0} is set to be updated'.format(name)
ret['result'] = None
return ret
with salt.utils.fopen(name, 'rb') as fp_:
slines = fp_.readlines()
# Perform the edit
__salt__['file.comment_line'](name, regex, char, True, backup)
with salt.utils.fopen(name, 'rb') as fp_:
nlines = fp_.readlines()
# Check the result
ret['result'] = __salt__['file.search'](name, unanchor_regex, multiline=True)
if slines != nlines:
if not salt.utils.istextfile(name):
ret['changes']['diff'] = 'Replace binary file'
else:
# Changes happened, add them
ret['changes']['diff'] = (
''.join(difflib.unified_diff(slines, nlines))
)
if ret['result']:
ret['comment'] = 'Commented lines successfully'
else:
ret['comment'] = 'Expected commented lines not found'
return ret
def uncomment(name, regex, char='#', backup='.bak'):
'''
Uncomment specified commented lines in a file
name
The full path to the file to be edited
regex
A regular expression used to find the lines that are to be uncommented.
This regex should not include the comment character. A leading ``^``
character will be stripped for convenience (for easily switching
between comment() and uncomment()). The regex will be searched for
from the beginning of the line, ignoring leading spaces (we prepend
'^[ \\t]*')
char : ``#``
The character to remove in order to uncomment a line
backup : ``.bak``
The file will be backed up before edit with this file extension;
.. warning::
This backup will be overwritten each time ``sed`` / ``comment`` /
``uncomment`` is called. Meaning the backup will only be useful
after the first invocation.
Usage:
.. code-block:: yaml
/etc/adduser.conf:
file.uncomment:
- regex: EXTRA_GROUPS
.. versionadded:: 0.9.5
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'pchanges': {},
'result': False,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.uncomment')
check_res, check_msg = _check_file(name)
if not check_res:
return _error(ret, check_msg)
# Make sure the pattern appears in the file
if __salt__['file.search'](
name,
'^[ \t]*{0}'.format(regex.lstrip('^')),
multiline=True):
ret['comment'] = 'Pattern already uncommented'
ret['result'] = True
return ret
elif __salt__['file.search'](
name,
'{0}[ \t]*{1}'.format(char, regex.lstrip('^')),
multiline=True):
# Line exists and is commented
pass
else:
return _error(ret, '{0}: Pattern not found'.format(regex))
ret['pchanges'][name] = 'updated'
if __opts__['test']:
ret['comment'] = 'File {0} is set to be updated'.format(name)
ret['result'] = None
return ret
with salt.utils.fopen(name, 'rb') as fp_:
slines = fp_.readlines()
# Perform the edit
__salt__['file.comment_line'](name, regex, char, False, backup)
with salt.utils.fopen(name, 'rb') as fp_:
nlines = fp_.readlines()
# Check the result
ret['result'] = __salt__['file.search'](
name,
'^[ \t]*{0}'.format(regex.lstrip('^')),
multiline=True
)
if slines != nlines:
if not salt.utils.istextfile(name):
ret['changes']['diff'] = 'Replace binary file'
else:
# Changes happened, add them
ret['changes']['diff'] = (
''.join(difflib.unified_diff(slines, nlines))
)
if ret['result']:
ret['comment'] = 'Uncommented lines successfully'
else:
ret['comment'] = 'Expected uncommented lines not found'
return ret
def append(name,
text=None,
makedirs=False,
source=None,
source_hash=None,
template='jinja',
sources=None,
source_hashes=None,
defaults=None,
context=None,
ignore_whitespace=True):
'''
Ensure that some text appears at the end of a file.
The text will not be appended if it already exists in the file.
A single string of text or a list of strings may be appended.
name
The location of the file to append to.
text
The text to be appended, which can be a single string or a list
of strings.
makedirs
If the file is located in a path without a parent directory,
then the state will fail. If makedirs is set to True, then
the parent directories will be created to facilitate the
creation of the named file. Defaults to False.
source
A single source file to append. This source file can be hosted on either
the salt master server, or on an HTTP or FTP server. Both HTTPS and
HTTP are supported as well as downloading directly from Amazon S3
compatible URLs with both pre-configured and automatic IAM credentials
(see s3.get state documentation). File retrieval from Openstack Swift
object storage is supported via swift://container/object_path URLs
(see swift.get documentation).
For files hosted on the salt file server, if the file is located on
the master in the directory named spam, and is called eggs, the source
string is salt://spam/eggs.
If the file is hosted on an HTTP or FTP server, the source_hash argument
is also required.
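For instance (paths are hypothetical), text can be appended from a file
hosted on the master:

.. code-block:: yaml

    /etc/motd:
      file.append:
        - source: salt://motd/extra-message.txt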
source_hash
This can be one of the following:
1. a source hash string
2. the URI of a file that contains source hash strings
The function accepts the first encountered long unbroken alphanumeric
string of correct length as a valid hash, in order from most secure to
least secure:
.. code-block:: text
Type Length
====== ======
sha512 128
sha384 96
sha256 64
sha224 56
sha1 40
md5 32
The file can contain several checksums for several files. Each line
must contain both the file name and the hash. If no file name is
matched, the first hash encountered will be used, otherwise the most
secure hash with the correct source file name will be used.
Debian file type ``*.dsc`` is supported.
Examples:
.. code-block:: text
/etc/rc.conf ef6e82e4006dee563d98ada2a2a80a27
sha254c8525aee419eb649f0233be91c151178b30f0dff8ebbdcc8de71b1d5c8bcc06a /etc/resolv.conf
ead48423703509d37c4a90e6a0d53e143b6fc268
Known issues:
If the remote server URL has the hash file as an apparent
sub-directory of the source file, the module will discover that it
has already cached a directory where a file should be cached. For
example:
.. code-block:: yaml
tomdroid-src-0.7.3.tar.gz:
file.managed:
- name: /tmp/tomdroid-src-0.7.3.tar.gz
- source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz
- source_hash: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz/+md5
template : ``jinja``
The named templating engine will be used to render the appended-to
file. Defaults to jinja.
sources
A list of source files to append. If the files are hosted on an HTTP or
FTP server, the source_hashes argument is also required.
source_hashes
A list of source_hashes corresponding to the sources list specified in
the sources argument.
defaults
Default context passed to the template.
context
Overrides default context variables passed to the template.
ignore_whitespace
.. versionadded:: 2015.8.4
Spaces and tabs in the text are ignored by default when searching for
the content to append; one space or multiple tabs are treated the same
by Salt. Set this option to ``False`` if you want to change this behavior.
Multi-line example:
.. code-block:: yaml
/etc/motd:
file.append:
- text: |
Thou hadst better eat salt with the Philosophers of Greece,
than sugar with the Courtiers of Italy.
- Benjamin Franklin
Multiple lines of text:
.. code-block:: yaml
/etc/motd:
file.append:
- text:
- Trust no one unless you have eaten much salt with him.
- "Salt is born of the purest of parents: the sun and the sea."
Gather text from multiple template files:
.. code-block:: yaml
/etc/motd:
file:
- append
- template: jinja
- sources:
- salt://motd/devops-messages.tmpl
- salt://motd/hr-messages.tmpl
- salt://motd/general-messages.tmpl
.. versionadded:: 0.9.5
'''
name = os.path.expanduser(name)
ret = {
'name': name,
'changes': {},
'pchanges': {},
'result': False,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.append')
if sources is None:
sources = []
if source_hashes is None:
source_hashes = []
# Add sources and source_hashes with template support
# NOTE: FIX 'text' and any 'source' are mutually exclusive as 'text'
# is re-assigned in the original code.
(ok_, err, sl_) = _unify_sources_and_hashes(source=source,
source_hash=source_hash,
sources=sources,
source_hashes=source_hashes)
if not ok_:
return _error(ret, err)
if makedirs is True:
dirname = os.path.dirname(name)
if not __salt__['file.directory_exists'](dirname):
__salt__['file.makedirs'](name)
check_res, check_msg, ret['pchanges'] = _check_directory(
dirname, None, None, False, None, False, False, None
)
if not check_res:
return _error(ret, check_msg)
check_res, check_msg = _check_file(name)
if not check_res:
# Try to create the file
touch(name, makedirs=makedirs)
retry_res, retry_msg = _check_file(name)
if not retry_res:
return _error(ret, check_msg)
# Follow the original logic and re-assign 'text' if using source(s)...
if sl_:
tmpret = _get_template_texts(source_list=sl_,
template=template,
defaults=defaults,
context=context)
if not tmpret['result']:
return tmpret
text = tmpret['data']
text = _validate_str_list(text)
with salt.utils.fopen(name, 'rb') as fp_:
slines = fp_.readlines()
count = 0
test_lines = []
try:
for chunk in text:
if ignore_whitespace:
if __salt__['file.search'](
name,
salt.utils.build_whitespace_split_regex(chunk),
multiline=True):
continue
elif __salt__['file.search'](
name,
chunk,
multiline=True):
continue
lines = chunk.splitlines()
for line in lines:
if __opts__['test']:
ret['comment'] = 'File {0} is set to be updated'.format(name)
ret['result'] = None
test_lines.append('{0}\n'.format(line))
else:
__salt__['file.append'](name, line)
count += 1
except TypeError:
return _error(ret, 'No text found to append. Nothing appended')
if __opts__['test']:
nlines = slines + test_lines
ret['result'] = None
if slines != nlines:
if not salt.utils.istextfile(name):
ret['changes']['diff'] = 'Replace binary file'
else:
# Changes happened, add them
ret['changes']['diff'] = (
''.join(difflib.unified_diff(slines, nlines))
)
else:
ret['comment'] = 'File {0} is in correct state'.format(name)
ret['result'] = True
return ret
with salt.utils.fopen(name, 'rb') as fp_:
nlines = fp_.readlines()
if slines != nlines:
if not salt.utils.istextfile(name):
ret['changes']['diff'] = 'Replace binary file'
else:
# Changes happened, add them
ret['changes']['diff'] = (
''.join(difflib.unified_diff(slines, nlines))
)
if count:
ret['comment'] = 'Appended {0} lines'.format(count)
else:
ret['comment'] = 'File {0} is in correct state'.format(name)
ret['result'] = True
return ret
def prepend(name,
text=None,
makedirs=False,
source=None,
source_hash=None,
template='jinja',
sources=None,
source_hashes=None,
defaults=None,
context=None):
'''
Ensure that some text appears at the beginning of a file
The text will not be prepended again if it already exists in the file. You
may specify a single line of text or a list of lines to prepend.
Multi-line example:
.. code-block:: yaml
/etc/motd:
file.prepend:
- text: |
Thou hadst better eat salt with the Philosophers of Greece,
than sugar with the Courtiers of Italy.
- Benjamin Franklin
Multiple lines of text:
.. code-block:: yaml
/etc/motd:
file.prepend:
- text:
- Trust no one unless you have eaten much salt with him.
- "Salt is born of the purest of parents: the sun and the sea."
Gather text from multiple template files:
.. code-block:: yaml
/etc/motd:
file:
- prepend
- template: jinja
- sources:
- salt://motd/devops-messages.tmpl
- salt://motd/hr-messages.tmpl
- salt://motd/general-messages.tmpl
.. versionadded:: 2014.7.0
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'pchanges': {},
'result': False,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.prepend')
if sources is None:
sources = []
if source_hashes is None:
source_hashes = []
# Add sources and source_hashes with template support
# NOTE: FIX 'text' and any 'source' are mutually exclusive as 'text'
# is re-assigned in the original code.
(ok_, err, sl_) = _unify_sources_and_hashes(source=source,
source_hash=source_hash,
sources=sources,
source_hashes=source_hashes)
if not ok_:
return _error(ret, err)
if makedirs is True:
dirname = os.path.dirname(name)
if not __salt__['file.directory_exists'](dirname):
__salt__['file.makedirs'](name)
check_res, check_msg, ret['pchanges'] = _check_directory(
dirname, None, None, False, None, False, False, None
)
if not check_res:
return _error(ret, check_msg)
check_res, check_msg = _check_file(name)
if not check_res:
# Try to create the file
touch(name, makedirs=makedirs)
retry_res, retry_msg = _check_file(name)
if not retry_res:
return _error(ret, check_msg)
# Follow the original logic and re-assign 'text' if using source(s)...
if sl_:
tmpret = _get_template_texts(source_list=sl_,
template=template,
defaults=defaults,
context=context)
if not tmpret['result']:
return tmpret
text = tmpret['data']
text = _validate_str_list(text)
with salt.utils.fopen(name, 'rb') as fp_:
slines = fp_.readlines()
count = 0
test_lines = []
preface = []
for chunk in text:
if __salt__['file.search'](
name,
salt.utils.build_whitespace_split_regex(chunk),
multiline=True):
continue
lines = chunk.splitlines()
for line in lines:
if __opts__['test']:
ret['comment'] = 'File {0} is set to be updated'.format(name)
ret['result'] = None
test_lines.append('{0}\n'.format(line))
else:
preface.append(line)
count += 1
if __opts__['test']:
nlines = test_lines + slines
if slines != nlines:
if not salt.utils.istextfile(name):
ret['changes']['diff'] = 'Replace binary file'
else:
# Changes happened, add them
ret['changes']['diff'] = (
''.join(difflib.unified_diff(slines, nlines))
)
ret['result'] = None
else:
ret['comment'] = 'File {0} is in correct state'.format(name)
ret['result'] = True
return ret
__salt__['file.prepend'](name, *preface)
with salt.utils.fopen(name, 'rb') as fp_:
nlines = fp_.readlines()
if slines != nlines:
if not salt.utils.istextfile(name):
ret['changes']['diff'] = 'Replace binary file'
else:
# Changes happened, add them
ret['changes']['diff'] = (
''.join(difflib.unified_diff(slines, nlines))
)
if count:
ret['comment'] = 'Prepended {0} lines'.format(count)
else:
ret['comment'] = 'File {0} is in correct state'.format(name)
ret['result'] = True
return ret
def patch(name,
source=None,
hash=None,
options='',
dry_run_first=True,
**kwargs):
'''
Apply a patch to a file or directory.
.. note::
A suitable ``patch`` executable must be available on the minion when
using this state function.
name
The file or directory to which the patch will be applied.
source
The source patch to download to the minion, this source file must be
hosted on the salt master server. If the file is located in the
directory named spam, and is called eggs, the source string is
salt://spam/eggs. A source is required.
hash
Hash of the patched file. If the hash of the target file matches this
value then the patch is assumed to have been applied. The hash string
is the hash algorithm followed by the hash of the file:
md5=e138491e9d5b97023cea823fe17bac22
options
Extra options to pass to patch.
dry_run_first : ``True``
Run patch with ``--dry-run`` first to check if it will apply cleanly.
saltenv
Specify the environment from which to retrieve the patch file indicated
by the ``source`` parameter. If not provided, this defaults to the
environment from which the state is being executed.
Usage:
.. code-block:: yaml
# Equivalent to ``patch --forward /opt/file.txt file.patch``
/opt/file.txt:
file.patch:
- source: salt://file.patch
- hash: md5=e138491e9d5b97023cea823fe17bac22
'''
if 'env' in kwargs:
salt.utils.warn_until(
'Oxygen',
'Parameter \'env\' has been detected in the argument list. This '
'parameter is no longer used and has been replaced by \'saltenv\' '
'as of Salt Carbon. This warning will be removed in Salt Oxygen.'
)
kwargs.pop('env')
name = os.path.expanduser(name)
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.patch')
check_res, check_msg = _check_file(name)
if not check_res:
return _error(ret, check_msg)
if not source:
return _error(ret, 'Source is required')
if hash is None:
return _error(ret, 'Hash is required')
if hash and __salt__['file.check_hash'](name, hash):
ret.update(result=True, comment='Patch is already applied')
return ret
# get cached file or copy it to cache
cached_source_path = __salt__['cp.cache_file'](source, __env__)
if not cached_source_path:
ret['comment'] = ('Unable to cache {0} from saltenv {1!r}'
.format(source, __env__))
return ret
log.debug(
'State patch.applied cached source {0} -> {1}'.format(
source, cached_source_path
)
)
if dry_run_first or __opts__['test']:
ret['changes'] = __salt__['file.patch'](
name, cached_source_path, options=options, dry_run=True
)
if __opts__['test']:
ret['comment'] = 'File {0} will be patched'.format(name)
ret['result'] = None
return ret
if ret['changes']['retcode']:
return ret
ret['changes'] = __salt__['file.patch'](
name, cached_source_path, options=options
)
ret['result'] = not ret['changes']['retcode']
if ret['result'] and hash and not __salt__['file.check_hash'](name, hash):
ret.update(
result=False,
comment='File {0} hash mismatch after patch was applied'.format(
name
)
)
return ret
def touch(name, atime=None, mtime=None, makedirs=False):
'''
Replicate the 'nix "touch" command to create a new empty
file or update the atime and mtime of an existing file.
Note that if you just want to create a file and don't care about atime or
mtime, you should use ``file.managed`` instead, as it is more
feature-complete. (Just leave out the ``source``/``template``/``contents``
arguments, and it will just create the file and/or check its permissions,
without messing with contents)
name
name of the file
atime
atime of the file
mtime
mtime of the file
makedirs
whether we should create the parent directory/directories in order to
touch the file
Usage:
.. code-block:: yaml
/var/log/httpd/logrotate.empty:
file.touch
.. versionadded:: 0.9.5
'''
name = os.path.expanduser(name)
ret = {
'name': name,
'changes': {},
}
if not name:
return _error(ret, 'Must provide name to file.touch')
if not os.path.isabs(name):
return _error(
ret, 'Specified file {0} is not an absolute path'.format(name)
)
if __opts__['test']:
ret['result'], ret['comment'] = _check_touch(name, atime, mtime)
return ret
if makedirs:
__salt__['file.makedirs'](name)
if not os.path.isdir(os.path.dirname(name)):
return _error(
ret, 'Directory not present to touch file {0}'.format(name)
)
extant = os.path.exists(name)
ret['result'] = __salt__['file.touch'](name, atime, mtime)
if not extant and ret['result']:
ret['comment'] = 'Created empty file {0}'.format(name)
ret['changes']['new'] = name
elif extant and ret['result']:
ret['comment'] = 'Updated times on {0} {1}'.format(
'directory' if os.path.isdir(name) else 'file', name
)
ret['changes']['touched'] = name
return ret
def copy(
name,
source,
force=False,
makedirs=False,
preserve=False,
user=None,
group=None,
mode=None,
subdir=False,
**kwargs):
'''
If the source file exists on the system, copy it to the named file. The
named file will not be overwritten if it already exists unless the force
option is set to True.
name
The location of the file to copy to
source
The location of the file to copy to the location specified with name
force
If the target location is present then the file will not be moved,
specify "force: True" to overwrite the target file
makedirs
If the target subdirectories don't exist create them
preserve
.. versionadded:: 2015.5.0
Set ``preserve: True`` to preserve user/group ownership and mode
after copying. Default is ``False``. If ``preserve`` is set to ``True``,
then user/group/mode attributes will be ignored.
user
.. versionadded:: 2015.5.0
The user to own the copied file, this defaults to the user salt is
running as on the minion. If ``preserve`` is set to ``True``, then
this will be ignored
group
.. versionadded:: 2015.5.0
The group to own the copied file, this defaults to the group salt is
running as on the minion. If ``preserve`` is set to ``True`` or on
Windows this will be ignored
mode
.. versionadded:: 2015.5.0
The permissions to set on the copied file, aka 644, '0775', '4664'.
If ``preserve`` is set to ``True``, then this will be ignored.
Not supported on Windows
subdir
.. versionadded:: 2015.5.0
If the name is a directory then place the file inside the named
directory
'''
name = os.path.expanduser(name)
source = os.path.expanduser(source)
ret = {
'name': name,
'changes': {},
'comment': 'Copied "{0}" to "{1}"'.format(source, name),
'result': True}
if not name:
return _error(ret, 'Must provide name to file.copy')
changed = True
if not os.path.isabs(name):
return _error(
ret, 'Specified file {0} is not an absolute path'.format(name))
if not os.path.exists(source):
return _error(ret, 'Source file "{0}" is not present'.format(source))
if preserve:
user = __salt__['file.get_user'](source)
group = __salt__['file.get_group'](source)
mode = __salt__['file.get_mode'](source)
else:
user = _test_owner(kwargs, user=user)
if user is None:
user = __opts__['user']
if salt.utils.is_windows():
if group is not None:
log.warning(
'The group argument for {0} has been ignored as this is '
'a Windows system.'.format(name)
)
group = user
if group is None:
group = __salt__['file.gid_to_group'](
__salt__['user.info'](user).get('gid', 0)
)
u_check = _check_user(user, group)
if u_check:
# The specified user or group do not exist
return _error(ret, u_check)
if mode is None:
mode = __salt__['file.get_mode'](source)
if os.path.isdir(name) and subdir:
        # If the target is a dir and subdir is True, copy the file into the dir
name = os.path.join(name, os.path.basename(source))
if os.path.lexists(source) and os.path.lexists(name):
# if this is a file which did not change, do not update
if force and os.path.isfile(name):
hash1 = salt.utils.get_hash(name)
hash2 = salt.utils.get_hash(source)
if hash1 == hash2:
changed = False
if not force:
changed = False
elif not __opts__['test'] and changed:
# Remove the destination to prevent problems later
try:
__salt__['file.remove'](name)
except (IOError, OSError):
return _error(
ret,
'Failed to delete "{0}" in preparation for '
'forced move'.format(name)
)
if __opts__['test']:
if changed:
ret['comment'] = 'File "{0}" is set to be copied to "{1}"'.format(
source,
name
)
ret['result'] = None
else:
ret['comment'] = ('The target file "{0}" exists and will not be '
'overwritten'.format(name))
ret['result'] = True
return ret
if not changed:
ret['comment'] = ('The target file "{0}" exists and will not be '
'overwritten'.format(name))
ret['result'] = True
return ret
# Run makedirs
dname = os.path.dirname(name)
if not os.path.isdir(dname):
if makedirs:
__salt__['file.makedirs'](name)
else:
return _error(
ret,
'The target directory {0} is not present'.format(dname))
# All tests pass, move the file into place
try:
shutil.copy(source, name)
ret['changes'] = {name: source}
# Preserve really means just keep the behavior of the cp command. If
# the filesystem we're copying to is squashed or doesn't support chown
# then we shouldn't be checking anything.
if not preserve:
__salt__['file.check_perms'](name, ret, user, group, mode)
except (IOError, OSError):
return _error(
ret, 'Failed to copy "{0}" to "{1}"'.format(source, name))
return ret
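# Illustrative SLS usage of file.copy (not taken from the docstring above;
# the paths and mode are hypothetical):
#
#     /etc/example.conf:
#       file.copy:
#         - source: /etc/example.conf.orig
#         - force: True
#         - makedirs: True
#         - mode: '0644'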
def rename(name, source, force=False, makedirs=False):
'''
If the source file exists on the system, rename it to the named file. The
named file will not be overwritten if it already exists unless the force
option is set to True.
name
The location of the file to rename to
source
The location of the file to move to the location specified with name
force
If the target location is present then the file will not be moved,
specify "force: True" to overwrite the target file
makedirs
If the target subdirectories don't exist create them
'''
name = os.path.expanduser(name)
source = os.path.expanduser(source)
ret = {
'name': name,
'changes': {},
'comment': '',
'result': True}
if not name:
return _error(ret, 'Must provide name to file.rename')
if not os.path.isabs(name):
return _error(
ret, 'Specified file {0} is not an absolute path'.format(name))
if not os.path.lexists(source):
ret['comment'] = ('Source file "{0}" has already been moved out of '
'place').format(source)
return ret
if os.path.lexists(source) and os.path.lexists(name):
if not force:
ret['comment'] = ('The target file "{0}" exists and will not be '
'overwritten'.format(name))
ret['result'] = False
return ret
elif not __opts__['test']:
# Remove the destination to prevent problems later
try:
__salt__['file.remove'](name)
except (IOError, OSError):
return _error(
ret,
'Failed to delete "{0}" in preparation for '
'forced move'.format(name)
)
if __opts__['test']:
ret['comment'] = 'File "{0}" is set to be moved to "{1}"'.format(
source,
name
)
ret['result'] = None
return ret
# Run makedirs
dname = os.path.dirname(name)
if not os.path.isdir(dname):
if makedirs:
__salt__['file.makedirs'](name)
else:
return _error(
ret,
'The target directory {0} is not present'.format(dname))
# All tests pass, move the file into place
try:
if os.path.islink(source):
linkto = os.readlink(source)
os.symlink(linkto, name)
os.unlink(source)
else:
shutil.move(source, name)
except (IOError, OSError):
return _error(
ret, 'Failed to move "{0}" to "{1}"'.format(source, name))
ret['comment'] = 'Moved "{0}" to "{1}"'.format(source, name)
ret['changes'] = {name: source}
return ret
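# Illustrative SLS usage of file.rename (hypothetical paths):
#
#     /srv/app/config.yml:
#       file.rename:
#         - source: /srv/app/config.yaml
#         - force: True
#         - makedirs: True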
def accumulated(name, filename, text, **kwargs):
'''
Prepare accumulator which can be used in template in file.managed state.
Accumulator dictionary becomes available in template. It can also be used
in file.blockreplace.
name
Accumulator name
filename
Filename which would receive this accumulator (see file.managed state
documentation about ``name``)
text
String or list for adding in accumulator
require_in / watch_in
        One of these requisites is required to make sure the accumulator is
        filled before the file is managed; it usually points at the state
        managing ``filename``.
Example:
Given the following:
.. code-block:: yaml
animals_doing_things:
file.accumulated:
- filename: /tmp/animal_file.txt
- text: ' jumps over the lazy dog.'
- require_in:
- file: animal_file
animal_file:
file.managed:
- name: /tmp/animal_file.txt
- source: salt://animal_file.txt
- template: jinja
One might write a template for ``animal_file.txt`` like the following:
.. code-block:: jinja
The quick brown fox{% for animal in accumulator['animals_doing_things'] %}{{ animal }}{% endfor %}
Collectively, the above states and template file will produce:
.. code-block:: text
The quick brown fox jumps over the lazy dog.
Multiple accumulators can be "chained" together.
.. note::
The 'accumulator' data structure is a Python dictionary.
Do not expect any loop over the keys in a deterministic order!
'''
ret = {
'name': name,
'changes': {},
'result': True,
'comment': ''
}
if not name:
return _error(ret, 'Must provide name to file.accumulated')
if text is None:
ret['result'] = False
ret['comment'] = 'No text supplied for accumulator'
return ret
require_in = __low__.get('require_in', [])
watch_in = __low__.get('watch_in', [])
deps = require_in + watch_in
if not [x for x in deps if 'file' in x]:
ret['result'] = False
ret['comment'] = 'Orphaned accumulator {0} in {1}:{2}'.format(
name,
__low__['__sls__'],
__low__['__id__']
)
return ret
if isinstance(text, six.string_types):
text = (text,)
elif isinstance(text, dict):
text = (text,)
accum_data, accum_deps = _load_accumulators()
if filename not in accum_data:
accum_data[filename] = {}
if filename not in accum_deps:
accum_deps[filename] = {}
if name not in accum_deps[filename]:
accum_deps[filename][name] = []
for accumulator in deps:
accum_deps[filename][name].extend(six.itervalues(accumulator))
if name not in accum_data[filename]:
accum_data[filename][name] = []
for chunk in text:
if chunk not in accum_data[filename][name]:
accum_data[filename][name].append(chunk)
    ret['comment'] = ('Accumulator {0} for file {1} '
                      'was updated with the given text'.format(name, filename))
_persist_accummulators(accum_data, accum_deps)
return ret
def _merge_dict(obj, k, v):
changes = {}
if k in obj:
if isinstance(obj[k], list):
if isinstance(v, list):
for a in v:
if a not in obj[k]:
changes[k] = a
obj[k].append(a)
else:
if obj[k] != v:
changes[k] = v
obj[k] = v
elif isinstance(obj[k], dict):
if isinstance(v, dict):
for a, b in six.iteritems(v):
if isinstance(b, dict) or isinstance(b, list):
updates = _merge_dict(obj[k], a, b)
for x, y in six.iteritems(updates):
changes[k + "." + x] = y
else:
if a not in obj[k] or obj[k][a] != b:
changes[k + "." + a] = b
obj[k][a] = b
else:
if obj[k] != v:
changes[k] = v
obj[k] = v
else:
if obj[k] != v:
changes[k] = v
obj[k] = v
else:
changes[k] = v
obj[k] = v
return changes
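def _example_merge_dict_usage():
    # Illustrative sketch, not part of the original module: _merge_dict
    # mutates the target dict in place and reports nested changes using
    # dotted keys, which serialize() collects into ret['changes'].
    existing = {'opts': {'a': 1}, 'tags': ['x']}
    changes = {}
    changes.update(_merge_dict(existing, 'opts', {'a': 1, 'b': 2}))
    changes.update(_merge_dict(existing, 'tags', ['x', 'y']))
    # changes == {'opts.b': 2, 'tags': 'y'}; 'existing' now holds the merged data
    return changes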
def serialize(name,
dataset=None,
dataset_pillar=None,
user=None,
group=None,
mode=None,
backup='',
makedirs=False,
show_diff=True,
create=True,
merge_if_exists=False,
**kwargs):
'''
Serializes dataset and store it into managed file. Useful for sharing
simple configuration files.
name
The location of the file to create
dataset
The dataset that will be serialized
dataset_pillar
Operates like ``dataset``, but draws from a value stored in pillar,
using the pillar path syntax used in :mod:`pillar.get
<salt.modules.pillar.get>`. This is useful when the pillar value
contains newlines, as referencing a pillar variable using a jinja/mako
template can result in YAML formatting issues due to the newlines
causing indentation mismatches.
.. versionadded:: FIXME
formatter
Write the data as this format. Supported output formats:
* JSON
* YAML
* Python (via pprint.pformat)
user
The user to own the directory, this defaults to the user salt is
running as on the minion
group
The group ownership set for the directory, this defaults to the group
salt is running as on the minion
mode
The permissions to set on this file, aka 644, 0775, 4664
backup
Overrides the default backup mode for this specific file.
makedirs
Create parent directories for destination file.
.. versionadded:: 2014.1.3
show_diff
If set to False, the diff will not be shown.
create
Default is True, if create is set to False then the file will only be
managed if the file already exists on the system.
merge_if_exists
Default is False, if merge_if_exists is True then the existing file will
be parsed and the dataset passed in will be merged with the existing
content
.. versionadded:: 2014.7.0
For example, this state:
.. code-block:: yaml
/etc/dummy/package.json:
file.serialize:
- dataset:
name: naive
description: A package using naive versioning
author: A confused individual <[email protected]>
dependencies:
express: >= 1.2.0
optimist: >= 0.1.0
engine: node 0.4.1
- formatter: json
will manage the file ``/etc/dummy/package.json``:
.. code-block:: json
{
"author": "A confused individual <[email protected]>",
"dependencies": {
"express": ">= 1.2.0",
"optimist": ">= 0.1.0"
},
"description": "A package using naive versioning",
"engine": "node 0.4.1",
"name": "naive"
}
'''
if 'env' in kwargs:
salt.utils.warn_until(
'Oxygen',
'Parameter \'env\' has been detected in the argument list. This '
'parameter is no longer used and has been replaced by \'saltenv\' '
'as of Salt Carbon. This warning will be removed in Salt Oxygen.'
)
kwargs.pop('env')
name = os.path.expanduser(name)
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
if not name:
return _error(ret, 'Must provide name to file.serialize')
if not create:
if not os.path.isfile(name):
# Don't create a file that is not already present
ret['comment'] = ('File {0} is not present and is not set for '
'creation').format(name)
return ret
formatter = kwargs.pop('formatter', 'yaml').lower()
if len([x for x in (dataset, dataset_pillar) if x]) > 1:
return _error(
ret, 'Only one of \'dataset\' and \'dataset_pillar\' is permitted')
if dataset_pillar:
dataset = __salt__['pillar.get'](dataset_pillar)
if dataset is None:
return _error(
ret, 'Neither \'dataset\' nor \'dataset_pillar\' was defined')
if salt.utils.is_windows():
if group is not None:
log.warning(
'The group argument for {0} has been ignored as this '
'is a Windows system.'.format(name)
)
group = user
serializer_name = '{0}.serialize'.format(formatter)
if serializer_name in __serializers__:
serializer = __serializers__[serializer_name]
if merge_if_exists:
if os.path.isfile(name):
if '{0}.deserialize'.format(formatter) in __serializers__:
with salt.utils.fopen(name, 'r') as fhr:
                    existing_data = __serializers__[
                        '{0}.deserialize'.format(formatter)](fhr)
else:
return {'changes': {},
'comment': ('{0} format is not supported for merging'
.format(formatter.capitalize())),
'name': name,
'result': False}
if existing_data is not None:
for k, v in six.iteritems(dataset):
if k in existing_data:
ret['changes'].update(_merge_dict(existing_data, k, v))
else:
ret['changes'][k] = v
existing_data[k] = v
dataset = existing_data
contents = __serializers__[serializer_name](dataset)
else:
return {'changes': {},
'comment': '{0} format is not supported'.format(
formatter.capitalize()),
'name': name,
'result': False
}
contents += '\n'
if __opts__['test']:
ret['changes'] = __salt__['file.check_managed_changes'](
name=name,
source=None,
source_hash={},
user=user,
group=group,
mode=mode,
template=None,
context=None,
defaults=None,
saltenv=__env__,
contents=contents,
skip_verify=False,
**kwargs
)
if ret['changes']:
ret['result'] = None
ret['comment'] = 'Dataset will be serialized and stored into {0}'.format(
name)
else:
ret['result'] = True
ret['comment'] = 'The file {0} is in the correct state'.format(name)
return ret
return __salt__['file.manage_file'](name=name,
sfn='',
ret=ret,
source=None,
source_sum={},
user=user,
group=group,
mode=mode,
saltenv=__env__,
backup=backup,
makedirs=makedirs,
template=None,
show_diff=show_diff,
contents=contents)
def mknod(name, ntype, major=0, minor=0, user=None, group=None, mode='0600'):
'''
Create a special file similar to the 'nix mknod command. The supported
device types are ``p`` (fifo pipe), ``c`` (character device), and ``b``
(block device). Provide the major and minor numbers when specifying a
character device or block device. A fifo pipe does not require this
information. The command will create the necessary dirs if needed. If a
file of the same name not of the same type/major/minor exists, it will not
be overwritten or unlinked (deleted). This is logically in place as a
safety measure because you can really shoot yourself in the foot here and
it is the behavior of 'nix ``mknod``. It is also important to note that not
just anyone can create special devices. Usually this is only done as root.
    If the state is executed as a user other than root on a minion, you may
receive a permission error.
name
name of the file
ntype
node type 'p' (fifo pipe), 'c' (character device), or 'b'
(block device)
major
major number of the device
does not apply to a fifo pipe
minor
minor number of the device
does not apply to a fifo pipe
user
owning user of the device/pipe
group
owning group of the device/pipe
mode
permissions on the device/pipe
Usage:
.. code-block:: yaml
/dev/chr:
file.mknod:
- ntype: c
- major: 180
- minor: 31
- user: root
- group: root
- mode: 660
/dev/blk:
file.mknod:
- ntype: b
- major: 8
- minor: 999
- user: root
- group: root
- mode: 660
/dev/fifo:
file.mknod:
- ntype: p
- user: root
- group: root
- mode: 660
.. versionadded:: 0.17.0
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'comment': '',
'result': False}
if not name:
return _error(ret, 'Must provide name to file.mknod')
if ntype == 'c':
# Check for file existence
if __salt__['file.file_exists'](name):
ret['comment'] = (
'File exists and is not a character device {0}. Cowardly '
'refusing to continue'.format(name)
)
# Check if it is a character device
elif not __salt__['file.is_chrdev'](name):
if __opts__['test']:
ret['comment'] = (
'Character device {0} is set to be created'
).format(name)
ret['result'] = None
else:
ret = __salt__['file.mknod'](name,
ntype,
major,
minor,
user,
group,
mode)
# Check the major/minor
else:
devmaj, devmin = __salt__['file.get_devmm'](name)
if (major, minor) != (devmaj, devmin):
ret['comment'] = (
'Character device {0} exists and has a different '
'major/minor {1}/{2}. Cowardly refusing to continue'
.format(name, devmaj, devmin)
)
# Check the perms
else:
ret = __salt__['file.check_perms'](name,
None,
user,
group,
mode)[0]
if not ret['changes']:
ret['comment'] = (
'Character device {0} is in the correct state'.format(
name
)
)
elif ntype == 'b':
# Check for file existence
if __salt__['file.file_exists'](name):
ret['comment'] = (
'File exists and is not a block device {0}. Cowardly '
'refusing to continue'.format(name)
)
# Check if it is a block device
elif not __salt__['file.is_blkdev'](name):
if __opts__['test']:
ret['comment'] = (
'Block device {0} is set to be created'
).format(name)
ret['result'] = None
else:
ret = __salt__['file.mknod'](name,
ntype,
major,
minor,
user,
group,
mode)
# Check the major/minor
else:
devmaj, devmin = __salt__['file.get_devmm'](name)
if (major, minor) != (devmaj, devmin):
ret['comment'] = (
'Block device {0} exists and has a different major/minor '
'{1}/{2}. Cowardly refusing to continue'.format(
name, devmaj, devmin
)
)
# Check the perms
else:
ret = __salt__['file.check_perms'](name,
None,
user,
group,
mode)[0]
if not ret['changes']:
ret['comment'] = (
'Block device {0} is in the correct state'.format(name)
)
elif ntype == 'p':
# Check for file existence
if __salt__['file.file_exists'](name):
ret['comment'] = (
'File exists and is not a fifo pipe {0}. Cowardly refusing '
'to continue'.format(name)
)
# Check if it is a fifo
elif not __salt__['file.is_fifo'](name):
if __opts__['test']:
ret['comment'] = 'Fifo pipe {0} is set to be created'.format(
name
)
ret['result'] = None
else:
ret = __salt__['file.mknod'](name,
ntype,
major,
minor,
user,
group,
mode)
# Check the perms
else:
ret = __salt__['file.check_perms'](name,
None,
user,
group,
mode)[0]
if not ret['changes']:
ret['comment'] = (
'Fifo pipe {0} is in the correct state'.format(name)
)
else:
ret['comment'] = (
'Node type unavailable: {0!r}. Available node types are '
'character (\'c\'), block (\'b\'), and pipe (\'p\')'.format(ntype)
)
return ret
def mod_run_check_cmd(cmd, filename, **check_cmd_opts):
'''
Execute the check_cmd logic.
    Return True if ``check_cmd`` succeeds (exit code 0), otherwise return a
    result dict that marks the state as failed.
'''
log.debug('running our check_cmd')
_cmd = '{0} {1}'.format(cmd, filename)
cret = __salt__['cmd.run_all'](_cmd, **check_cmd_opts)
if cret['retcode'] != 0:
ret = {'comment': 'check_cmd execution failed',
'skip_watch': True,
'result': False}
if cret.get('stdout'):
ret['comment'] += '\n' + cret['stdout']
if cret.get('stderr'):
ret['comment'] += '\n' + cret['stderr']
return ret
# No reason to stop, return True
return True
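# Illustrative call (the command and temp file name are hypothetical): a
# check_cmd such as ``visudo -cf`` is run against the rendered temp file
# before it replaces the real one; exit code 0 yields True, anything else
# yields a failing result dict with ``skip_watch`` set.
#
#     mod_run_check_cmd('visudo -cf', '/tmp/__salt_tmp_sudoers')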
def decode(name,
encoded_data=None,
contents_pillar=None,
encoding_type='base64',
checksum='md5'):
'''
Decode an encoded file and write it to disk
.. versionadded:: 2016.3.0
name
Path of the file to be written.
encoded_data
The encoded file. Either this option or ``contents_pillar`` must be
specified.
contents_pillar
A Pillar path to the encoded file. Uses the same path syntax as
:py:func:`pillar.get <salt.modules.pillar.get>`. The
:py:func:`hashutil.base64_encodefile
<salt.modules.hashutil.base64_encodefile>` function can load encoded
content into Pillar. Either this option or ``encoded_data`` must be
specified.
encoding_type : ``base64``
The type of encoding.
checksum : ``md5``
The hashing algorithm to use to generate checksums. Wraps the
:py:func:`hashutil.digest <salt.modules.hashutil.digest>` execution
function.
Usage:
.. code-block:: yaml
write_base64_encoded_string_to_a_file:
file.decode:
- name: /tmp/new_file
- encoding_type: base64
- contents_pillar: mypillar:thefile
# or
write_base64_encoded_string_to_a_file:
file.decode:
- name: /tmp/new_file
- encoding_type: base64
- encoded_data: |
Z2V0IHNhbHRlZAo=
    With multi-line strings, take care that the YAML indentation is correct.
    For example:
.. code-block:: yaml
write_base64_encoded_string_to_a_file:
file.decode:
- name: /tmp/new_file
- encoding_type: base64
- encoded_data: |
{{ salt.pillar.get('path:to:data') | indent(8) }}
'''
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
if not (encoded_data or contents_pillar):
raise CommandExecutionError("Specify either the 'encoded_data' or "
"'contents_pillar' argument.")
elif encoded_data and contents_pillar:
raise CommandExecutionError("Specify only one 'encoded_data' or "
"'contents_pillar' argument.")
elif encoded_data:
content = encoded_data
elif contents_pillar:
content = __salt__['pillar.get'](contents_pillar, False)
if content is False:
raise CommandExecutionError('Pillar data not found.')
else:
raise CommandExecutionError('No contents given.')
dest_exists = __salt__['file.file_exists'](name)
if dest_exists:
instr = __salt__['hashutil.base64_decodestring'](content)
insum = __salt__['hashutil.digest'](instr, checksum)
del instr # no need to keep in-memory after we have the hash
outsum = __salt__['hashutil.digest_file'](name, checksum)
if insum != outsum:
ret['changes'] = {
'old': outsum,
'new': insum,
}
if not ret['changes']:
ret['comment'] = 'File is in the correct state.'
ret['result'] = True
return ret
if __opts__['test'] is True:
ret['comment'] = 'File is set to be updated.'
ret['result'] = None
return ret
ret['result'] = __salt__['hashutil.base64_decodefile'](content, name)
ret['comment'] = 'File was updated.'
if not ret['changes']:
ret['changes'] = {
'old': None,
'new': __salt__['hashutil.digest_file'](name, checksum),
}
return ret
|
the-stack_106_22376 | #!/usr/bin/env python3
##############################################################################
#
# Module: mcci-catena-provision-helium.py
#
# Function:
# Provision a catena device through Helium cli
#
# Copyright and License:
# This file copyright (c) 2021 by
#
# MCCI Corporation
# 3520 Krums Corners Road
# Ithaca, NY 14850
#
# See accompanying LICENSE file for copyright and license information.
#
# Author:
# Sivaprakash Veluthambi, MCCI May 2021
#
##############################################################################
# Built-in imports
import argparse
import os
import re
import subprocess
import sys
# Lib imports
import pexpect
from pexpect import fdpexpect
import serial
from serial.tools import list_ports
class AppContext:
'''
Class contains common attributes and default values
'''
def __init__(self):
self.nWarnings = 0
self.nErrors = 0
self.fVerbose = False
self.fWerror = False
self.fDebug = False
self.sPort = None
self.nBaudRate = 115200
self.fWriteEnable = True
self.fEcho = False
self.fInfo = False
self.fPermissive = False
self.fRegister = False
self.dVariables = {
'APPEUI': None,
'DEVEUI': None,
'APPKEY': None,
'BASENAME' : None,
'SYSEUI' : None
}
def warning(self, msg):
'''
Display warning message
Args:
msg: receives warning messages
Returns:
No explicit result
'''
self.nWarnings = self.nWarnings + 1
print (msg, end='\n')
def error(self, msg):
'''
Display error message
Args:
msg: receives error messages
Returns:
No explicit result
'''
self.nErrors = self.nErrors + 1
print (msg, end='\n')
def fatal(self, msg):
'''
Display error message and exit
Args:
msg: receives error messages
Returns:
No explicit result
'''
self.error(msg)
sys.exit(1)
def debug(self, msg):
'''
Display debug message
Args:
msg: receives debug messages
Returns:
No explicit result
'''
if (self.fDebug):
print (msg, end='\n')
def verbose(self, msg):
'''
Display verbose message
Args:
msg: receives verbose message
Returns:
No explicit result
'''
if (self.fVerbose):
print (msg, end='\n')
def getnumerrors(self):
'''
Get the error count
Args:
NA
Returns:
Number of errors occured
'''
nErrors = self.nErrors
if (self.fWerror):
nErrors = nErrors + self.nWarnings
return nErrors
def exitchecks(self):
'''
Display total errors detected
Args:
NA
Returns:
0 if no errors occured or 1 otherwise
'''
errCount = self.getnumerrors()
if (errCount > 0):
self.error("{} errors detected".format(errCount))
return 1
else:
self.debug("No errors detected")
return 0
##############################################################################
#
# Provisioning Functions
#
##############################################################################
def openport(sPortName):
'''
Open serial port
Args:
sPortName: serial port name
Returns:
True if port opens or None otherwise
'''
# Check port is available
listPort = []
listPort = list(list_ports.comports())
portAvail = [p.device for p in listPort if p.device == sPortName]
if not portAvail:
oAppContext.error("Port {} is unavailable".format(sPortName))
return None
# Open port
if not comPort.is_open:
try:
comPort.open()
if comPort.is_open:
oAppContext.debug("Port {} opened".format(sPortName))
return True
except Exception as err:
oAppContext.fatal("Can't open port {0} : {1}".format(
sPortName,
err)
)
return None
else:
oAppContext.warning("Port {} is already opened".format(sPortName))
return True
def writecommand(sCommand):
'''
Transfer command to catena and receive result.
It sends `sCommand` (followed by a new line) to the port. It then reads
up to 1k characters until a timeout occurs (which is one second). It
then tries to parse the normal catena response which ends either with
"\nOK\n" or "\n?<error>\n"
Args:
sCommand: catena command
Returns:
catena result if success; None and error message if fail.
'''
oAppContext.debug(">>> {}".format(sCommand))
if comPort.in_waiting != 0:
comPort.reset_input_buffer()
try:
comPort.write(sCommand.encode())
oAppContext.verbose("Command sent: {}".format(sCommand))
except Exception as err:
oAppContext.error("Can't write command {0} : {1}".format(
sCommand,
err)
)
return None
try:
result = comPort.read(1024)
sResult = result.decode()
comPort.reset_input_buffer()
except Exception as err:
oAppContext.error("Can't read command response : {}".format(err))
return None
if sResult:
debugMsg = '<<< ' + sResult.replace('\r', '')
oAppContext.debug(debugMsg)
sResult = '\n'.join(sResult.splitlines())
sResult = sResult + '\n'
# Parse the results
d= {'code': 'timed out', 'msg': None}
sResult = re.search(
r'^([\s\S]*)^\n([OK]*[\s\S]*)\n$',
sResult,
re.MULTILINE)
if sResult:
d['msg'] = sResult.group(1)
d['code'] = sResult.group(2)
else:
oAppContext.error("Error parsing catena response")
if 'OK' in d['code']:
return d['msg']
else:
return None, d['code'], d['msg']
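# Illustrative use of writecommand() (device output is assumed, not captured
# from real hardware):
#
#     sReply = writecommand('system version\n')
#     # On success, sReply holds the text the catena printed before its
#     # terminating "OK" line; on failure a tuple of
#     # (None, <error code>, <partial output>) is returned instead.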
def setechooff():
'''
To turn off the system echo
Args:
NA
Returns:
True; None if fails
'''
sEchoOffCommand = "system echo off\n"
sEcho = writecommand(sEchoOffCommand)
if type(sEcho) is tuple and sEcho[0] is None:
oAppContext.fatal("Can't turn off echo: {}".format(sEcho[1]))
else:
return True
def getversion():
'''
Get the identity of the attached device.
Args:
NA
Returns:
A dict containing the catena version info; None if fails
'''
sVersionCommand = "system version\n"
sVersion = writecommand(sVersionCommand)
if type(sVersion) is tuple and sVersion[0] is None:
dResult = {'Board': '?', 'Platform-Version': '?'}
return dResult
sVersion = re.sub(r'\n', '\n\r', sVersion, re.MULTILINE)
sVersionWrap = '\r' + sVersion + '\n'
oAppContext.verbose("sVersionWrap: {}".format(sVersionWrap))
sVersionWrap = re.findall(
r'\r(\S+): ([ \S]+)\n',
sVersionWrap,
re.MULTILINE
)
dResult = dict(sVersionWrap)
if ('Board' in dResult and 'Platform-Version' in dResult):
return dResult
else:
oAppContext.error("Unrecognized version response: {}".format(
sVersion)
)
return None
def getsyseui(fPermissive):
'''
Get the system EUI for the attached device.
The device is queried to get the system EUI, which is returned as a
16-character hex string.
Args:
fPermissive: boolean value
Returns:
A dict containing the system EUI info; None if error occurs
'''
sEuiCommand = "system configure syseui\n"
lenEui = 64 / 4
kLenEuiStr = int(lenEui + (lenEui / 2))
sEUI = writecommand(sEuiCommand)
if (type(sEUI) is tuple) and (sEUI[0] is None):
if not fPermissive:
oAppContext.error("Error getting syseui: {}".format(sEUI[1]))
else:
oAppContext.warning("Error getting syseui: {}".format(
sEUI[1])
)
return None
hexmatch = re.match(r'^(([0-9A-Fa-f]{2})-){7}([0-9A-Fa-f]{2})', sEUI)
if (len(sEUI) != kLenEuiStr) or hexmatch is None:
oAppContext.error("Unrecognized EUI response: {}".format(sEUI))
return None
else:
sEUI = re.sub(r'-', '', sEUI)
sEUI = sEUI.replace('\n', '')
return sEUI
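# Illustrative response handling (the EUI value is a placeholder): the catena
# reports the system EUI as dash-separated hex, e.g. "00-02-CC-01-02-03-04-05",
# and getsyseui() strips the dashes, returning "0002CC0102030405".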
def checkcomms(fPermissive):
'''
Try to recognize the attached device, and verify that comms are
working.
The device is queried to get the system EUI, which is returned as a
16-character hex string, as well as the firmware version.
${SYSEUI} (aka oAppContext.dVariables['SYSEUI']) is set to the fetched
syseui.
oAppContext.tVersion is set to the fetched version
Args:
fPermissive: boolean value
Returns:
        True if the device was recognized and comms verified, False otherwise
'''
oAppContext.debug("CheckComms")
tVersion = getversion()
if tVersion is not None:
sEUI = getsyseui(fPermissive)
else:
sEUI = None
if (tVersion is not None) and (sEUI is None) and fPermissive:
sEUI = '{syseui-not-set}'
if (tVersion is not None) and (sEUI is not None):
oAppContext.verbose(
"\n Catena Type: {0}\
\n Platform Version: {1}\n SysEUI: {2}"
.format(
tVersion['Board'],
tVersion['Platform-Version'],
sEUI
)
)
if oAppContext.fInfo:
oAppContext.verbose(
"\n Catena Type: {0}\
\n Platform Version: {1}\n SysEUI: {2}"
.format(
tVersion['Board'],
tVersion['Platform-Version'],
sEUI
)
)
oAppContext.dVariables['SYSEUI'] = sEUI.upper()
oAppContext.tVersion = tVersion
return True
elif (tVersion is not None) and (sEUI is None):
oAppContext.fatal("SysEUI not set")
return False
def writeheliumcommand(hCmd):
'''
    Transfer a helium command and receive the result.
    This function sends `hCmd` to the helium cli, then reads the result. If
    the return code is 0 the output is returned, otherwise an error is
    reported.
Args:
hCmd: helium command
Returns:
helium result if success, None if failure
'''
heliumcmd = hCmd
oAppContext.debug("HELIUM COMMAND: {}".format(' '.join(heliumcmd)))
sResult = subprocess.Popen(
heliumcmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
hResult = pexpect.fdpexpect.fdspawn(sResult.stdout.fileno())
try:
index = hResult.expect('Enter API key')
if index == 0:
flag = 0
msg = (hResult.before).decode()
print("Enter API key: ", end="\n")
opt = input()
opt = str(opt)
sResult.stdin.write(opt.encode())
heliumResult = sResult.communicate()
except Exception:
flag = 1
msg = (hResult.before).decode()
oAppContext.verbose("HELIUM RESULT:\n {}".format(msg))
heliumResult = sResult.communicate()
if (sResult.returncode == 0) and flag == 0:
return heliumResult[0].decode()
elif (sResult.returncode == 0) and flag == 1:
return msg
else:
return None
def heliumcomms(**dVarArgs):
'''
    Send helium cli commands and receive the information needed to configure
    the catena.
    This function validates the values in the variables dict, registers the
    device through the helium cli, and fetches the registered device info.
    The results are parsed and stored back into the dict for later use by
    the script lines.
Args:
**dVarArgs: helium config info in dict
Returns:
True if success else None
'''
devInfo = {}
dAppeui = None
dDeveui = None
dAppKey = None
if ((not dVarArgs['SYSEUI']) or
('SYSEUI-NOT-SET' in dVarArgs['SYSEUI'])):
while True:
devEUI = input('Enter Device EUI: ')
if re.match(r'[0-9A-F]{16}', devEUI):
oAppContext.dVariables['SYSEUI'] = devEUI
break
else:
print('Invalid device EUI entered.')
if (not dVarArgs['APPEUI']):
while True:
appEUI = input('Enter App EUI: ')
if re.match(r'[0-9A-F]{16}', appEUI):
oAppContext.dVariables['APPEUI'] = appEUI
break
else:
print('Invalid application EUI entered.')
if (not dVarArgs['APPKEY']):
while True:
appKey = input('Enter App Key: ')
if re.match(r'[0-9A-F]{32}', appKey):
oAppContext.dVariables['APPKEY'] = appKey
break
else:
print("Invalid application key entered.")
if dVarArgs['BASENAME']:
devBaseName = dVarArgs['BASENAME'].replace('\n', '')
sysEUI = oAppContext.dVariables['SYSEUI'].lower()
sysEUI = sysEUI.replace('\n', '')
devBaseName = devBaseName + sysEUI.lower()
else:
oAppContext.fatal("Must specify devcie basename")
devRegisterCmdList = ['helium-console-cli', 'device', 'create']
devInfoCmdList = ['helium-console-cli', 'device', 'get']
devRegisterCmdList.append(oAppContext.dVariables['APPEUI'])
devRegisterCmdList.append(oAppContext.dVariables['APPKEY'])
devRegisterCmdList.append(oAppContext.dVariables['SYSEUI'])
devRegisterCmdList.append(devBaseName)
devInfoCmdList.append(oAppContext.dVariables['APPEUI'])
devInfoCmdList.append(oAppContext.dVariables['APPKEY'])
devInfoCmdList.append(oAppContext.dVariables['SYSEUI'])
devRegisterResult = writeheliumcommand(devRegisterCmdList)
if devRegisterResult is not None:
oAppContext.debug("HELIUM - Device Registered:\n {}".format(
devRegisterResult)
)
else:
oAppContext.fatal("Device Registration failed")
devInfoResult = writeheliumcommand(devInfoCmdList)
if devInfoResult is not None:
oAppContext.debug("HELIUM - Device Info:\n {}".format(devInfoResult))
else:
oAppContext.fatal("Getting Device Info failed")
regMatch = re.search(
r'([\s\S]*){\n([\s\S]*)}\n',
devInfoResult,
re.MULTILINE)
if not regMatch.group(2):
oAppContext.fatal("Error in Device Info")
else:
devInfoPacked = regMatch.group(2)
devInfoPacked = re.sub(' {2,}', '', devInfoPacked)
devInfoUnpack = re.findall(
r'(\S+): \"(\S+)\"\,\n',
devInfoPacked,
re.MULTILINE)
devInfo = dict(devInfoUnpack)
for k, v in devInfo.items():
if k.upper() == "APP_EUI":
dAppeui = v
if k.upper() == "DEV_EUI":
dDeveui = v
if k.upper() == "APP_KEY":
dAppKey = v
if not dAppeui:
oAppContext.fatal("APPEUI is none")
elif not dDeveui:
oAppContext.fatal("DEVEUI is none")
elif not dAppKey:
oAppContext.fatal("APPKEY is none")
else:
oAppContext.debug("APPEUI: {0}\nDEVEUI: {1}\nAPPKEY: {2}\n".format(
dAppeui,
dDeveui,
dAppKey)
)
if ((dAppeui == oAppContext.dVariables['APPEUI']) and
(dAppKey == oAppContext.dVariables['APPKEY']) and
(dDeveui == oAppContext.dVariables['SYSEUI'])):
oAppContext.dVariables['DEVEUI'] = dDeveui
return True
else:
return None
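# The two helium-console-cli invocations assembled above take the form below
# (EUIs, key and basename shown as placeholders):
#
#     helium-console-cli device create <APPEUI> <APPKEY> <DEVEUI> <basename+deveui>
#     helium-console-cli device get <APPEUI> <APPKEY> <DEVEUI>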
def expand(sLine):
'''
Perform macro expansion on a line of text
This function is looking for strings of the form "${name}" in sLine. If
${name} was written, and name was found in the dict, name's value is
used.
Args:
sLine: catena command line from cat file
Returns:
String suitably expanded
'''
sResult = re.search(r'^([a-z ]+)\$(\{.*\})$', sLine)
if not sResult:
return sLine
if sResult:
sPrefix = sResult.group(1)
sWord = re.search(r'\$\{(.*)\}', sLine)
sName = sWord.group(1)
if not sName in oAppContext.dVariables:
oAppContext.error("Unknown macro {}".format(sName))
sValue = '{' + sName + '}'
else:
sValue = oAppContext.dVariables[sName]
sResult = sPrefix + sValue
oAppContext.verbose("Expansion of {0}: {1}"
.format(sLine, sResult)
)
return sResult
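# Illustrative expansion (the value is a placeholder): with
# oAppContext.dVariables['APPEUI'] set to '0123456789ABCDEF',
#
#     expand('lorawan configure appeui ${APPEUI}')
#     # -> 'lorawan configure appeui 0123456789ABCDEF'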
def doscript(sFileName):
'''
    Execute a provisioning script file against the attached device.
The file is opened and read line by line.
Blank lines are ignored. Any text after a '#' character is treated as a
comment and discarded. Variables of the form ${name} are expanded. Any
error causes the script to stop.
Args:
sFileName: script name
Returns:
True for script success, False for failure
'''
oAppContext.debug("DoScript: {}".format(sFileName))
try:
with open(sFileName, 'r') as rFile:
rFile = rFile.readlines()
except EnvironmentError as e:
oAppContext.error("Can't open file: {}".format(e))
return False
if not rFile:
oAppContext.error("Empty file")
return False
for line in rFile:
line = re.sub('\n$', '', line)
line = re.sub(r'^\s*#.*$', '', line)
line = expand(line)
if (re.sub(r'^\s*$', '', line) != ''):
if (oAppContext.fEcho):
sys.stdout.write(line + '\n')
if (oAppContext.fWriteEnable):
sResult = writecommand((re.sub('\n$', '', line)) + '\n')
if not (type(sResult) is tuple and sResult[0] is None):
continue
else:
oAppContext.error("Line: {0}\nError: \n{1}".format(
line,
sResult[1])
)
return False
return True
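# Illustrative script contents (the commands themselves are an assumption and
# depend on the catena sketch): '#' starts a comment, blank lines are skipped
# and ${NAME} macros are expanded before each line is sent to the device.
#
#     # provision OTAA keys
#     lorawan configure deveui ${DEVEUI}
#     lorawan configure appeui ${APPEUI}
#     lorawan configure appkey ${APPKEY}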
def closeport(sPortName):
'''
Close serial port
Args:
sPortName: serial port name
Returns:
True if closed or None otherwise
'''
if comPort.is_open:
comPort.reset_input_buffer()
comPort.reset_output_buffer()
comPort.close()
oAppContext.debug('Port {} closed'.format(sPortName))
return True
else:
oAppContext.error('Port {} already closed'.format(sPortName))
return None
##############################################################################
#
# main
#
##############################################################################
if __name__ == '__main__':
pName = os.path.basename(__file__)
pDir = os.path.dirname(os.path.abspath(__file__))
oAppContext = AppContext()
optparser = argparse.ArgumentParser(
description='MCCI Catena Provisioning')
optparser.add_argument(
'-baud',
action='store',
nargs='?',
dest='baudrate',
type=int,
help='Specify the baud rate as a number. Default is 115200')
optparser.add_argument(
'-port',
action='store',
nargs=1,
dest='portname',
type=str,
required=True,
help='Specify the COM port name. This is system specific')
optparser.add_argument(
'-D',
action='store_true',
default=False,
dest='debug',
help='Operate in debug mode. Causes more output to be produced')
optparser.add_argument(
'-info',
action='store_true',
default=False,
dest='info',
help='Display the Catena info')
optparser.add_argument(
'-v',
action='store_true',
default=False,
dest='verbose',
help='Operate in verbose mode')
optparser.add_argument(
'-echo',
action='store_true',
default=False,
dest='echo',
help='Echo all device operations')
optparser.add_argument(
'-V',
action='append',
dest='vars',
        help='Specify helium config info in name=value format')
optparser.add_argument(
'-nowrite',
action='store_false',
default=True,
dest='writeEnable',
help='Disable writes to the device')
optparser.add_argument(
'-permissive',
action='store_true',
default=False,
dest='permissive',
help='Don\'t give up if SYSEUI isn\'t set.')
optparser.add_argument(
'-r',
action='store_true',
default=False,
dest='register',
help='Registers the device in helium network')
optparser.add_argument(
'-Werror',
action='store_true',
default=False,
dest='warning',
help='Warning messages become error messages')
optparser.add_argument(
'-s',
action='store',
nargs=1,
dest='script',
type=str,
help='Specify script name to load catena info')
opt = optparser.parse_args()
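    # Example invocation (port, script name and variable values are
    # placeholders); -V may be repeated, one NAME=value pair per flag:
    #
    #   python mcci-catena-provision-helium.py -port COM11 -r -D \
    #       -V APPEUI=0123456789ABCDEF -V BASENAME=catena- \
    #       -s catenainit-otaa.cat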
if not opt.portname:
oAppContext.fatal("Must specify -port")
oAppContext.sPort = opt.portname[0]
if opt.baudrate and (opt.baudrate < 9600):
oAppContext.fatal("Baud rate too small: {}".format(opt.baudrate))
elif opt.baudrate and (opt.baudrate > 9600):
oAppContext.nBaudRate = opt.baudrate
# Serial port Settings
comPort = serial.Serial()
comPort.port = oAppContext.sPort
comPort.baudrate = oAppContext.nBaudRate
comPort.bytesize = serial.EIGHTBITS
comPort.parity = serial.PARITY_NONE
comPort.stopbits = serial.STOPBITS_ONE
# comPort.dsrdtr = True
# comPort.rtscts = True
comPort.timeout = 1
# Add validate and split -V args
if opt.vars:
varCount = len(opt.vars)
else:
varCount = 0
for i in range(varCount):
mResult = re.search(r'^([A-Za-z0-9_]+)=(.*)$', opt.vars[i])
if not mResult:
oAppContext.fatal("Illegal variable specification: {}".format(
opt.vars[i])
)
else:
oAppContext.dVariables[mResult.group(1)] = mResult.group(2)
# Copy the boolean params
oAppContext.fDebug = opt.debug
oAppContext.fVerbose = opt.verbose
oAppContext.fWerror = opt.warning
oAppContext.fEcho = opt.echo
oAppContext.fWriteEnable = opt.writeEnable
oAppContext.fInfo = opt.info
oAppContext.fPermissive = opt.permissive
oAppContext.fRegister = opt.register
hPort = openport(oAppContext.sPort)
if not hPort:
sys.exit(1)
# Turn off echo, before start provisioning
setechooff()
checkcomms(oAppContext.fPermissive)
listDirContent = os.listdir(pDir)
    heliumCli = [
        True for dirfile in listDirContent
        if dirfile in ('helium-console-cli', 'helium-console-cli.exe')
    ]
if not heliumCli:
oAppContext.fatal("helium cli not found; add to path: {}".format(
pDir)
)
if oAppContext.fRegister:
heliumcommResult = heliumcomms(**oAppContext.dVariables)
if heliumcommResult:
oAppContext.debug("Device Created Successfully")
else:
oAppContext.fatal("Failed to create device")
oAppContext.verbose("Vars Dict:\n {}".format(oAppContext.dVariables))
if opt.script:
doscript(opt.script[0])
cResult = closeport(oAppContext.sPort)
if not cResult:
oAppContext.error("Can't close port {}".format(oAppContext.sPort))
oAppContext.exitchecks()
|
the-stack_106_22377 | import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util
import nipype.interfaces.fsl as fsl
import nipype.interfaces.c3 as c3
def create_nonlinear_register(name='nonlinear_register'):
"""
Performs non-linear registration of an input file to a reference file.
Parameters
----------
name : string, optional
Name of the workflow.
Returns
-------
nonlinear_register : nipype.pipeline.engine.Workflow
Notes
-----
Workflow Inputs::
inputspec.input_brain : string (nifti file)
File of brain to be normalized (registered)
inputspec.input_skull : string (nifti file)
File of input brain with skull
inputspec.reference_brain : string (nifti file)
Target brain file to normalize to
inputspec.reference_skull : string (nifti file)
Target brain with skull to normalize to
inputspec.fnirt_config : string (fsl fnirt config file)
Configuration file containing parameters that can be specified in fnirt
Workflow Outputs::
outputspec.output_brain : string (nifti file)
Normalizion of input brain file
outputspec.linear_xfm : string (.mat file)
Affine matrix of linear transformation of brain file
outputspec.invlinear_xfm : string
Inverse of affine matrix of linear transformation of brain file
outputspec.nonlinear_xfm : string
Nonlinear field coefficients file of nonlinear transformation
Registration Procedure:
1. Perform a linear registration to get affine transformation matrix.
2. Perform a nonlinear registration on an input file to the reference file utilizing affine
transformation from the previous step as a starting point.
3. Invert the affine transformation to provide the user a transformation (affine only) from the
space of the reference file to the input file.
Workflow Graph:
.. image:: ../images/nonlinear_register.dot.png
:width: 500
Detailed Workflow Graph:
.. image:: ../images/nonlinear_register_detailed.dot.png
:width: 500
"""
nonlinear_register = pe.Workflow(name=name)
inputspec = pe.Node(util.IdentityInterface(fields=['input_brain',
'input_skull',
'reference_brain',
'reference_skull',
'ref_mask',
'fnirt_config']),
name='inputspec')
outputspec = pe.Node(util.IdentityInterface(fields=['output_brain',
'linear_xfm',
'invlinear_xfm',
'nonlinear_xfm']),
name='outputspec')
linear_reg = pe.Node(interface=fsl.FLIRT(), name='linear_reg_0')
linear_reg.inputs.cost = 'corratio'
nonlinear_reg = pe.Node(interface=fsl.FNIRT(),
name='nonlinear_reg_1')
nonlinear_reg.inputs.fieldcoeff_file = True
nonlinear_reg.inputs.jacobian_file = True
brain_warp = pe.Node(interface=fsl.ApplyWarp(),
name='brain_warp')
inv_flirt_xfm = pe.Node(interface=fsl.utils.ConvertXFM(),
name='inv_linear_reg0_xfm')
inv_flirt_xfm.inputs.invert_xfm = True
nonlinear_register.connect(inputspec, 'input_brain',
linear_reg, 'in_file')
nonlinear_register.connect(inputspec, 'reference_brain',
linear_reg, 'reference')
nonlinear_register.connect(inputspec, 'input_skull',
nonlinear_reg, 'in_file')
nonlinear_register.connect(inputspec, 'reference_skull',
nonlinear_reg, 'ref_file')
nonlinear_register.connect(inputspec, 'ref_mask',
nonlinear_reg, 'refmask_file')
# FNIRT parameters are specified by FSL config file
    # ${FSLDIR}/etc/flirtsch/T1_2_MNI152_2mm.cnf (or user-specified)
nonlinear_register.connect(inputspec, 'fnirt_config',
nonlinear_reg, 'config_file')
nonlinear_register.connect(linear_reg, 'out_matrix_file',
nonlinear_reg, 'affine_file')
nonlinear_register.connect(nonlinear_reg, 'fieldcoeff_file',
outputspec, 'nonlinear_xfm')
nonlinear_register.connect(inputspec, 'input_brain',
brain_warp, 'in_file')
nonlinear_register.connect(nonlinear_reg, 'fieldcoeff_file',
brain_warp, 'field_file')
nonlinear_register.connect(inputspec, 'reference_brain',
brain_warp, 'ref_file')
nonlinear_register.connect(brain_warp, 'out_file',
outputspec, 'output_brain')
nonlinear_register.connect(linear_reg, 'out_matrix_file',
inv_flirt_xfm, 'in_file')
nonlinear_register.connect(inv_flirt_xfm, 'out_file',
outputspec, 'invlinear_xfm')
nonlinear_register.connect(linear_reg, 'out_matrix_file',
outputspec, 'linear_xfm')
return nonlinear_register
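def _example_nonlinear_register_usage():
    # Illustrative wiring only (file names are placeholders); it mirrors the
    # workflow inputs documented in the docstring above and is not part of
    # the original module.
    wf = create_nonlinear_register()
    wf.inputs.inputspec.input_brain = 'sub-01_T1w_brain.nii.gz'
    wf.inputs.inputspec.input_skull = 'sub-01_T1w.nii.gz'
    wf.inputs.inputspec.reference_brain = 'MNI152_T1_2mm_brain.nii.gz'
    wf.inputs.inputspec.reference_skull = 'MNI152_T1_2mm.nii.gz'
    wf.inputs.inputspec.ref_mask = 'MNI152_T1_2mm_brain_mask_dil.nii.gz'
    wf.inputs.inputspec.fnirt_config = 'T1_2_MNI152_2mm'
    return wf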
def create_register_func_to_mni(name='register_func_to_mni'):
"""
Registers a functional scan in native space to MNI standard space. This is meant to be used
    after create_nonlinear_register() has been run and relies on some of its outputs.
Parameters
----------
name : string, optional
Name of the workflow.
Returns
-------
register_func_to_mni : nipype.pipeline.engine.Workflow
Notes
-----
Workflow Inputs::
inputspec.func : string (nifti file)
Input functional scan to be registered to MNI space
inputspec.mni : string (nifti file)
Reference MNI file
inputspec.anat : string (nifti file)
Corresponding anatomical scan of subject
inputspec.interp : string
Type of interpolation to use ('trilinear' or 'nearestneighbour' or 'sinc')
inputspec.anat_to_mni_nonlinear_xfm : string (warp file)
Corresponding anatomical native space to MNI warp file
inputspec.anat_to_mni_linear_xfm : string (mat file)
Corresponding anatomical native space to MNI mat file
Workflow Outputs::
outputspec.func_to_anat_linear_xfm : string (mat file)
Affine transformation from functional to anatomical native space
outputspec.func_to_mni_linear_xfm : string (mat file)
Affine transformation from functional to MNI space
outputspec.mni_to_func_linear_xfm : string (mat file)
Affine transformation from MNI to functional space
outputspec.mni_func : string (nifti file)
Functional scan registered to MNI standard space
Workflow Graph:
.. image:: ../images/register_func_to_mni.dot.png
:width: 500
Detailed Workflow Graph:
.. image:: ../images/register_func_to_mni_detailed.dot.png
:width: 500
"""
register_func_to_mni = pe.Workflow(name=name)
inputspec = pe.Node(util.IdentityInterface(fields=['func',
'mni',
'anat',
'interp',
'anat_to_mni_nonlinear_xfm',
'anat_to_mni_linear_xfm']),
name='inputspec')
outputspec = pe.Node(util.IdentityInterface(fields=['func_to_anat_linear_xfm',
'func_to_mni_linear_xfm',
'mni_to_func_linear_xfm',
'mni_func']),
name='outputspec')
linear_reg = pe.Node(interface=fsl.FLIRT(),
name='linear_func_to_anat')
linear_reg.inputs.cost = 'corratio'
linear_reg.inputs.dof = 6
mni_warp = pe.Node(interface=fsl.ApplyWarp(),
name='mni_warp')
mni_affine = pe.Node(interface=fsl.ConvertXFM(),
name='mni_affine')
mni_affine.inputs.concat_xfm = True
register_func_to_mni.connect(linear_reg, 'out_matrix_file',
mni_affine, 'in_file2')
register_func_to_mni.connect(inputspec, 'anat_to_mni_linear_xfm',
mni_affine, 'in_file')
register_func_to_mni.connect(mni_affine, 'out_file',
outputspec, 'func_to_mni_linear_xfm')
inv_mni_affine = pe.Node(interface=fsl.ConvertXFM(),
name='inv_mni_affine')
inv_mni_affine.inputs.invert_xfm = True
register_func_to_mni.connect(mni_affine, 'out_file',
inv_mni_affine, 'in_file')
register_func_to_mni.connect(inv_mni_affine, 'out_file',
outputspec, 'mni_to_func_linear_xfm')
register_func_to_mni.connect(inputspec, 'func',
linear_reg, 'in_file')
register_func_to_mni.connect(inputspec, 'anat',
linear_reg, 'reference')
register_func_to_mni.connect(inputspec, 'interp',
linear_reg, 'interp')
register_func_to_mni.connect(inputspec, 'func',
mni_warp, 'in_file')
register_func_to_mni.connect(inputspec, 'mni',
mni_warp, 'ref_file')
register_func_to_mni.connect(inputspec, 'anat_to_mni_nonlinear_xfm',
mni_warp, 'field_file')
register_func_to_mni.connect(linear_reg, 'out_matrix_file',
mni_warp, 'premat')
register_func_to_mni.connect(linear_reg, 'out_matrix_file',
outputspec, 'func_to_anat_linear_xfm')
register_func_to_mni.connect(mni_warp, 'out_file',
outputspec, 'mni_func')
return register_func_to_mni
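def _example_register_func_to_mni_usage():
    # Illustrative wiring only (file names are placeholders); the two
    # transform inputs are assumed to come from create_nonlinear_register().
    wf = create_register_func_to_mni()
    wf.inputs.inputspec.func = 'sub-01_task-rest_bold_mean.nii.gz'
    wf.inputs.inputspec.anat = 'sub-01_T1w_brain.nii.gz'
    wf.inputs.inputspec.mni = 'MNI152_T1_2mm_brain.nii.gz'
    wf.inputs.inputspec.interp = 'trilinear'
    wf.inputs.inputspec.anat_to_mni_nonlinear_xfm = 'anat_to_mni_fieldcoeff.nii.gz'
    wf.inputs.inputspec.anat_to_mni_linear_xfm = 'anat_to_mni_affine.mat'
    return wf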
def create_register_func_to_anat(fieldmap_distortion=False,
name='register_func_to_anat'):
"""
Registers a functional scan in native space to anatomical space using a
linear transform and does not include bbregister.
Parameters
----------
fieldmap_distortion : bool, optional
If field map-based distortion correction is being run, FLIRT should
take in the appropriate field map-related inputs.
name : string, optional
Name of the workflow.
Returns
-------
create_register_func_to_anat : nipype.pipeline.engine.Workflow
Notes
-----
Workflow Inputs::
inputspec.func : string (nifti file)
Input functional scan to be registered to anatomical space
inputspec.anat : string (nifti file)
Corresponding anatomical scan of subject
inputspec.interp : string
Type of interpolation to use ('trilinear' or 'nearestneighbour' or 'sinc')
Workflow Outputs::
outputspec.func_to_anat_linear_xfm_nobbreg : string (mat file)
Affine transformation from functional to anatomical native space
outputspec.anat_func_nobbreg : string (nifti file)
Functional scan registered to anatomical space
"""
register_func_to_anat = pe.Workflow(name=name)
inputspec = pe.Node(util.IdentityInterface(fields=['func',
'anat',
'interp',
'fieldmap',
'fieldmapmask']),
name='inputspec')
inputNode_echospacing = pe.Node(
util.IdentityInterface(fields=['echospacing']),
name='echospacing_input')
inputNode_pedir = pe.Node(util.IdentityInterface(fields=['pedir']),
name='pedir_input')
outputspec = pe.Node(util.IdentityInterface(fields=['func_to_anat_linear_xfm_nobbreg',
'anat_func_nobbreg']),
name='outputspec')
linear_reg = pe.Node(interface=fsl.FLIRT(),
name='linear_func_to_anat')
linear_reg.inputs.cost = 'corratio'
linear_reg.inputs.dof = 6
if fieldmap_distortion:
register_func_to_anat.connect(inputNode_pedir, 'pedir',
linear_reg, 'pedir')
register_func_to_anat.connect(inputspec, 'fieldmap',
linear_reg, 'fieldmap')
register_func_to_anat.connect(inputspec, 'fieldmapmask',
linear_reg, 'fieldmapmask')
register_func_to_anat.connect(inputNode_echospacing, 'echospacing',
linear_reg, 'echospacing')
register_func_to_anat.connect(inputspec, 'func', linear_reg, 'in_file')
register_func_to_anat.connect(inputspec, 'anat', linear_reg, 'reference')
register_func_to_anat.connect(inputspec, 'interp', linear_reg, 'interp')
register_func_to_anat.connect(linear_reg, 'out_matrix_file',
outputspec,
'func_to_anat_linear_xfm_nobbreg')
register_func_to_anat.connect(linear_reg, 'out_file',
outputspec, 'anat_func_nobbreg')
return register_func_to_anat
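def _example_register_func_to_anat_usage():
    # Illustrative wiring only (file names are placeholders). When built with
    # fieldmap_distortion=True the workflow additionally expects the
    # fieldmap, fieldmapmask, echospacing and pedir inputs defined above.
    wf = create_register_func_to_anat(fieldmap_distortion=False)
    wf.inputs.inputspec.func = 'sub-01_task-rest_bold_mean.nii.gz'
    wf.inputs.inputspec.anat = 'sub-01_T1w_brain.nii.gz'
    wf.inputs.inputspec.interp = 'trilinear'
    return wf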
def create_bbregister_func_to_anat(fieldmap_distortion=False,
name='bbregister_func_to_anat'):
"""
Registers a functional scan in native space to structural. This is meant to be used
    after create_nonlinear_register() has been run and relies on some of its outputs.
Parameters
----------
fieldmap_distortion : bool, optional
If field map-based distortion correction is being run, FLIRT should
take in the appropriate field map-related inputs.
name : string, optional
Name of the workflow.
Returns
-------
register_func_to_anat : nipype.pipeline.engine.Workflow
Notes
-----
Workflow Inputs::
inputspec.func : string (nifti file)
            Input functional scan to be registered to anatomical space
inputspec.anat_skull : string (nifti file)
Corresponding full-head scan of subject
inputspec.linear_reg_matrix : string (mat file)
Affine matrix from linear functional to anatomical registration
inputspec.anat_wm_segmentation : string (nifti file)
White matter segmentation probability mask in anatomical space
inputspec.bbr_schedule : string (.sch file)
Boundary based registration schedule file for flirt command
Workflow Outputs::
outputspec.func_to_anat_linear_xfm : string (mat file)
Affine transformation from functional to anatomical native space
outputspec.anat_func : string (nifti file)
Functional data in anatomical space
"""
register_bbregister_func_to_anat = pe.Workflow(name=name)
inputspec = pe.Node(util.IdentityInterface(fields=['func',
'anat_skull',
'linear_reg_matrix',
'anat_wm_segmentation',
'bbr_schedule',
'fieldmap',
'fieldmapmask'
]),
name='inputspec')
inputNode_echospacing = pe.Node(
util.IdentityInterface(fields=['echospacing']),
name='echospacing_input')
inputNode_pedir = pe.Node(util.IdentityInterface(fields=['pedir']),
name='pedir_input')
outputspec = pe.Node(util.IdentityInterface(fields=['func_to_anat_linear_xfm',
'anat_func']),
name='outputspec')
wm_bb_mask = pe.Node(interface=fsl.ImageMaths(),
name='wm_bb_mask')
wm_bb_mask.inputs.op_string = '-thr 0.5 -bin'
register_bbregister_func_to_anat.connect(inputspec, 'anat_wm_segmentation',
wm_bb_mask, 'in_file')
def bbreg_args(bbreg_target):
return '-cost bbr -wmseg ' + bbreg_target
bbreg_func_to_anat = pe.Node(interface=fsl.FLIRT(),
name='bbreg_func_to_anat')
bbreg_func_to_anat.inputs.dof = 6
register_bbregister_func_to_anat.connect(inputspec, 'bbr_schedule',
bbreg_func_to_anat, 'schedule')
register_bbregister_func_to_anat.connect(wm_bb_mask, ('out_file', bbreg_args),
bbreg_func_to_anat, 'args')
register_bbregister_func_to_anat.connect(inputspec, 'func',
bbreg_func_to_anat, 'in_file')
register_bbregister_func_to_anat.connect(inputspec, 'anat_skull',
bbreg_func_to_anat, 'reference')
register_bbregister_func_to_anat.connect(inputspec, 'linear_reg_matrix',
bbreg_func_to_anat, 'in_matrix_file')
if fieldmap_distortion:
def convert_pedir(pedir):
# FSL Flirt requires pedir input encoded as an int
conv_dct = {'x': 1, 'y': 2, 'z': 3, '-x': -1, '-y': -2, '-z': -3}
if not isinstance(pedir, str):
raise Exception("\n\nPhase-encoding direction must be a "
"string value.\n\n")
if pedir not in conv_dct.keys():
raise Exception("\n\nInvalid phase-encoding direction "
"entered: {0}\n\n".format(pedir))
return conv_dct[pedir]
register_bbregister_func_to_anat.connect(inputNode_pedir, ('pedir', convert_pedir),
bbreg_func_to_anat, 'pedir')
register_bbregister_func_to_anat.connect(inputspec, 'fieldmap',
bbreg_func_to_anat, 'fieldmap')
register_bbregister_func_to_anat.connect(inputspec, 'fieldmapmask',
bbreg_func_to_anat, 'fieldmapmask')
register_bbregister_func_to_anat.connect(inputNode_echospacing, 'echospacing',
bbreg_func_to_anat, 'echospacing')
register_bbregister_func_to_anat.connect(bbreg_func_to_anat, 'out_matrix_file',
outputspec, 'func_to_anat_linear_xfm')
register_bbregister_func_to_anat.connect(bbreg_func_to_anat, 'out_file',
outputspec, 'anat_func')
return register_bbregister_func_to_anat
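# A rough sketch of how the two registration stages are usually chained
# (hypothetical parent workflow and field names, shown for illustration only):
# the affine produced by the linear FLIRT stage feeds the BBR stage as
# 'linear_reg_matrix', and the BBR schedule is typically FSL's bbr.sch.
#
#   linear = create_register_func_to_anat(name='linear_func_to_anat')
#   bbreg = create_bbregister_func_to_anat(name='bbreg_func_to_anat')
#   parent_wf.connect(linear, 'outputspec.func_to_anat_linear_xfm_nobbreg',
#                     bbreg, 'inputspec.linear_reg_matrix')
#   bbreg.inputs.inputspec.bbr_schedule = \
#       os.path.join(os.environ['FSLDIR'], 'etc/flirtsch/bbr.sch')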
def create_wf_calculate_ants_warp(name='create_wf_calculate_ants_warp', mult_input=0, num_threads=1):
'''
Calculates the nonlinear ANTS registration transform. This workflow
employs the antsRegistration tool:
http://stnava.github.io/ANTs/
Parameters
----------
name : string, optional
Name of the workflow.
Returns
-------
calc_ants_warp_wf : nipype.pipeline.engine.Workflow
Notes
-----
Some of the inputs listed below are lists or lists of lists. This is
because antsRegistration can perform multiple stages of calculations
depending on how the user configures their registration.
    For example, if one wants to employ a different transform (with different
    parameters) at each stage, the lists would be configured like this:
    warp_wf.inputs.inputspec.transforms = ['Rigid','Affine','SyN']
    warp_wf.inputs.inputspec.transform_parameters = [[0.1],[0.1],[0.1,3,0]]
    ...where each element in the first list is a transform to be used at each
    stage, 'Rigid' being for stage 1, 'Affine' for stage 2, etc. The lists
    within the list for transform_parameters then correspond to each stage's
    transform, with [0.1] applying to 'Rigid' and 'Affine' (stages 1 and 2),
    and [0.1,3,0] applying to 'SyN' of stage 3.
In some cases, when a parameter is not needed for a stage, 'None' must be
entered in its place if there are other parameters for other stages.
Workflow Inputs::
inputspec.anatomical_brain : string (nifti file)
File of brain to be normalized (registered)
inputspec.reference_brain : string (nifti file)
Target brain file to normalize to
inputspec.dimension : integer
Dimension of the image (default: 3)
inputspec.use_histogram_matching : boolean
Histogram match the images before registration
inputspec.winsorize_lower_quantile : float
Winsorize data based on quantiles (lower range)
inputspec.winsorize_higher_quantile : float
Winsorize data based on quantiles (higher range)
inputspec.metric : list of strings
Image metric(s) to be used at each stage
inputspec.metric_weight : list of floats
Modulate the per-stage weighting of the corresponding metric
inputspec.radius_or_number_of_bins : list of integers
Number of bins in each stage for the MI and Mattes metric, the
radius for other metrics
inputspec.sampling_strategy : list of strings
Sampling strategy (or strategies) to use for the metrics
{None, Regular, or Random}
inputspec.sampling_percentage : list of floats
Defines the sampling strategy
{float value, or None}
inputspec.number_of_iterations : list of lists of integers
Determines the convergence
inputspec.convergence_threshold : list of floats
Threshold compared to the slope of the line fitted in convergence
inputspec.convergence_window_size : list of integers
Window size of convergence calculations
inputspec.transforms : list of strings
Selection of transform options. See antsRegistration documentation
for a full list of options and their descriptions
inputspec.transform_parameters : list of lists of floats
Fine-tuning for the different transform options
inputspec.shrink_factors : list of lists of integers
Specify the shrink factor for the virtual domain (typically the
fixed image) at each level
inputspec.smoothing_sigmas : list of lists of floats
Specify the sigma of gaussian smoothing at each level
Workflow Outputs::
outputspec.warp_field : string (nifti file)
Output warp field of registration
outputspec.inverse_warp_field : string (nifti file)
Inverse of the warp field of the registration
outputspec.ants_affine_xfm : string (.mat file)
The affine matrix of the registration
outputspec.ants_inverse_affine_xfm : string (.mat file)
The affine matrix of the reverse registration
outputspec.composite_transform : string (nifti file)
The combined transform including the warp field and rigid & affine
linear warps
outputspec.normalized_output_brain : string (nifti file)
Template-registered version of input brain
Registration Procedure:
1. Calculates a nonlinear anatomical-to-template registration.
Workflow Graph:
.. image::
:width: 500
Detailed Workflow Graph:
.. image::
:width: 500
'''
import nipype.interfaces.ants as ants
from nipype.interfaces.utility import Function
from CPAC.registration.utils import seperate_warps_list, \
combine_inputs_into_list, \
hardcoded_reg
calc_ants_warp_wf = pe.Workflow(name=name)
inputspec = pe.Node(util.IdentityInterface(fields=['anatomical_brain',
'reference_brain', 'dimension', 'use_histogram_matching',
'winsorize_lower_quantile', 'winsorize_upper_quantile', 'metric',
'metric_weight', 'radius_or_number_of_bins', 'sampling_strategy',
'sampling_percentage', 'number_of_iterations',
'convergence_threshold', 'convergence_window_size', 'transforms',
'transform_parameters', 'shrink_factors', 'smoothing_sigmas',
'write_composite_transform', 'anatomical_skull',
'reference_skull']), name='inputspec')
# use ANTS to warp the masked anatomical image to a template image
'''
calculate_ants_warp = pe.Node(interface=ants.Registration(),
name='calculate_ants_warp')
calculate_ants_warp.inputs.output_warped_image = True
calculate_ants_warp.inputs.initial_moving_transform_com = 0
'''
reg_imports = ['import os', 'import subprocess']
calculate_ants_warp = \
pe.Node(interface=util.Function(input_names=['anatomical_brain',
'reference_brain',
'anatomical_skull',
'reference_skull'],
output_names=['warp_list',
'warped_image'],
function=hardcoded_reg,
imports=reg_imports),
name='calc_ants_warp')
calculate_ants_warp.interface.num_threads = num_threads
select_forward_initial = pe.Node(util.Function(input_names=['warp_list',
'selection'], output_names=['selected_warp'],
function=seperate_warps_list), name='select_forward_initial')
select_forward_initial.inputs.selection = "Initial"
select_forward_rigid = pe.Node(util.Function(input_names=['warp_list',
'selection'], output_names=['selected_warp'],
function=seperate_warps_list), name='select_forward_rigid')
select_forward_rigid.inputs.selection = "Rigid"
select_forward_affine = pe.Node(util.Function(input_names=['warp_list',
'selection'], output_names=['selected_warp'],
function=seperate_warps_list), name='select_forward_affine')
select_forward_affine.inputs.selection = "Affine"
select_forward_warp = pe.Node(util.Function(input_names=['warp_list',
'selection'], output_names=['selected_warp'],
function=seperate_warps_list), name='select_forward_warp')
select_forward_warp.inputs.selection = "3Warp"
select_inverse_warp = pe.Node(util.Function(input_names=['warp_list',
'selection'], output_names=['selected_warp'],
function=seperate_warps_list), name='select_inverse_warp')
select_inverse_warp.inputs.selection = "Inverse"
outputspec = pe.Node(util.IdentityInterface(fields=['ants_initial_xfm',
'ants_rigid_xfm', 'ants_affine_xfm', 'warp_field',
'inverse_warp_field', 'composite_transform', 'wait',
'normalized_output_brain']), name='outputspec')
# connections from inputspec
if mult_input == 1:
'''
combine_inputs = pe.Node(util.Function(input_names=['input1', 'input2', 'input3'],
output_names=['inputs_list'], function=combine_inputs_into_list),
name='ants_reg_combine_inputs')
combine_refs = pe.Node(util.Function(input_names=['input1', 'input2', 'input3'],
output_names=['inputs_list'], function=combine_inputs_into_list),
name='ants_reg_combine_refs')
'''
calc_ants_warp_wf.connect(inputspec, 'anatomical_brain',
calculate_ants_warp, 'anatomical_brain')
calc_ants_warp_wf.connect(inputspec, 'anatomical_skull',
calculate_ants_warp, 'anatomical_skull')
calc_ants_warp_wf.connect(inputspec, 'reference_brain',
calculate_ants_warp, 'reference_brain')
calc_ants_warp_wf.connect(inputspec, 'reference_skull',
calculate_ants_warp, 'reference_skull')
'''
calc_ants_warp_wf.connect(inputspec, 'anatomical_brain',
combine_inputs, 'input1')
calc_ants_warp_wf.connect(inputspec, 'anatomical_brain',
combine_inputs, 'input2')
calc_ants_warp_wf.connect(inputspec, 'anatomical_skull',
combine_inputs, 'input3')
calc_ants_warp_wf.connect(combine_inputs, 'inputs_list',
calculate_ants_warp, 'moving_image')
calc_ants_warp_wf.connect(inputspec, 'reference_brain',
combine_refs, 'input1')
calc_ants_warp_wf.connect(inputspec, 'reference_brain',
combine_refs, 'input2')
calc_ants_warp_wf.connect(inputspec, 'reference_skull',
combine_refs, 'input3')
calc_ants_warp_wf.connect(combine_refs, 'inputs_list',
calculate_ants_warp, 'fixed_image')
'''
else:
'''
calc_ants_warp_wf.connect(inputspec, 'anatomical_brain',
calculate_ants_warp, 'moving_image')
calc_ants_warp_wf.connect(inputspec, 'reference_brain',
calculate_ants_warp, 'fixed_image')
'''
calc_ants_warp_wf.connect(inputspec, 'anatomical_brain',
calculate_ants_warp, 'anatomical_brain')
calc_ants_warp_wf.connect(inputspec, 'anatomical_brain',
calculate_ants_warp, 'anatomical_skull')
calc_ants_warp_wf.connect(inputspec, 'reference_brain',
calculate_ants_warp, 'reference_brain')
calc_ants_warp_wf.connect(inputspec, 'reference_brain',
calculate_ants_warp, 'reference_skull')
calc_ants_warp_wf.connect(inputspec, 'dimension', calculate_ants_warp,
'dimension')
calc_ants_warp_wf.connect(inputspec, 'use_histogram_matching',
calculate_ants_warp, 'use_histogram_matching')
calc_ants_warp_wf.connect(inputspec, 'winsorize_lower_quantile',
calculate_ants_warp, 'winsorize_lower_quantile')
calc_ants_warp_wf.connect(inputspec, 'winsorize_upper_quantile',
calculate_ants_warp, 'winsorize_upper_quantile')
calc_ants_warp_wf.connect(inputspec, 'metric', calculate_ants_warp,
'metric')
calc_ants_warp_wf.connect(inputspec, 'metric_weight', calculate_ants_warp,
'metric_weight')
calc_ants_warp_wf.connect(inputspec, 'radius_or_number_of_bins',
calculate_ants_warp, 'radius_or_number_of_bins')
calc_ants_warp_wf.connect(inputspec, 'sampling_strategy',
calculate_ants_warp, 'sampling_strategy')
calc_ants_warp_wf.connect(inputspec, 'sampling_percentage',
calculate_ants_warp, 'sampling_percentage')
calc_ants_warp_wf.connect(inputspec, 'number_of_iterations',
calculate_ants_warp, 'number_of_iterations')
calc_ants_warp_wf.connect(inputspec, 'convergence_threshold',
calculate_ants_warp, 'convergence_threshold')
calc_ants_warp_wf.connect(inputspec, 'convergence_window_size',
calculate_ants_warp, 'convergence_window_size')
calc_ants_warp_wf.connect(inputspec, 'transforms', calculate_ants_warp,
'transforms')
calc_ants_warp_wf.connect(inputspec, 'transform_parameters',
calculate_ants_warp, 'transform_parameters')
calc_ants_warp_wf.connect(inputspec, 'shrink_factors',
calculate_ants_warp, 'shrink_factors')
calc_ants_warp_wf.connect(inputspec, 'smoothing_sigmas',
calculate_ants_warp, 'smoothing_sigmas')
calc_ants_warp_wf.connect(inputspec, 'write_composite_transform',
calculate_ants_warp, 'write_composite_transform')
# inter-workflow connections
calc_ants_warp_wf.connect(calculate_ants_warp, 'warp_list',
select_forward_initial, 'warp_list')
calc_ants_warp_wf.connect(calculate_ants_warp, 'warp_list',
select_forward_rigid, 'warp_list')
calc_ants_warp_wf.connect(calculate_ants_warp, 'warp_list',
select_forward_affine, 'warp_list')
calc_ants_warp_wf.connect(calculate_ants_warp, 'warp_list',
select_forward_warp, 'warp_list')
calc_ants_warp_wf.connect(calculate_ants_warp, 'warp_list',
select_inverse_warp, 'warp_list')
# connections to outputspec
calc_ants_warp_wf.connect(select_forward_initial, 'selected_warp',
outputspec, 'ants_initial_xfm')
calc_ants_warp_wf.connect(select_forward_rigid, 'selected_warp',
outputspec, 'ants_rigid_xfm')
calc_ants_warp_wf.connect(select_forward_affine, 'selected_warp',
outputspec, 'ants_affine_xfm')
calc_ants_warp_wf.connect(select_forward_warp, 'selected_warp',
outputspec, 'warp_field')
calc_ants_warp_wf.connect(select_inverse_warp, 'selected_warp',
outputspec, 'inverse_warp_field')
calc_ants_warp_wf.connect(calculate_ants_warp, 'warped_image',
outputspec, 'normalized_output_brain')
return calc_ants_warp_wf
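# A hedged configuration sketch for the staged inputs described in the
# docstring above. The values are generic ANTs-style settings chosen for
# illustration, not values asserted by this module; each list holds one entry
# per registration stage.
#
#   warp_wf = create_wf_calculate_ants_warp(num_threads=4)
#   warp_wf.inputs.inputspec.transforms = ['Rigid', 'Affine', 'SyN']
#   warp_wf.inputs.inputspec.transform_parameters = [[0.1], [0.1], [0.1, 3, 0]]
#   warp_wf.inputs.inputspec.metric = ['MI', 'MI', 'CC']
#   warp_wf.inputs.inputspec.metric_weight = [1, 1, 1]
#   warp_wf.inputs.inputspec.radius_or_number_of_bins = [32, 32, 4]
#   warp_wf.inputs.inputspec.number_of_iterations = [[1000, 500, 250, 100],
#                                                    [1000, 500, 250, 100],
#                                                    [100, 100, 70, 20]]
#   warp_wf.inputs.inputspec.shrink_factors = [[8, 4, 2, 1]] * 3
#   warp_wf.inputs.inputspec.smoothing_sigmas = [[3, 2, 1, 0]] * 3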
def create_wf_apply_ants_warp(map_node=False,
name='create_wf_apply_ants_warp',
ants_threads=1):
"""
Applies previously calculated ANTS registration transforms to input
images. This workflow employs the antsApplyTransforms tool:
http://stnava.github.io/ANTs/
Parameters
----------
name : string, optional
Name of the workflow.
Returns
-------
apply_ants_warp_wf : nipype.pipeline.engine.Workflow
Notes
-----
Workflow Inputs::
inputspec.input_image : string (nifti file)
Image file of brain to be registered to reference
inputspec.reference_image : string (nifti file)
Image file of brain or template being used as a reference
inputspec.transforms : list of filepaths (nifti, .mat, .txt)
List of transforms and warps to be applied to the input image
inputspec.dimension : integer
Dimension value of image being registered (2, 3, or 4)
inputspec.interpolation : string
Type of interpolation to be used. See antsApplyTransforms
documentation or Nipype interface documentation for options
Workflow Outputs::
outputspec.output_image : string (nifti file)
Normalized output file
Workflow Graph:
.. image::
:width: 500
Detailed Workflow Graph:
.. image::
:width: 500
"""
import nipype.interfaces.ants as ants
apply_ants_warp_wf = pe.Workflow(name=name)
inputspec = pe.Node(util.IdentityInterface(fields=['input_image',
'reference_image',
'transforms',
'dimension',
'input_image_type',
'interpolation']),
name='inputspec')
if map_node:
apply_ants_warp = pe.MapNode(interface=ants.ApplyTransforms(),
name='apply_ants_warp_mapnode',
iterfield=['input_image', 'transforms'])
else:
apply_ants_warp = pe.Node(interface=ants.ApplyTransforms(),
name='apply_ants_warp')
apply_ants_warp.inputs.out_postfix = '_antswarp'
apply_ants_warp.interface.num_threads = ants_threads
apply_ants_warp.interface.estimated_memory_gb = 1.5
outputspec = pe.Node(util.IdentityInterface(fields=['output_image']),
name='outputspec')
# connections from inputspec
apply_ants_warp_wf.connect(inputspec, 'input_image', apply_ants_warp,
'input_image')
apply_ants_warp_wf.connect(inputspec, 'reference_image', apply_ants_warp,
'reference_image')
apply_ants_warp_wf.connect(inputspec, 'transforms', apply_ants_warp,
'transforms')
apply_ants_warp_wf.connect(inputspec, 'dimension', apply_ants_warp,
'dimension')
apply_ants_warp_wf.connect(inputspec, 'input_image_type', apply_ants_warp,
'input_image_type')
apply_ants_warp_wf.connect(inputspec, 'interpolation', apply_ants_warp,
'interpolation')
# connections to outputspec
apply_ants_warp_wf.connect(apply_ants_warp, 'output_image',
outputspec, 'output_image')
return apply_ants_warp_wf
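# Usage sketch (illustrative assumptions only): 'transforms' expects the
# ordered list produced by create_wf_collect_transforms() defined later in
# this module, and 'input_image_type' follows antsApplyTransforms'
# --input-image-type convention (0 for scalar 3D images, 3 for time-series).
#
#   apply_wf = create_wf_apply_ants_warp(map_node=False, ants_threads=4)
#   apply_wf.inputs.inputspec.dimension = 3
#   apply_wf.inputs.inputspec.interpolation = 'LanczosWindowedSinc'
#   parent_wf.connect(collect_wf, 'outputspec.transformation_series',
#                     apply_wf, 'inputspec.transforms')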
def create_wf_c3d_fsl_to_itk(input_image_type=0, map_node=False,
name='create_wf_c3d_fsl_to_itk'):
"""
Converts an FSL-format output matrix to an ITK-format (ANTS) matrix
for use with ANTS registration tools.
Parameters
----------
name : string, optional
Name of the workflow.
Returns
-------
fsl_to_itk_conversion : nipype.pipeline.engine.Workflow
Notes
-----
Workflow Inputs::
inputspec.affine_file : string (nifti file)
Output matrix of FSL-based functional to anatomical registration
inputspec.reference_file : string (nifti file)
File of skull-stripped anatomical brain to be used in affine
conversion
inputspec.source_file : string (nifti file)
Should match the input of the apply warp (in_file) unless you are
applying the warp to a 4-d file, in which case this file should
be a mean_functional file
Workflow Outputs::
outputspec.itk_transform : string (nifti file)
Converted affine transform in ITK format usable with ANTS
"""
import nipype.interfaces.c3 as c3
from nipype.interfaces.utility import Function
from CPAC.registration.utils import change_itk_transform_type
from nipype.interfaces.afni import preprocess
fsl_to_itk_conversion = pe.Workflow(name=name)
itk_imports = ['import os']
inputspec = pe.Node(util.IdentityInterface(fields=['affine_file',
'reference_file',
'source_file']),
name='inputspec')
# converts FSL-format .mat affine xfm into ANTS-format .txt
# .mat affine comes from Func->Anat registration
if map_node:
fsl_reg_2_itk = pe.MapNode(c3.C3dAffineTool(),
name='fsl_reg_2_itk_mapnode',
iterfield=['source_file'])
change_transform = pe.MapNode(util.Function(
input_names=['input_affine_file'],
output_names=['updated_affine_file'],
function=change_itk_transform_type,
imports=itk_imports),
name='change_transform_type',
iterfield=['input_affine_file'])
else:
fsl_reg_2_itk = pe.Node(c3.C3dAffineTool(), name='fsl_reg_2_itk')
change_transform = pe.Node(util.Function(
input_names=['input_affine_file'],
output_names=['updated_affine_file'],
function=change_itk_transform_type,
imports=itk_imports),
name='change_transform_type')
fsl_reg_2_itk.inputs.itk_transform = True
fsl_reg_2_itk.inputs.fsl2ras = True
outputspec = pe.Node(util.IdentityInterface(fields=['itk_transform']),
name='outputspec')
fsl_to_itk_conversion.connect(inputspec, 'affine_file', fsl_reg_2_itk,
'transform_file')
fsl_to_itk_conversion.connect(inputspec, 'reference_file', fsl_reg_2_itk,
'reference_file')
# source_file input of the conversion must be a 3D file, so if the source
# file is 4D (input_image_type=3), average it into a 3D file first
if input_image_type == 0:
fsl_to_itk_conversion.connect(inputspec, 'source_file', fsl_reg_2_itk,
'source_file')
elif input_image_type == 3:
try:
tstat_source = pe.Node(interface=preprocess.TStat(),
name='fsl_to_itk_tcat_source')
except AttributeError:
from nipype.interfaces.afni import utils as afni_utils
tstat_source = pe.Node(interface=afni_utils.TStat(),
name='fsl_to_itk_tcat_source')
tstat_source.inputs.outputtype = 'NIFTI_GZ'
tstat_source.inputs.options = '-mean'
fsl_to_itk_conversion.connect(inputspec, 'source_file', tstat_source,
'in_file')
fsl_to_itk_conversion.connect(tstat_source, 'out_file', fsl_reg_2_itk,
'source_file')
fsl_to_itk_conversion.connect(fsl_reg_2_itk, 'itk_transform',
change_transform, 'input_affine_file')
fsl_to_itk_conversion.connect(change_transform, 'updated_affine_file',
outputspec, 'itk_transform')
return fsl_to_itk_conversion
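# For reference, the conversion node above is roughly equivalent to the
# following command-line call (file names are placeholders for illustration,
# not values used by this module):
#
#   c3d_affine_tool -ref anat_brain.nii.gz -src mean_func.nii.gz \
#       func_to_anat.mat -fsl2ras -oitk func_to_anat_itk.txt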
def create_wf_collect_transforms(map_node=False,
name='create_wf_collect_transforms'):
"""
    Collects the transforms produced by the ANTS anatomical registration
    (nonlinear warp field plus the affine, rigid and initial linear
    components) together with the ITK-converted FSL func-to-anat affine,
    merging them into a single ordered list
    (outputspec.transformation_series) for use with antsApplyTransforms.
"""
collect_transforms_wf = pe.Workflow(name=name)
inputspec = pe.Node(util.IdentityInterface(fields=['warp_file',
'linear_initial', 'linear_affine', 'linear_rigid', \
'fsl_to_itk_affine']), name='inputspec')
    # merge the five transforms into a single ordered list:
    # warp field, then the affine/rigid/initial anatomical transforms,
    # then the ITK-converted FSL func-to-anat affine
if map_node:
collect_transforms = pe.MapNode(util.Merge(5),
name='collect_transforms_mapnode', iterfield=['in5'])
else:
collect_transforms = pe.Node(util.Merge(5), name='collect_transforms')
outputspec = pe.Node(util.IdentityInterface(
fields=['transformation_series']), name='outputspec')
# Field file from anatomical nonlinear registration
collect_transforms_wf.connect(inputspec, 'warp_file', collect_transforms,
'in1')
# affine transformation from anatomical registration
collect_transforms_wf.connect(inputspec, 'linear_affine',
collect_transforms, 'in2')
# rigid transformation from anatomical registration
collect_transforms_wf.connect(inputspec, 'linear_rigid',
collect_transforms, 'in3')
# initial transformation from anatomical registration
collect_transforms_wf.connect(inputspec, 'linear_initial',
collect_transforms, 'in4')
# Premat from Func->Anat linear reg and bbreg (if bbreg is enabled)
collect_transforms_wf.connect(inputspec, 'fsl_to_itk_affine',
collect_transforms, 'in5')
collect_transforms_wf.connect(collect_transforms, 'out', outputspec,
'transformation_series')
return collect_transforms_wf
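# A note on the ordering used above: antsApplyTransforms applies a transform
# stack in reverse order of listing, so placing the nonlinear warp first and
# the converted FSL func-to-anat affine last means the functional-to-anatomical
# affine is applied first, followed by the initial, rigid and affine anatomical
# transforms, and finally the nonlinear warp (an interpretation based on the
# usual ANTs convention, not something asserted by the original code).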
|
the-stack_106_22378 | import random
import sys
import numpy as np
import pandas as pd
from scipy import stats
from tqdm import tqdm
import torch
import train_network
import network as net
import functions as f
from parameters import BATCH_SIZE, RESOLUTION, N_ACTIONS, DATA_PATH_TEST, TRANSFORM, Q_TABLE_TEST, DEVICE
def generate_random(idx_to_class, loader):
random_list = np.zeros(1000)
n_samples = 30
with torch.no_grad():
print('generating {} random samples ... \n'.format(n_samples))
for j in tqdm(range(n_samples)):
df = []
for images, labels, paths in loader:
image_class = [idx_to_class[l.item()] for l in labels]
image_name = [f.path_to_image_name(paths[i], image_class[i])
for i in range(len(images))]
actions = [random.randint(0, 24) for _ in range(len(images))]
predicted = torch.tensor([f.image_reward(image_name[i], Q_TABLE_TEST, actions[i])
for i in range(len(images))], device=DEVICE)
df += predicted.tolist()
random_list = np.add(random_list, df)
random_list = random_list / n_samples
print('generating {} random samples done'.format(n_samples))
return random_list
def generate_predictions(idx_to_class, loader, model):
model.eval()
center_count = 0
target_list = []
predicted_list = []
with torch.no_grad():
print('generating predictions ... \n')
for images, labels, paths in tqdm(loader):
images = images.to(DEVICE)
image_class = [idx_to_class[l.item()] for l in labels]
image_name = [f.path_to_image_name(paths[i], image_class[i]) for i in range(len(images))]
actions = [model(i.unsqueeze(0)).min(1)[1].view(1, 1) for i in images]
predicted = torch.tensor([f.image_reward(image_name[i], Q_TABLE_TEST, actions[i]) for i in range(len(images))],
device=DEVICE)
targets = torch.tensor([f.image_reward(image_name[i], Q_TABLE_TEST, 12) for i in range(len(images))],
device=DEVICE)
predicted_list += predicted.tolist()
target_list += targets.tolist()
center = [torch.ones([1, 1], dtype=torch.long, device=DEVICE) * 12 for _ in range(len(actions))]
center_count += (torch.tensor(actions) == torch.tensor(center)).sum().item()
print('generating predictions done')
return predicted_list, target_list, center_count
def print_results(predicted_results=None, target_results=None, random_results=None):
possible_combinations = [('Better', np.less), ('Equal', np.equal), ('Worse', np.greater)]
statistic_comparison = stats.wilcoxon
def print_comparison(x, y):
assert len(x) == len(y), "length of lists is not equal"
for s, func in possible_combinations:
val = 100 * sum(func(x, y)) / len(x)
# TODO fix text "network then center"
print('{} performance of the network then center on the {} test images: {}%'.format(s, len(x), val))
print(statistic_comparison(x, y))
print(statistic_comparison(x, y, alternative='less'))
if predicted_results is not None and target_results is not None:
print('\n', '=== predicted vs target ===')
print_comparison(predicted_results, target_results)
if random_results is not None and target_results is not None:
print('\n', '=== random vs target ===')
print_comparison(random_results, target_results)
if predicted_results is not None and random_results is not None:
print('\n', '=== predicted vs random ===')
print_comparison(predicted_results, random_results)
# fig = plt.figure(dpi=200, facecolor='w', edgecolor='k')
# plt.plot(df['predicted'], 'ro', markersize=3, fillstyle='none')
# plt.plot(df['random'], 'go', markersize=3, fillstyle='none')
# plt.plot(df['target'], 'bo', markersize=3)
# plt.ylabel('cross-entropy loss')
# plt.xlabel('test images')
# plt.legend(['predicted', 'random', 'target'])
# plt.show()
# fig.savefig("testdatascatterplot", bbox_inches='tight')
#
# result = df.sort_values('default', ascending=False)
# result = result.reset_index(drop=True)
# print(result)
#
# fig = plt.figure(dpi=200, facecolor='w', edgecolor='k')
# plt.plot(df['predicted'], 'ro', markersize=3, fillstyle='none')
# plt.plot(df['random'], 'go', markersize=3, fillstyle='none')
# plt.plot(df['target'], 'bo', markersize=3)
# plt.ylabel('cross-entropy loss')
# plt.xlabel('test images')
# plt.legend(['predicted', 'random', 'target'])
# plt.show()
# fig.savefig("testdatascatterplot", bbox_inches='tight')
if __name__ == '__main__':
NETWORK_PATH = sys.argv[1]
# DATA_PATH = sys.argv[2]
model = net.DQN(RESOLUTION, RESOLUTION, N_ACTIONS)
model.load_state_dict(torch.load(NETWORK_PATH))
# if gpu is to be used
model.to(DEVICE)
model.eval()
loader_test, idx_to_class = f.loader(DATA_PATH_TEST, transform=TRANSFORM, batch_size=BATCH_SIZE, shuffle=False)
random_losses = generate_random(idx_to_class, loader_test)
predicted_losses, target_losses, center_locations = generate_predictions(idx_to_class, loader_test, model)
losses = pd.DataFrame([np.array(target_losses), np.array(predicted_losses), np.array(random_losses)]).transpose()
losses.columns = ['target', 'predicted', 'random']
losses = losses.sort_values('target')
losses = losses.reset_index(drop=True)
    # pass the individual columns expected by print_results()
    print_results(losses['predicted'], losses['target'], losses['random'])
|
the-stack_106_22379 | from inspect import getframeinfo, currentframe
from os.path import dirname, abspath
from sys import path
import numpy as np
from torch import ones, zeros, mean, tensor, cat, log, clamp, sigmoid, dist, Tensor
from torch.nn import MSELoss, BCELoss, BCEWithLogitsLoss, Module, L1Loss
from torch.autograd import grad
from torch.nn.functional import normalize
from torchvision.models import vgg19
from torchvision.transforms import Normalize
from .model import Discriminator, FeatureExtractor, Generator
from models.abstract_gan_solver import AbstractGanSolver
from feature_extractor import Senet50FeatureExtractor
Generator = None
class Solver(AbstractGanSolver):
def __init__(self, cfg=None, mode="train"):
super().__init__(cfg, mode)
# TODO: use this as dynamic gen. import (if so, define Gen on global level)
model = cfg['GAN']['Generator'] if cfg is not None else self.discriminator_name
global Generator
try:
Generator = __import__("models." + model, fromlist=['Generator']).Generator
except AttributeError:
Generator = __import__("models." + model, fromlist=['Net']).Net
if mode == "train":
nn_config = cfg['GAN']
self.mse = MSELoss().to(self.device)
self.l1 = L1Loss().to(self.device)
self.bcewl = BCEWithLogitsLoss().to(self.device)
self.ones_const = ones(self.batch_size, device=self.device)
self.zeros_const = zeros(self.batch_size, device=self.device)
self.d_loss_response = cat((ones(self.batch_size, device=self.device),
zeros(self.batch_size, device=self.device)))
self.pixel_loss_param = nn_config.getfloat('PixelLossParam', fallback=0)
self.adversarial_loss_param = nn_config.getfloat('AdversarialLossParam', fallback=0)
self.feature_loss_param = nn_config.getfloat('FeatureLossParam', fallback=0)
self.variance_loss_param = nn_config.getfloat('VarianceLossParam', fallback=0)
self.identity_loss_param = nn_config.getfloat("IdentityLossParam", fallback=0)
self.gradient_penalty_param = nn_config.getfloat("GradientPenaltyParam", fallback=0)
if self.feature_loss_param > 0:
self.feature_extractor = FeatureExtractor().to(self.device)
if self.identity_loss_param > 0:
self.identity_extractor = Senet50FeatureExtractor(
"/run/media/hacky/DATA2/FFHQ/mtcnn_detections_ffhq.pkl",
"/home/hacky/datasets/VGG2/senet50_ft_pytorch/senet50_ft_dims_2048.pth"
).to(self.device)
self.zero = zeros(1).to(self.device)
elif mode == "single":
pass
else:
raise Exception(f"Wrong mode \"{mode}\"!")
@property
def discriminator_name(self):
return "MFGAn"
def build_generator(self, *args, **kwargs):
return Generator(*args, **kwargs)
def build_discriminator(self, *args, **kwargs):
return Discriminator()
def compute_gradient_penalty(self, real_img: tensor, fake_img: tensor):
"""Calculates the gradient penalty loss for WGAN GP"""
# Random weight term for interpolation between real and fake samples
alpha = Tensor(np.random.random((real_img.size(0), 1, 1, 1))).to(self.device)
# Get random interpolation between real and fake samples
interpolates = (alpha * real_img + ((1 - alpha) * fake_img)).requires_grad_(True)
d_interpolates = self.discriminator(interpolates)
# fake = Tensor(real.size(0), 1).fill_(1.).requires_grad_(False)
# Get gradient w.r.t. interpolates
gradients = grad(
outputs=d_interpolates,
inputs=interpolates,
grad_outputs=self.ones_const[:real_img.size(0)],
create_graph=True,
retain_graph=True,
only_inputs=True,
)[0]
gradients = gradients.view(gradients.size(0), -1)
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
return gradient_penalty
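    # For reference: the penalty computed above is the standard WGAN-GP term
    #     GP = E_[x_hat] (|| grad_{x_hat} D(x_hat) ||_2 - 1)^2,
    # with x_hat sampled uniformly on the segment between a real and a fake
    # image. In compute_discriminator_loss() below it is added to the
    # discriminator loss whenever self.gradient_penalty_param is non-zero.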
def compute_discriminator_loss(self, fake_img, real_img, precomputed=None, train=True, *args, **kwargs):
fake_response = self.discriminator(fake_img.detach())
real_response = self.discriminator(real_img)
gradient_penalty = 0. \
if not train or self.gradient_penalty_param == 0 \
else self.compute_gradient_penalty(real_img, fake_img)
# response = cat((real_response, fake_response))
# return self.bcewl(real_response, self.ones_const) + self.bcewl(fake_response, self.zeros_const) + gradient_penalty
# return mean(- log(sigmoid(real_response) + self.epsilon), 0) \
# - mean(log(1 - sigmoid(fake_response) + self.epsilon), 0)
return self.bcewl(real_response - fake_response.mean(0, keepdim=True),
self.ones_const[:real_response.size(0)]) \
+ self.bcewl(fake_response - real_response.mean(0, keepdim=True),
self.zeros_const[:real_response.size(0)]) + gradient_penalty
def compute_generator_loss(self, label, fake_img: tensor, real_img: tensor, precomputed=None, *args, **kwargs):
loss = 0.
components = {}
if self.pixel_loss_param != 0:
pixel_loss = self.pixel_loss_param * self.l1(fake_img, real_img)
loss += pixel_loss
components["pixel_loss"] = pixel_loss.item()
if self.adversarial_loss_param != 0:
fake_response = self.discriminator(fake_img)
real_response = self.discriminator(real_img).detach()
# adversarial_loss = self.adversarial_loss_param * self.bcewl(fake_response, self.ones_const)
adversarial_loss = self.adversarial_loss_param * \
(self.bcewl(fake_response - real_response.mean(0, keepdim=True),
self.zeros_const[:fake_response.size(0)])
+ self.bcewl(real_response - fake_response.mean(0, keepdim=True),
self.ones_const[:fake_response.size(0)])
)
loss += adversarial_loss
components["adv_loss"] = adversarial_loss.item()
if self.feature_loss_param > 0:
fake_features = self.feature_extractor(fake_img)
real_features = self.feature_extractor(real_img)
feature_loss = self.feature_loss_param * self.mse(real_features, fake_features)
loss += feature_loss
components["feature_loss"] = feature_loss.item()
if self.variance_loss_param != 0:
var_loss = self.variance_loss_param * compute_variance_loss(fake_img)
loss += var_loss
components["variance_loss"] = var_loss.item()
if self.identity_loss_param > 0:
fake_identities = self.identity_extractor(label, fake_img)
real_identities = self.identity_extractor(label, real_img).detach()
norm_fake = normalize(fake_identities, p=2, dim=1)
norm_real = normalize(real_identities, p=2, dim=1)
# compute l2 distance in hyperball metric space
identity_loss = self.identity_loss_param * (norm_fake - norm_real).pow(2).sum(1).mean()
loss += identity_loss
components["identity_loss"] = identity_loss.item()
return loss, components, None
def compute_variance_loss(x: Tensor):
n, c, h, w = x.shape
num_count_h = n * c * (h - 1) * w
num_count_w = n * c * h * (w - 1)
# h_tv = torch.pow((x[:, :, 1:, :] - x[:, :, :-1, :]), 2).sum()
# w_tv = torch.pow((x[:, :, :, 1:] - x[:, :, :, :-1]), 2).sum()
h_tv = dist(x[:, :, 1:, :], x[:, :, :-1, :], p=1)
w_tv = dist(x[:, :, :, 1:], x[:, :, :, :-1], p=1)
return h_tv / num_count_h + w_tv / num_count_w
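# A small sanity-check sketch (not part of the original module): for a batch
# tensor x of shape (n, c, h, w), compute_variance_loss(x) is the mean absolute
# difference between neighbouring pixels along height and width, i.e. an
# anisotropic total-variation penalty.
#
#   import torch
#   x = torch.rand(2, 3, 8, 8)
#   tv = compute_variance_loss(x)  # 0-dim tensor
#   manual = (x[:, :, 1:, :] - x[:, :, :-1, :]).abs().sum() / (2 * 3 * 7 * 8) \
#            + (x[:, :, :, 1:] - x[:, :, :, :-1]).abs().sum() / (2 * 3 * 8 * 7)
#   assert torch.isclose(tv, manual)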
|
the-stack_106_22381 | import pytest
import tiflash
class TestMemoryApi():
def test_basic_memory_read_single_byte(self, tdev):
"""Tests simple memory read"""
result = tiflash.memory_read(tdev['read-address'], 1,
serno=tdev['serno'],
connection=tdev['connection'],
devicetype=tdev['devicetype'])
assert len(result) == 1
def test_basic_memory_read_multiple_bytes(self, tdev):
"""Tests simple memory read of multiple bytes"""
result = tiflash.memory_read(tdev['read-address'], 4,
serno=tdev['serno'],
connection=tdev['connection'],
devicetype=tdev['devicetype'])
assert len(result) == 4
def test_basic_memory_read_and_check_byte_values(self, tdev):
"""Tests memory read and checks for correct byte values. This test is
device specific."""
if "read-address" not in tdev.keys() or \
"address-value" not in tdev.keys():
pytest.skip("Need to add memval and memaddr fields in \
setup.cfg for device: %s" % tdev['devicetype'])
addr = int(tdev['read-address'], 0)
answer = tdev['value'].split(',')
answer = [ int(d, 0) for d in answer ]
result = tiflash.memory_read(addr, len(answer),
serno=tdev['serno'],
connection=tdev['connection'],
devicetype=tdev['devicetype'])
assert len(result) == len(answer)
assert result == answer
def test_basic_memory_write(self, tdev):
"""Tests simple memory write"""
WRITE_DATA = [0x11, 0x22, 0x33]
tiflash.memory_write(tdev['write-address'], WRITE_DATA,
serno=tdev['serno'],
connection=tdev['connection'],
devicetype=tdev['devicetype'])
def test_invalid_address_memory_read(self, tdev):
"""Tests an Error is raised when trying to access invalid memory for
memory read"""
INVALID_ADDRESS = 0xFFFFFFFF
NUM_BYTES = 4
with pytest.raises(tiflash.TIFlashError):
tiflash.memory_read(INVALID_ADDRESS, NUM_BYTES,
serno=tdev['serno'],
connection=tdev['connection'],
devicetype=tdev['devicetype'])
def test_invalid_address_memory_write(self, tdev):
"""Tests an Error is raised when trying to access invalid memory for
memory write"""
INVALID_ADDRESS = 0x10000000
WRITE_DATA = [0x11, 0x22, 0x33]
with pytest.raises(tiflash.TIFlashError):
tiflash.memory_write(INVALID_ADDRESS, WRITE_DATA,
serno=tdev['serno'],
connection=tdev['connection'],
devicetype=tdev['devicetype'])
|
the-stack_106_22382 | import json
import requests
# Load the users
with open('usuarios.json') as f: usuarios = json.load(f)
# Helper functions
def save_users():
"Guarda los usuarios en nuestro fichero de usuarios"
with open('usuarios.json', 'w') as f: json.dump(usuarios, f, indent=2)
def is_user(cid):
"Comprueba si un ID es usuario de nuestro bot"
return usuarios.get(str(cid))
def add_user(cid):
"Añade un usuario"
usuarios[str(cid)] = True
save_users()
def delete_user(cid):
"Borra un usuario"
usuarios[str(cid)] = False
save_users()
# Functions to fetch Pokémon information
url = "https://pokeapi.co/api/v2/{}"
def get_pokemon_info(pokemon):
"Obtiene información básica de un Pokémon"
r = requests.get(url.format('pokemon/{}'.format(pokemon)))
    # Check whether the request succeeded
if r.status_code != 200:
return "Error. Pokémon not found"
    # The request succeeded, so start extracting the Pokémon's information
    # First, get the JSON body of the response
r_json = r.json()
    # Build a dictionary to store the Pokémon's information in a convenient form
    # (to design this, take any response and inspect it with http://jsonviewer.stack.hu/)
poke_info = dict()
    # Get the name and capitalize it
poke_info['name'] = r_json['name'].capitalize()
    # Weight (the API returns hectograms, so /10 gives kg)
poke_info['weight'] = r_json['weight']/10
    # Height (the API returns decimetres, so /10 gives m)
poke_info['height'] = r_json['height']/10
    # Types (the API returns a list with the type names in lowercase)
    # A list comprehension is used here: http://python-3-patterns-idioms-test.readthedocs.io/en/latest/Comprehensions.html
poke_info['types'] = [x['type']['name'].capitalize() for x in r_json['types']]
    # Abilities (returned in the same format as the types)
poke_info['abilities'] = [x['ability']['name'].capitalize() for x in r_json['abilities']]
    # Pokémon sprite (several are returned; take only the default front sprite)
poke_info['sprite'] = r_json['sprites']['front_default']
    # Build the message to send using the Pokémon's information
    # NOTE: the brackets before the sprite contain an empty character,
    # which is used to send the image from its URL
message = "[]({sprite})Pokémon: *{name}*\nHeight: *{height}* m\nWeight: *{weight}* kg\nTypes:\n*\t· {types}*\nAbilities:\n*\t· {abilities}*".format(
sprite = poke_info['sprite'], name = poke_info['name'], types = "\n\t· ".join(poke_info['types']),
abilities = "\n\t· ".join(poke_info['abilities']), height = poke_info['height'], weight = poke_info['weight'])
return message
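# Example of the expected output (illustrative; the exact values come from the
# PokeAPI response at call time and network access is required):
#
#   >>> print(get_pokemon_info('pikachu'))
#   [](<front_default sprite URL>)Pokémon: *Pikachu*
#   Height: *0.4* m
#   Weight: *6.0* kg
#   ...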
|
the-stack_106_22383 | # -*- coding: utf-8 -*-
"""Tests for the xml module."""
from __future__ import unicode_literals
from soco import xml
def test_register_namespace():
assert xml.register_namespace
def test_ns_tag():
"""Test the ns_tag function."""
namespaces = ['http://purl.org/dc/elements/1.1/',
'urn:schemas-upnp-org:metadata-1-0/upnp/',
'urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/']
for ns_in, namespace in zip(['dc', 'upnp', ''], namespaces):
res = xml.ns_tag(ns_in, 'testtag')
correct = '{{{0}}}{1}'.format(namespace, 'testtag')
assert res == correct
|
the-stack_106_22384 | from urllib.parse import urlparse
import vobject
from csp.decorators import csp_update
from django.conf import settings
from django.contrib import messages
from django.db.models import Q
from django.http import Http404, HttpResponse
from django.shortcuts import get_object_or_404, render
from django.utils.decorators import method_decorator
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from django.views.generic import DetailView, FormView, TemplateView
from django_context_decorator import context
from pretalx.agenda.signals import register_recording_provider
from pretalx.cfp.views.event import EventPageMixin
from pretalx.common.mixins.views import PermissionRequired
from pretalx.common.phrases import phrases
from pretalx.schedule.models import Schedule, TalkSlot
from pretalx.submission.forms import FeedbackForm
from pretalx.submission.models import QuestionTarget, Submission, SubmissionStates
@method_decorator(csp_update(IMG_SRC="https://www.gravatar.com"), name="dispatch")
class TalkView(PermissionRequired, TemplateView):
model = Submission
slug_field = "code"
template_name = "agenda/talk.html"
permission_required = "agenda.view_slot"
def get_object(self, queryset=None):
talk = (
self.request.event.talks.prefetch_related("slots", "answers", "resources")
.filter(
code__iexact=self.kwargs["slug"],
)
.first()
)
if talk:
return talk
if getattr(self.request, "is_orga", False):
return get_object_or_404(
self.request.event.submissions.prefetch_related(
"speakers", "slots", "answers", "resources"
).select_related("submission_type"),
code__iexact=self.kwargs["slug"],
)
raise Http404()
@context
@cached_property
def submission(self):
return self.get_object()
def get_permission_object(self):
return self.submission
@cached_property
def recording(self):
for __, response in register_recording_provider.send_robust(self.request.event):
if (
response
and not isinstance(response, Exception)
and getattr(response, "get_recording", None)
):
recording = response.get_recording(self.submission)
if recording and recording["iframe"]:
return recording
return {}
@context
def recording_iframe(self):
return self.recording.get("iframe")
def get(self, request, *args, **kwargs):
response = super().get(request, *args, **kwargs)
if self.recording.get("csp_header"):
response._csp_update = {"frame-src": self.recording.get("csp_header")}
return response
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
qs = TalkSlot.objects.none()
schedule = Schedule.objects.none()
if self.request.event.current_schedule:
schedule = self.request.event.current_schedule
qs = schedule.talks.filter(is_visible=True).select_related("room")
elif self.request.is_orga:
schedule = self.request.event.wip_schedule
qs = schedule.talks.filter(room__isnull=False).select_related("room")
ctx["talk_slots"] = (
qs.filter(submission=self.submission)
.order_by("start")
.select_related("room")
)
result = []
other_slots = (
schedule.talks.exclude(submission_id=self.submission.pk).filter(
is_visible=True
)
if schedule
else TalkSlot.objects.none()
)
for speaker in self.submission.speakers.all():
speaker.talk_profile = speaker.event_profile(event=self.request.event)
speaker.other_submissions = self.request.event.submissions.filter(
slots__in=other_slots, speakers__in=[speaker]
).select_related("event")
result.append(speaker)
ctx["speakers"] = result
return ctx
@context
@cached_property
def submission_description(self):
return (
self.submission.description
or self.submission.abstract
or _("The talk “{title}” at {event}").format(
title=self.submission.title, event=self.request.event.name
)
)
@context
@cached_property
def answers(self):
return self.submission.answers.filter(
question__is_public=True,
question__event=self.request.event,
question__target=QuestionTarget.SUBMISSION,
).select_related("question")
class TalkReviewView(TalkView):
model = Submission
slug_field = "review_code"
template_name = "agenda/talk.html"
def has_permission(self):
return True
def get_object(self):
return get_object_or_404(
self.request.event.submissions,
review_code=self.kwargs["slug"],
state__in=[
SubmissionStates.SUBMITTED,
SubmissionStates.ACCEPTED,
SubmissionStates.CONFIRMED,
],
)
class SingleICalView(EventPageMixin, DetailView):
model = Submission
slug_field = "code"
def get(self, request, event, **kwargs):
submission = self.get_object()
code = submission.code
talk_slots = submission.slots.filter(
schedule=self.request.event.current_schedule, is_visible=True
)
netloc = urlparse(settings.SITE_URL).netloc
cal = vobject.iCalendar()
cal.add("prodid").value = "-//pretalx//{}//{}".format(netloc, code)
for talk in talk_slots:
talk.build_ical(cal)
resp = HttpResponse(cal.serialize(), content_type="text/calendar")
resp[
"Content-Disposition"
] = f'attachment; filename="{request.event.slug}-{code}.ics"'
return resp
class FeedbackView(PermissionRequired, FormView):
form_class = FeedbackForm
template_name = "agenda/feedback_form.html"
permission_required = "agenda.view_slot"
def get_object(self):
return self.request.event.talks.filter(code__iexact=self.kwargs["slug"]).first()
@context
@cached_property
def talk(self):
return self.get_object()
@context
@cached_property
def can_give_feedback(self):
return self.request.user.has_perm("agenda.give_feedback", self.talk)
@context
@cached_property
def speakers(self):
return self.talk.speakers.all()
def get(self, *args, **kwargs):
talk = self.talk
if talk and self.request.user in self.speakers:
return render(
self.request,
"agenda/feedback.html",
context={
"talk": talk,
"feedback": talk.feedback.filter(
Q(speaker__isnull=True) | Q(speaker=self.request.user)
).select_related("speaker"),
},
)
return super().get(*args, **kwargs)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["talk"] = self.talk
return kwargs
def form_valid(self, form):
if not self.can_give_feedback:
return super().form_invalid(form)
result = super().form_valid(form)
form.save()
messages.success(self.request, phrases.agenda.feedback_success)
return result
def get_success_url(self):
return self.get_object().urls.public
|
the-stack_106_22386 | # pylint: skip-file
#
# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation
#
# All contributions by the University of California:
# Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
# All rights reserved.
#
# All other contributions:
# Copyright (c) 2014, 2015, the respective contributors
# All rights reserved.
# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import tempfile
import os
import numpy as np
import six
from collections import OrderedDict
import caffe
def simple_net_file(num_output):
"""Make a simple net prototxt, based on test_net.cpp, returning the name
of the (temporary) file."""
f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
f.write("""name: 'testnet' force_backward: true
layer { type: 'DummyData' name: 'data' top: 'data' top: 'label'
dummy_data_param { num: 5 channels: 2 height: 3 width: 4
num: 5 channels: 1 height: 1 width: 1
data_filler { type: 'gaussian' std: 1 }
data_filler { type: 'constant' } } }
layer { type: 'Convolution' name: 'conv' bottom: 'data' top: 'conv'
convolution_param { num_output: 11 kernel_size: 2 pad: 3
weight_filler { type: 'gaussian' std: 1 }
bias_filler { type: 'constant' value: 2 } }
param { decay_mult: 1 } param { decay_mult: 0 }
}
layer { type: 'InnerProduct' name: 'ip' bottom: 'conv' top: 'ip_blob'
inner_product_param { num_output: """ + str(num_output) + """
weight_filler { type: 'gaussian' std: 2.5 }
bias_filler { type: 'constant' value: -3 } } }
layer { type: 'SoftmaxWithLoss' name: 'loss' bottom: 'ip_blob' bottom: 'label'
top: 'loss' }""")
f.close()
return f.name
class TestNet(unittest.TestCase):
def setUp(self):
self.num_output = 13
net_file = simple_net_file(self.num_output)
self.net = caffe.Net(net_file, caffe.TRAIN)
# fill in valid labels
self.net.blobs['label'].data[...] = \
np.random.randint(self.num_output,
size=self.net.blobs['label'].data.shape)
os.remove(net_file)
def test_memory(self):
"""Check that holding onto blob data beyond the life of a Net is OK"""
params = sum(map(list, six.itervalues(self.net.params)), [])
blobs = self.net.blobs.values()
del self.net
# now sum everything (forcing all memory to be read)
total = 0
for p in params:
total += p.data.sum() + p.diff.sum()
for bl in blobs:
total += bl.data.sum() + bl.diff.sum()
def test_layer_dict(self):
layer_dict = self.net.layer_dict
self.assertEqual(list(layer_dict.keys()), list(self.net._layer_names))
for i, name in enumerate(self.net._layer_names):
self.assertEqual(layer_dict[name].type,
self.net.layers[i].type)
def test_forward_backward(self):
self.net.forward()
self.net.backward()
def test_forward_start_end(self):
conv_blob=self.net.blobs['conv']
ip_blob=self.net.blobs['ip_blob']
sample_data=np.random.uniform(size=conv_blob.data.shape)
sample_data=sample_data.astype(np.float32)
conv_blob.data[:]=sample_data
forward_blob=self.net.forward(start='ip',end='ip')
self.assertIn('ip_blob',forward_blob)
manual_forward=[]
for i in range(0,conv_blob.data.shape[0]):
dot=np.dot(self.net.params['ip'][0].data,
conv_blob.data[i].reshape(-1))
manual_forward.append(dot+self.net.params['ip'][1].data)
manual_forward=np.array(manual_forward)
np.testing.assert_allclose(ip_blob.data,manual_forward,rtol=1e-3,atol=1e-5)
def test_backward_start_end(self):
conv_blob=self.net.blobs['conv']
ip_blob=self.net.blobs['ip_blob']
sample_data=np.random.uniform(size=ip_blob.data.shape)
sample_data=sample_data.astype(np.float32)
ip_blob.diff[:]=sample_data
backward_blob=self.net.backward(start='ip',end='ip')
self.assertIn('conv',backward_blob)
manual_backward=[]
for i in range(0,conv_blob.data.shape[0]):
dot=np.dot(self.net.params['ip'][0].data.transpose(),
sample_data[i].reshape(-1))
manual_backward.append(dot)
manual_backward=np.array(manual_backward)
manual_backward=manual_backward.reshape(conv_blob.data.shape)
np.testing.assert_allclose(conv_blob.diff,manual_backward,rtol=1e-3,atol=1e-5)
def test_clear_param_diffs(self):
# Run a forward/backward step to have non-zero diffs
self.net.forward()
self.net.backward()
diff = self.net.params["conv"][0].diff
# Check that we have non-zero diffs
self.assertTrue(diff.max() > 0)
self.net.clear_param_diffs()
# Check that the diffs are now 0
self.assertTrue((diff == 0).all())
def test_inputs_outputs(self):
self.assertEqual(self.net.inputs, [])
self.assertEqual(self.net.outputs, ['loss'])
def test_top_bottom_names(self):
self.assertEqual(self.net.top_names,
OrderedDict([('data', ['data', 'label']),
('conv', ['conv']),
('ip', ['ip_blob']),
('loss', ['loss'])]))
self.assertEqual(self.net.bottom_names,
OrderedDict([('data', []),
('conv', ['data']),
('ip', ['conv']),
('loss', ['ip_blob', 'label'])]))
def test_save_and_read(self):
f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
f.close()
self.net.save(f.name)
net_file = simple_net_file(self.num_output)
# Test legacy constructor
# should print deprecation warning
caffe.Net(net_file, f.name, caffe.TRAIN)
# Test named constructor
net2 = caffe.Net(net_file, caffe.TRAIN, weights=f.name)
os.remove(net_file)
os.remove(f.name)
for name in self.net.params:
for i in range(len(self.net.params[name])):
self.assertEqual(abs(self.net.params[name][i].data
- net2.params[name][i].data).sum(), 0)
def test_save_hdf5(self):
f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
f.close()
self.net.save_hdf5(f.name)
net_file = simple_net_file(self.num_output)
net2 = caffe.Net(net_file, caffe.TRAIN)
net2.load_hdf5(f.name)
os.remove(net_file)
os.remove(f.name)
for name in self.net.params:
for i in range(len(self.net.params[name])):
self.assertEqual(abs(self.net.params[name][i].data
- net2.params[name][i].data).sum(), 0)
class TestLevels(unittest.TestCase):
TEST_NET = """
layer {
name: "data"
type: "DummyData"
top: "data"
dummy_data_param { shape { dim: 1 dim: 1 dim: 10 dim: 10 } }
}
layer {
name: "NoLevel"
type: "InnerProduct"
bottom: "data"
top: "NoLevel"
inner_product_param { num_output: 1 }
}
layer {
name: "Level0Only"
type: "InnerProduct"
bottom: "data"
top: "Level0Only"
include { min_level: 0 max_level: 0 }
inner_product_param { num_output: 1 }
}
layer {
name: "Level1Only"
type: "InnerProduct"
bottom: "data"
top: "Level1Only"
include { min_level: 1 max_level: 1 }
inner_product_param { num_output: 1 }
}
layer {
name: "Level>=0"
type: "InnerProduct"
bottom: "data"
top: "Level>=0"
include { min_level: 0 }
inner_product_param { num_output: 1 }
}
layer {
name: "Level>=1"
type: "InnerProduct"
bottom: "data"
top: "Level>=1"
include { min_level: 1 }
inner_product_param { num_output: 1 }
}
"""
def setUp(self):
self.f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
self.f.write(self.TEST_NET)
self.f.close()
def tearDown(self):
os.remove(self.f.name)
def check_net(self, net, blobs):
net_blobs = [b for b in net.blobs.keys() if 'data' not in b]
self.assertEqual(net_blobs, blobs)
def test_0(self):
net = caffe.Net(self.f.name, caffe.TEST)
self.check_net(net, ['NoLevel', 'Level0Only', 'Level>=0'])
def test_1(self):
net = caffe.Net(self.f.name, caffe.TEST, level=1)
self.check_net(net, ['NoLevel', 'Level1Only', 'Level>=0', 'Level>=1'])
class TestStages(unittest.TestCase):
TEST_NET = """
layer {
name: "data"
type: "DummyData"
top: "data"
dummy_data_param { shape { dim: 1 dim: 1 dim: 10 dim: 10 } }
}
layer {
name: "A"
type: "InnerProduct"
bottom: "data"
top: "A"
include { stage: "A" }
inner_product_param { num_output: 1 }
}
layer {
name: "B"
type: "InnerProduct"
bottom: "data"
top: "B"
include { stage: "B" }
inner_product_param { num_output: 1 }
}
layer {
name: "AorB"
type: "InnerProduct"
bottom: "data"
top: "AorB"
include { stage: "A" }
include { stage: "B" }
inner_product_param { num_output: 1 }
}
layer {
name: "AandB"
type: "InnerProduct"
bottom: "data"
top: "AandB"
include { stage: "A" stage: "B" }
inner_product_param { num_output: 1 }
}
"""
def setUp(self):
self.f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
self.f.write(self.TEST_NET)
self.f.close()
def tearDown(self):
os.remove(self.f.name)
def check_net(self, net, blobs):
net_blobs = [b for b in net.blobs.keys() if 'data' not in b]
self.assertEqual(net_blobs, blobs)
def test_A(self):
net = caffe.Net(self.f.name, caffe.TEST, stages=['A'])
self.check_net(net, ['A', 'AorB'])
def test_B(self):
net = caffe.Net(self.f.name, caffe.TEST, stages=['B'])
self.check_net(net, ['B', 'AorB'])
def test_AandB(self):
net = caffe.Net(self.f.name, caffe.TEST, stages=['A', 'B'])
self.check_net(net, ['A', 'B', 'AorB', 'AandB'])
class TestAllInOne(unittest.TestCase):
TEST_NET = """
layer {
name: "train_data"
type: "DummyData"
top: "data"
top: "label"
dummy_data_param {
shape { dim: 1 dim: 1 dim: 10 dim: 10 }
shape { dim: 1 dim: 1 dim: 1 dim: 1 }
}
include { phase: TRAIN stage: "train" }
}
layer {
name: "val_data"
type: "DummyData"
top: "data"
top: "label"
dummy_data_param {
shape { dim: 1 dim: 1 dim: 10 dim: 10 }
shape { dim: 1 dim: 1 dim: 1 dim: 1 }
}
include { phase: TEST stage: "val" }
}
layer {
name: "deploy_data"
type: "Input"
top: "data"
input_param { shape { dim: 1 dim: 1 dim: 10 dim: 10 } }
include { phase: TEST stage: "deploy" }
}
layer {
name: "ip"
type: "InnerProduct"
bottom: "data"
top: "ip"
inner_product_param { num_output: 2 }
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "ip"
bottom: "label"
top: "loss"
include: { phase: TRAIN stage: "train" }
include: { phase: TEST stage: "val" }
}
layer {
name: "pred"
type: "Softmax"
bottom: "ip"
top: "pred"
include: { phase: TEST stage: "deploy" }
}
"""
def setUp(self):
self.f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
self.f.write(self.TEST_NET)
self.f.close()
def tearDown(self):
os.remove(self.f.name)
def check_net(self, net, outputs):
self.assertEqual(list(net.blobs['data'].shape), [1,1,10,10])
self.assertEqual(net.outputs, outputs)
def test_train(self):
net = caffe.Net(self.f.name, caffe.TRAIN, stages=['train'])
self.check_net(net, ['loss'])
def test_val(self):
net = caffe.Net(self.f.name, caffe.TEST, stages=['val'])
self.check_net(net, ['loss'])
def test_deploy(self):
net = caffe.Net(self.f.name, caffe.TEST, stages=['deploy'])
self.check_net(net, ['pred'])
|
the-stack_106_22387 | import uuid
import os
import tarfile
from pymongo import MongoClient
from server.files import list_all_img_in_folder
client = MongoClient('mongodb://localhost:27017/')
db = client.details
def retrieve_article(id):
result = db.articles.find_one({'id': id}, {'_id': 0})
return result
def retrieve_all_articles():
result = db.articles.find({}, {'_id': 0})
return result
def retrieve_news_by_chunck(index, limit):
result = list(
db.articles.find({'typeIsArticle': {'$in' : ['true', True, 'True']}}, {'_id': 0}).
sort('date', -1).
skip(int(index)).
limit(limit)
)
for element in result:
element['content'] = remove_author_name(element['content'])
return result
def retrieve_rubrique_by_chunck(rubrique_name, index, limit):
result = list(
db.articles.find({'categorie': rubrique_name}, {'_id': 0}).
sort('date', -1).
skip(int(index)).
limit(limit)
)
for element in result:
element['content'] = remove_author_name(element['content'])
return result
def retrieve_titles_and_ids():
result = list(db.articles.find({}, {'title': 1, 'id': 1, '_id': 0}).sort('date', -1))
return result
def count_articles(key, value):
return db.articles.count_documents({key: value})
def count_news():
return db.articles.count_documents({'typeIsArticle': {'$in' : ['true', True, 'True']}})
def add_article(data):
data['thumbail'] = data['image'][0]
data['id'] = create_unique_id(data['title'])
db.articles.insert_one(data)
def edit_article(id, data):
data['thumbail'] = data['image'][0]
db.articles.update_one(
{'id': id},
{'$set': data}
)
def remove_article(id):
db.articles.delete_one({'id': id})
def create_unique_id(complicated_title):
key = "-" + uuid.uuid4().hex[:8]
short_title = complicated_title.lower()
array_title = short_title.split(" ")[:3]
clean_title = []
for word in array_title :
for char in word :
if char.isalnum():
clean_title.append(char)
return "".join(clean_title) + key
def aggregate_img_by_article():
result = list(db.articles.find({}, {'image': 1, '_id': 0}).sort('date', -1))
flat_result = array_of_dict_to_array_flat(result)
return flat_result
def array_of_dict_to_array_flat(array):
image_list = []
for element in array:
if 'image' in dict(element):
for value in (dict(element)['image']):
image_list.append(value)
return image_list
# Compare the images referenced by articles with the img folder to find unused images
def find_unused_img():
list_total = list_all_img_in_folder('public/img')
list_used = aggregate_img_by_article()
list_difference = []
for item in list_total:
if item not in list_used:
list_difference.append(item)
return list_difference
def populate_archive_folder():
result = retrieve_all_articles()
for element in result:
file = open('archive/'+element['id']+'.txt', 'w')
file.write(element['title']+'\n')
file.write(element['date']+'\n')
file.write(element['id']+'\n')
file.write(str(element['typeIsArticle'])+'\n')
file.write(element['thumbail']+'\n')
file.write(element['categorie']+'\n')
file.write(str(element['content']))
file.close()
def create_archive():
os.remove('public/img/archive.tgz')
with tarfile.open('public/img/archive.tgz', 'x:gz') as tar:
tar.add('archive', 'archive')
tar.add('public/img', 'img')
tar.close()
def remove_author_name(text):
    # Drop the first <p>...</p> block (the author byline): keep the text before
    # the first "<p>" and resume two characters after the first "</p>".
    return text[:text.find("<p>")] + text[text.find("</p>") + len('<p>')+3:]
|
the-stack_106_22388 | from torch.optim.lr_scheduler import _LRScheduler, CosineAnnealingLR
import math
class RestartCosineAnnealingLR(_LRScheduler):
    """Cosine annealing schedule that restarts from the base learning rate every T_max epochs."""
def __init__(self, optimizer, T_max, eta_min=0, last_epoch=-1):
self.T_max = T_max
self.eta_min = eta_min
super(RestartCosineAnnealingLR, self).__init__(optimizer, last_epoch)
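    # Usage sketch (illustrative; `model` and `train_one_epoch` are placeholders,
    # not part of this module):
    #
    #   optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    #   scheduler = RestartCosineAnnealingLR(optimizer, T_max=10, eta_min=1e-5)
    #   for epoch in range(100):
    #       train_one_epoch(model, optimizer)
    #       scheduler.step()  # the LR returns to 0.1 at the start of every 10-epoch cycle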
def get_lr(self):
return [self.eta_min + (base_lr - self.eta_min) *
(1 + math.cos(math.pi * (self.last_epoch % self.T_max) / self.T_max)) / 2
for base_lr in self.base_lrs] |
the-stack_106_22389 |
from python_interface.gecco_interface import *
from python_spec.mrcc_response.get_response_data import _response_data, _pop_data, _cmp_data, _calc_data
from python_spec.mrcc_response.set_mrcc_response_targets import relax_ref
import math
_inp = GeCCo_Input()
# Get the name of the package GeCCo uses the integrals from
_env = _inp.env
_orb = Orb_Info()
_s2 = _orb.get('imult')
_ms = _orb.get('ims')
_isym_0 = _orb.get('lsym')
_isym= _isym_0
# Parenthesize the modulus: test (_s2-1) % 4 and (_s2+1) % 4, i.e. whether S is
# even or odd (with _s2 = 2S+1).
if ((_ms == 0) and (((_s2-1) % 4) == 0)):
    _msc = 1
elif ((_ms == 0) and (((_s2+1) % 4) == 0)):
    _msc = -1
else:
_msc = 0
#Getting the frequency
_freq=_response_data['freq']
#Getting the value of the restart option
_restart=_response_data['restart']
#Getting the total number of perturbation operators that need to be defined
_npop=_response_data['nPop']
#Getting the total number of response calculations
_ncnt=_response_data['nCnt']
#Getting the maximum order of the properties that will be calculated
_maxord=_response_data['maxorder']
#n_par tells how many versions of the same operator have to be defined
#Defining all the perturbation operators needed for the whole calculations
_list_to_depend=[]
for ipop in range (0,_npop):
_cur_ext=_pop_data['name'][ipop]+_pop_data['comp'][ipop]
_pop_name='V'+_cur_ext
new_target(_pop_name)
DEF_HAMILTONIAN({LABEL:_pop_name,MAX_RANK:1})
new_target('IMPORT_'+_pop_name)
depend(_pop_name)
_op_list={_pop_name:'ME_'+_pop_name}
_irrep=_pop_data['isym'][ipop]
for _op in _op_list:
DEF_ME_LIST({LIST:_op_list[_op],
OPERATOR:_op,
IRREP:_irrep,
'2MS':0,
AB_SYM:1}) # For second order properties, the perturbations are mainly singlet.
#Getting the type of the integrals needed while importing the integrals
_int_type=_pop_data['int_name'][ipop]
IMPORT({LIST:'ME_'+_pop_name,
TYPE:_int_type,
ENV:_env})
PRINT_MEL({LIST:'ME_'+_pop_name})
_list_to_depend.append('IMPORT_'+_pop_name)
new_target('DEF_RSPNS(1)'+_cur_ext)
DEF_SCALAR({LABEL:'RSPNS(1)'+_cur_ext})
_op_list={'RSPNS(1)'+_cur_ext:'ME_RSPNS(1)'+_cur_ext}
for _op in _op_list:
DEF_ME_LIST({LIST:_op_list[_op],
OPERATOR:_op,
IRREP:_irrep,
'2MS':0})
new_target('IMPORT_PERT_OPS')
for ele in _list_to_depend:
depend(ele)
# We calculate the first-order response again(!), since it is needed here.
# We first get the formula, and then for each of the operators we
# use the same formula, changing only the list of the operators.
new_target('GET_RSPNS(1)_FORM')
depend('F_MRCC_LAG')
depend('DEF_ME_T')
depend('DEF_ME_C0')
depend('DEF_ME_L')
if (relax_ref):
depend('DEF_ME_C0_bar')
DEF_SCALAR({LABEL:'preRSPNS(1)_1'})
DEF_SCALAR({LABEL:'preRSPNS(1)_2'})
DEF_SCALAR({LABEL:'preRSPNS(1)'})
#Defining a dummy scalar operator for RSPNS(1)
DEF_SCALAR({LABEL:'RSPNS(1)'})
#Defining a dummy perturbation operator
DEF_HAMILTONIAN({LABEL:'V(1)',MAX_RANK:1})
INVARIANT({LABEL_RES:'F_preRSPNS(1)_1',
LABEL_IN:'F_MRCC_LAG',
OP_RES:'preRSPNS(1)_1',
OPERATORS:'L'})
if (relax_ref):
REPLACE({LABEL_RES:'F_preRSPNS(1)_1',
LABEL_IN:'F_preRSPNS(1)_1',
OP_LIST:['C0^+','C0_bar']})
DERIVATIVE({LABEL_RES:'F_preRSPNS(1)_2',
LABEL_IN:'F_MRCC_LAG',
OP_RES:'preRSPNS(1)_2',
OP_DERIV:'L',
OP_MULT:'L'})
DEF_FORMULA({LABEL:'F_preRSPNS(1)',
FORMULA:'preRSPNS(1)=preRSPNS(1)_1+preRSPNS(1)_2'})
# FORMULA:'preRSPNS(1)=preRSPNS(1)_2'})
EXPAND({LABEL_RES:'F_preRSPNS(1)',
LABEL_IN:'F_preRSPNS(1)',
INTERM:['F_preRSPNS(1)_1','F_preRSPNS(1)_2']})
REPLACE({LABEL_RES:'F_RSPNS(1)',
LABEL_IN:'F_preRSPNS(1)',
OP_LIST:['H','V(1)']})
INVARIANT({LABEL_RES:'F_RSPNS(1)',
LABEL_IN:'F_RSPNS(1)',
OP_RES:'RSPNS(1)',
OPERATORS:'H'})
_list_to_depend=[]
for ipop in range (0,_npop):
_cur_ext=_pop_data['name'][ipop]+_pop_data['comp'][ipop]
_pop_name='V'+_cur_ext
new_target('EVAL_RSPNS(1)'+_cur_ext)
depend('DEF_RSPNS(1)'+_cur_ext)
ASSIGN_ME2OP({LIST:'ME_RSPNS(1)'+_cur_ext,
OPERATOR:'RSPNS(1)'})
ASSIGN_ME2OP({LIST:'ME_'+_pop_name,
OPERATOR:'V(1)'})
depend('H0')
depend('GET_RSPNS(1)_FORM')
OPTIMIZE({LABEL_OPT:'FOPT_RSPNS(1)'+_cur_ext,
LABELS_IN:'F_RSPNS(1)'})
EVALUATE({FORM:'FOPT_RSPNS(1)'+_cur_ext})
_print_mel_arg={}
_print_mel_arg[LIST]='ME_RSPNS(1)'+_cur_ext
_print_mel_arg[COMMENT]='First order property('+str(ipop)+')'
_print_mel_arg[FORMAT]='NORM'
PRINT_MEL(_print_mel_arg)
_list_to_depend.append('EVAL_RSPNS(1)'+_cur_ext)
########## calculating the first order properties
if (_restart<2):
new_target('EVAL_RSPNS(1)',True)
else:
new_target('EVAL_RSPNS(1)')
depend('IMPORT_PERT_OPS')
for ele in _list_to_depend:
depend(ele)
|
the-stack_106_22391 | # This code is part of Mthree.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test balanced cals"""
from mthree.mitigation import _balanced_cal_strings
def test_balanced_strings():
"""Validate balanced cal string pairs sum to the num_qubits"""
for num_qubits in [1, 2, 5, 9, 22, 47, 102]:
cal_strs = _balanced_cal_strings(num_qubits)
for kk in range(num_qubits):
_sum = 0
str1 = cal_strs[2*kk]
str2 = cal_strs[2*kk+1]
for jj in range(num_qubits):
_sum += int(str1[jj]) + int(str2[jj])
assert _sum == num_qubits
|
the-stack_106_22393 | r"""
Morphisms of Toric Varieties
There are three "obvious" ways to map toric varieties to toric
varieties:
1. Polynomial maps in local coordinates, the usual morphisms in
algebraic geometry.
2. Polynomial maps in the (global) homogeneous coordinates.
3. Toric morphisms, that is, algebraic morphisms equivariant with
respect to the torus action on the toric variety.
Both 2 and 3 are special cases of 1, which is just to say that we
always remain within the realm of algebraic geometry. But apart from
that, none is included in one of the other cases. In the examples
below, we will explore some algebraic maps that can or can not be
written as a toric morphism. Often a toric morphism can be written
with polynomial maps in homogeneous coordinates, but sometimes it
cannot.
The toric morphisms are perhaps the most mysterious at the
beginning. Let us quickly review their definition (See Definition
3.3.3 of [CLS]_). Let `\Sigma_1` be a fan in `N_{1,\RR}` and `\Sigma_2` be a
fan in `N_{2,\RR}`. A morphism `\phi: X_{\Sigma_1} \to X_{\Sigma_2}`
of the associated toric varieties is toric if `\phi` maps the maximal
torus `T_{N_1} \subseteq X_{\Sigma_1}` into `T_{N_2} \subseteq
X_{\Sigma_2}` and `\phi|_{T_N}` is a group homomorphism.
The data defining a toric morphism is precisely what defines a fan
morphism (see :mod:`~sage.geometry.fan_morphism`), extending the more
familiar dictionary between toric varieties and fans. Toric geometry
is a functor from the category of fans and fan morphisms to the
category of toric varieties and toric morphisms.
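Concretely, a fan morphism from `\Sigma_1` to `\Sigma_2` is (the real extension
of) a lattice homomorphism `\phi_N: N_1 \to N_2` that is compatible with the
fans, that is,
.. MATH::
    \text{for every } \sigma_1 \in \Sigma_1 \text{ there is }
    \sigma_2 \in \Sigma_2 \text{ such that } \phi_N(\sigma_1) \subseteq \sigma_2.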
.. note::
Do not create the toric morphisms (or any morphism of schemes)
directly from the ``SchemeMorphism...`` classes. Instead, use the
:meth:`~sage.schemes.generic.scheme.hom` method common to all
algebraic schemes to create new homomorphisms.
EXAMPLES:
First, consider the following embedding of `\mathbb{P}^1` into
`\mathbb{P}^2` ::
sage: P2.<x,y,z> = toric_varieties.P2()
sage: P1.<u,v> = toric_varieties.P1()
sage: P1.hom([0,u^2+v^2,u*v], P2)
Scheme morphism:
From: 1-d CPR-Fano toric variety covered by 2 affine patches
To: 2-d CPR-Fano toric variety covered by 3 affine patches
Defn: Defined on coordinates by sending [u : v] to
[0 : u^2 + v^2 : u*v]
This is a well-defined morphism of algebraic varieties because
homogeneously rescaled coordinates of a point of `\mathbb{P}^1` map to the same
point in `\mathbb{P}^2` up to its homogeneous rescalings. It is not
equivariant with respect to the torus actions
.. MATH::
\CC^\times \times \mathbb{P}^1,
(\mu,[u:v]) \mapsto [u:\mu v]
\quad\text{and}\quad
\left(\CC^\times\right)^2 \times \mathbb{P}^2,
((\alpha,\beta),[x:y:z]) \mapsto [x:\alpha y:\beta z]
,
hence it is not a toric morphism. Clearly, the problem is that
the map in homogeneous coordinates contains summands that transform
differently under the torus action. However, this is not the only
difficulty. For example, consider ::
sage: phi = P1.hom([0,u,v], P2); phi
Scheme morphism:
From: 1-d CPR-Fano toric variety covered by 2 affine patches
To: 2-d CPR-Fano toric variety covered by 3 affine patches
Defn: Defined on coordinates by sending [u : v] to
[0 : u : v]
This map is actually the embedding of the
:meth:`~sage.schemes.toric.variety.ToricVariety_field.orbit_closure`
associated to one of the rays of the fan of `\mathbb{P}^2`. Now the
morphism is equivariant with respect to **some** map `\CC^\times \to
(\CC^\times)^2` of the maximal tori of `\mathbb{P}^1` and
`\mathbb{P}^2`. But this map of the maximal tori cannot be the same as
``phi`` defined above. Indeed, the image of ``phi`` completely misses
the maximal torus `T_{\mathbb{P}^2} = \{ [x:y:z] | x\not=0, y\not=0,
z\not=0 \}` of `\mathbb{P}^2`.
Consider instead the following morphism of fans::
sage: fm = FanMorphism( matrix(ZZ,[[1,0]]), P1.fan(), P2.fan() ); fm
Fan morphism defined by the matrix
[1 0]
Domain fan: Rational polyhedral fan in 1-d lattice N
Codomain fan: Rational polyhedral fan in 2-d lattice N
which also defines a morphism of toric varieties::
sage: P1.hom(fm, P2)
Scheme morphism:
From: 1-d CPR-Fano toric variety covered by 2 affine patches
To: 2-d CPR-Fano toric variety covered by 3 affine patches
Defn: Defined by sending Rational polyhedral fan in 1-d lattice N
to Rational polyhedral fan in 2-d lattice N.
The fan morphism map is equivalent to the following polynomial map::
sage: _.as_polynomial_map()
Scheme morphism:
From: 1-d CPR-Fano toric variety covered by 2 affine patches
To: 2-d CPR-Fano toric variety covered by 3 affine patches
Defn: Defined on coordinates by sending [u : v] to
[u : v : v]
Finally, here is an example of a fan morphism that cannot be written
using homogeneous polynomials. Consider the blowup `O_{\mathbb{P}^1}(2)
\to \CC^2/\ZZ_2`. In terms of toric data, this blowup is::
sage: A2_Z2 = toric_varieties.A2_Z2()
sage: A2_Z2.fan().rays()
N(1, 0),
N(1, 2)
in 2-d lattice N
sage: O2_P1 = A2_Z2.resolve(new_rays=[(1,1)])
sage: blowup = O2_P1.hom(identity_matrix(2), A2_Z2)
sage: blowup.as_polynomial_map()
Traceback (most recent call last):
...
TypeError: The fan morphism cannot be written in homogeneous polynomials.
If we denote the homogeneous coordinates of `O_{\mathbb{P}^1}(2)` by
`x`, `t`, `y` corresponding to the rays `(1,2)`, `(1,1)`, and `(1,0)`
then the blow-up map is [BB]_:
.. MATH::
f: O_{\mathbb{P}^1}(2) \to \CC^2/\ZZ_2, \quad
(x,t,y) \mapsto \left( x\sqrt{t}, y\sqrt{t} \right)
which requires square roots.
Fibrations
----------
If a toric morphism is :meth:`dominant
<SchemeMorphism_fan_toric_variety.is_dominant>`, then all fibers over
a fixed torus orbit in the base are isomorphic. Hence, studying the
fibers is again a combinatorial question and Sage implements
additional methods to study such fibrations that are not available
otherwise (however, note that you can always
:meth:`~SchemeMorphism_fan_toric_variety.factor` to pick out the part
that is dominant over the image or its closure).
For example, consider the blow-up restricted to one of the two
coordinate charts of $O_{\mathbb{P}^1}(2)$ ::
sage: O2_P1_chart = ToricVariety(Fan([O2_P1.fan().generating_cones()[0]]))
sage: single_chart = O2_P1_chart.hom(identity_matrix(2), A2_Z2)
sage: single_chart.is_dominant()
True
sage: single_chart.is_surjective()
False
sage: fiber = single_chart.fiber_generic(); fiber
(0-d affine toric variety, 1)
sage: fiber[0].embedding_morphism().as_polynomial_map()
Scheme morphism:
From: 0-d affine toric variety
To: 2-d affine toric variety
Defn: Defined on coordinates by sending [] to
[1 : 1]
The fibers are labeled by torus orbits in the base, that is, cones of
the codomain fan. In this case, the fibers over lower-dimensional
torus orbits are::
sage: A2_Z2_cones = flatten(A2_Z2.fan().cones())
sage: table([('cone', 'dim')] +
....: [(cone.ambient_ray_indices(), single_chart.fiber_dimension(cone))
....: for cone in A2_Z2_cones], header_row=True)
cone dim
+--------+-----+
() 0
(0,) 0
(1,) -1
(0, 1) 1
Let's look closer at the one-dimensional fiber. Although not the case
in this example, connected components of fibers over higher-dimensional cones
(corresponding
to lower-dimensional torus orbits) of the base are often not
irreducible. The irreducible components are labeled by the
:meth:`~sage.geometry.fan_morphism.FanMorphism.primitive_preimage_cones`,
which are certain cones of the domain fan that map to the cone in the
base that defines the torus orbit::
sage: table([('base cone', 'primitive preimage cones')] +
....: [(cone.ambient_ray_indices(),
....: single_chart.fan_morphism().primitive_preimage_cones(cone))
....: for cone in A2_Z2_cones], header_row=True)
base cone primitive preimage cones
+-----------+---------------------------------------------------------+
() (0-d cone of Rational polyhedral fan in 2-d lattice N,)
(0,) (1-d cone of Rational polyhedral fan in 2-d lattice N,)
(1,) ()
(0, 1) (1-d cone of Rational polyhedral fan in 2-d lattice N,)
The fiber over the trivial cone is the generic fiber that we have
already encountered. The interesting fiber is the one over the
2-dimensional cone, which represents the exceptional set of the
blow-up in this single coordinate chart. Let's investigate further::
sage: exceptional_cones = single_chart.fan_morphism().primitive_preimage_cones(A2_Z2.fan(2)[0])
sage: exceptional_set = single_chart.fiber_component(exceptional_cones[0])
sage: exceptional_set
1-d affine toric variety
sage: exceptional_set.embedding_morphism().as_polynomial_map()
Scheme morphism:
From: 1-d affine toric variety
To: 2-d affine toric variety
Defn: Defined on coordinates by sending [z0] to
[z0 : 0]
So we see that the fiber over this point is an affine line. Together
with another affine line in the other coordinate patch, this covers
the exceptional $\mathbb{P}^1$ of the blowup $O_{\mathbb{P}^1}(2) \to
\CC^2/\ZZ_2$.
Here is an example with higher dimensional varieties involved::
sage: A3 = toric_varieties.A(3)
sage: P3 = toric_varieties.P(3)
sage: m = matrix([(2,0,0), (1,1,0), (3, 1, 0)])
sage: phi = A3.hom(m, P3)
sage: phi.as_polynomial_map()
Scheme morphism:
From: 3-d affine toric variety
To: 3-d CPR-Fano toric variety covered by 4 affine patches
Defn: Defined on coordinates by sending [z0 : z1 : z2] to
[z0^2*z1*z2^3 : z1*z2 : 1 : 1]
sage: phi.fiber_generic()
Traceback (most recent call last):
...
AttributeError: 'SchemeMorphism_fan_toric_variety' object
has no attribute 'fiber_generic'
Let's use the factorization mentioned above::
sage: phi_i, phi_b, phi_s = phi.factor()
It is possible to study fibers of the last two morphisms or their composition::
sage: phi_d = phi_b * phi_s
sage: phi_d
Scheme morphism:
From: 3-d affine toric variety
To: 2-d toric variety covered by 3 affine patches
Defn: Defined by sending Rational polyhedral fan in 3-d lattice N to
Rational polyhedral fan in Sublattice <N(1, 0, 0), N(0, 1, 0)>.
sage: phi_d.as_polynomial_map()
Scheme morphism:
From: 3-d affine toric variety
To: 2-d toric variety covered by 3 affine patches
Defn: Defined on coordinates by sending [z0 : z1 : z2] to
[z0^2*z1*z2^3 : z1*z2 : 1]
sage: phi_d.codomain().fan().rays()
N( 1, 0, 0),
N( 0, 1, 0),
N(-1, -1, 0)
in Sublattice <N(1, 0, 0), N(0, 1, 0)>
sage: for c in phi_d.codomain().fan():
....: c.ambient_ray_indices()
(1, 2)
(0, 2)
(0, 1)
We see that the codomain fan of this morphism is a projective plane, which can be
verified by ::
sage: phi_d.codomain().fan().is_isomorphic(toric_varieties.P2().fan()) # known bug
True
(Unfortunately it cannot be verified correctly until :trac:`16012` is fixed.)
We now have access to fiber methods::
sage: fiber = phi_d.fiber_generic()
sage: fiber
(1-d affine toric variety, 2)
sage: fiber[0].embedding_morphism()
Scheme morphism:
From: 1-d affine toric variety
To: 3-d affine toric variety
Defn: Defined by sending
Rational polyhedral fan in Sublattice <N(1, 1, -1)> to
Rational polyhedral fan in 3-d lattice N.
sage: fiber[0].embedding_morphism().as_polynomial_map()
Traceback (most recent call last):
...
NotImplementedError: polynomial representations for
fans with virtual rays are not implemented yet
sage: fiber[0].fan().rays()
Empty collection
in Sublattice <N(1, 1, -1)>
We see that generic fibers of this morphism consist of 2 one-dimensional tori
each. To see what happens over boundary points we can look at fiber components
corresponding to the cones of the domain fan::
sage: fm = phi_d.fan_morphism()
sage: for c in flatten(phi_d.domain().fan().cones()):
....: fc, m = phi_d.fiber_component(c, multiplicity=True)
....: print("{} |-> {} ({} rays, multiplicity {}) over {}".format(
....: c.ambient_ray_indices(), fc, fc.fan().nrays(),
....: m, fm.image_cone(c).ambient_ray_indices()))
() |-> 1-d affine toric variety (0 rays, multiplicity 2) over ()
(0,) |-> 1-d affine toric variety (0 rays, multiplicity 1) over (0,)
(1,) |-> 2-d affine toric variety (2 rays, multiplicity 1) over (0, 1)
(2,) |-> 2-d affine toric variety (2 rays, multiplicity 1) over (0, 1)
(0, 1) |-> 1-d affine toric variety (1 rays, multiplicity 1) over (0, 1)
(1, 2) |-> 1-d affine toric variety (1 rays, multiplicity 1) over (0, 1)
(0, 2) |-> 1-d affine toric variety (1 rays, multiplicity 1) over (0, 1)
(0, 1, 2) |-> 0-d affine toric variety (0 rays, multiplicity 1) over (0, 1)
Now we see that over one of the coordinate lines of the projective plane we also
have one-dimensional tori (but only one in each fiber), while over one of the
points fixed by the torus action we have two affine planes intersecting along an
affine line. An alternative perspective is provided by cones of the codomain
fan::
sage: for c in flatten(phi_d.codomain().fan().cones()):
....: print("{} connected components over {}, each with {} irreducible components.".format(
....: fm.index(c), c.ambient_ray_indices(),
....: len(fm.primitive_preimage_cones(c))))
2 connected components over (), each with 1 irreducible components.
1 connected components over (0,), each with 1 irreducible components.
None connected components over (1,), each with 0 irreducible components.
None connected components over (2,), each with 0 irreducible components.
None connected components over (1, 2), each with 0 irreducible components.
None connected components over (0, 2), each with 0 irreducible components.
1 connected components over (0, 1), each with 2 irreducible components.
REFERENCES:
.. [BB]
Gavin Brown, Jaroslaw Buczynski:
Maps of toric varieties in Cox coordinates,
http://arxiv.org/abs/1004.4924
"""
#*****************************************************************************
# Copyright (C) 2011 Volker Braun <[email protected]>
# Copyright (C) 2010 Andrey Novoseltsev <[email protected]>
# Copyright (C) 2006 William Stein <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import print_function
from six import iteritems
# For now, the scheme morphism base class cannot derive from Morphism
# since this would clash with elliptic curves. So we derive only on
# the toric varieties level from Morphism. See
# https://groups.google.com/d/msg/sage-devel/qF4yU6Vdmao/wQlNrneSmWAJ
from sage.categories.morphism import Morphism
from sage.structure.richcmp import richcmp_not_equal, richcmp
from sage.structure.sequence import Sequence
from sage.rings.all import ZZ
from sage.arith.all import gcd
from sage.misc.all import cached_method
from sage.matrix.constructor import matrix, identity_matrix
from sage.modules.free_module_element import vector
from sage.geometry.all import Cone, Fan
from sage.schemes.generic.scheme import is_Scheme
from sage.schemes.generic.morphism import (
is_SchemeMorphism,
SchemeMorphism, SchemeMorphism_point, SchemeMorphism_polynomial
)
############################################################################
# A point on a toric variety determined by homogeneous coordinates.
class SchemeMorphism_point_toric_field(SchemeMorphism_point, Morphism):
"""
A point of a toric variety determined by homogeneous coordinates
in a field.
.. WARNING::
You should not create objects of this class directly. Use the
:meth:`~sage.schemes.generic.scheme.hom` method of
:class:`toric varieties
<sage.schemes.toric.variety.ToricVariety_field>`
instead.
INPUT:
- ``X`` -- toric variety or subscheme of a toric variety.
- ``coordinates`` -- list of coordinates in the base field of ``X``.
- ``check`` -- if ``True`` (default), the input will be checked for
correctness.
OUTPUT:
A :class:`SchemeMorphism_point_toric_field`.
TESTS::
sage: P1xP1 = toric_varieties.P1xP1()
sage: P1xP1(1,2,3,4)
[1 : 2 : 3 : 4]
"""
# Mimicking affine/projective classes
def __init__(self, X, coordinates, check=True):
r"""
See :class:`SchemeMorphism_point_toric_field` for documentation.
TESTS::
sage: P1xP1 = toric_varieties.P1xP1()
sage: P1xP1(1,2,3,4)
[1 : 2 : 3 : 4]
"""
# Convert scheme to its set of points over the base ring
if is_Scheme(X):
X = X(X.base_ring())
super(SchemeMorphism_point_toric_field, self).__init__(X)
if check:
# Verify that there are the right number of coords
# Why is it not done in the parent?
if is_SchemeMorphism(coordinates):
coordinates = list(coordinates)
if not isinstance(coordinates, (list, tuple)):
raise TypeError("coordinates must be a scheme point, list, "
"or tuple. Got %s" % coordinates)
d = X.codomain().ambient_space().ngens()
if len(coordinates) != d:
raise ValueError("there must be %d coordinates! Got only %d: "
"%s" % (d, len(coordinates), coordinates))
# Make sure the coordinates all lie in the appropriate ring
coordinates = Sequence(coordinates, X.value_ring())
# Verify that the point satisfies the equations of X.
X.codomain()._check_satisfies_equations(coordinates)
self._coords = coordinates
############################################################################
# A morphism of toric varieties determined by homogeneous polynomials.
class SchemeMorphism_polynomial_toric_variety(SchemeMorphism_polynomial, Morphism):
"""
A morphism determined by homogeneous polynomials.
.. WARNING::
You should not create objects of this class directly. Use the
:meth:`~sage.schemes.generic.scheme.hom` method of
:class:`toric varieties
<sage.schemes.toric.variety.ToricVariety_field>`
instead.
INPUT:
Same as for
:class:`~sage.schemes.toric.morphism.SchemeMorphism_polynomial`.
OUTPUT:
A :class:`~sage.schemes.toric.morphism.SchemeMorphism_polynomial_toric_variety`.
TESTS::
sage: P1xP1 = toric_varieties.P1xP1()
sage: P1xP1.inject_variables()
Defining s, t, x, y
sage: P1 = P1xP1.subscheme(s-t)
sage: H = P1xP1.Hom(P1)
sage: import sage.schemes.toric.morphism as MOR
sage: MOR.SchemeMorphism_polynomial_toric_variety(H, [s, s, x, y])
Scheme morphism:
From: 2-d CPR-Fano toric variety covered by 4 affine patches
To: Closed subscheme of 2-d CPR-Fano toric variety
covered by 4 affine patches defined by:
s - t
Defn: Defined on coordinates by sending [s : t : x : y] to
[s : s : x : y]
"""
def __init__(self, parent, polynomials, check=True):
r"""
See :class:`SchemeMorphism_polynomial_toric_variety` for documentation.
TESTS::
sage: P1xP1 = toric_varieties.P1xP1()
sage: P1xP1.inject_variables()
Defining s, t, x, y
sage: P1 = P1xP1.subscheme(s-t)
sage: H = P1xP1.Hom(P1)
sage: import sage.schemes.toric.morphism as MOR
sage: MOR.SchemeMorphism_polynomial_toric_variety(H, [s, s, x, y])
Scheme morphism:
From: 2-d CPR-Fano toric variety covered by 4 affine patches
To: Closed subscheme of 2-d CPR-Fano toric variety
covered by 4 affine patches defined by:
s - t
Defn: Defined on coordinates by sending [s : t : x : y] to
[s : s : x : y]
"""
SchemeMorphism_polynomial.__init__(self, parent, polynomials, check)
if check:
# Check that defining polynomials are homogeneous (degrees can be
# different if the target uses weighted coordinates)
for p in self.defining_polynomials():
if not self.domain().ambient_space().is_homogeneous(p):
raise ValueError("%s is not homogeneous!" % p)
def as_fan_morphism(self):
"""
Express the morphism as a map defined by a fan morphism.
OUTPUT:
A :class:`SchemeMorphism_polynomial_toric_variety`. Raises a
``TypeError`` if the morphism cannot be written in such a way.
EXAMPLES::
sage: A1.<z> = toric_varieties.A1()
sage: P1 = toric_varieties.P1()
sage: patch = A1.hom([1,z], P1)
sage: patch.as_fan_morphism()
Traceback (most recent call last):
...
NotImplementedError: expressing toric morphisms as fan morphisms is
not implemented yet!
"""
raise NotImplementedError("expressing toric morphisms as fan "
"morphisms is not implemented yet!")
############################################################################
# The embedding morphism of an orbit closure
class SchemeMorphism_orbit_closure_toric_variety(SchemeMorphism, Morphism):
"""
The embedding of an orbit closure.
INPUT:
- ``parent`` -- the parent homset.
- ``defining_cone`` -- the defining cone.
- ``ray_map`` -- a dictionary ``{ambient ray generator: orbit ray
generator}``. Note that the image of the ambient ray generator
is not necessarily primitive.
.. WARNING::
You should not create objects of this class directly. Use the
:meth:`~sage.schemes.toric.variety.ToricVariety_field.orbit_closure`
method of :class:`toric varieties
<sage.schemes.toric.variety.ToricVariety_field>`
instead.
EXAMPLES::
sage: P1xP1 = toric_varieties.P1xP1()
sage: H = P1xP1.fan(1)[0]
sage: V = P1xP1.orbit_closure(H)
sage: V.embedding_morphism()
Scheme morphism:
From: 1-d toric variety covered by 2 affine patches
To: 2-d CPR-Fano toric variety covered by 4 affine patches
Defn: Defined by embedding the torus closure associated to the 1-d
cone of Rational polyhedral fan in 2-d lattice N.
TESTS::
sage: V.embedding_morphism()._reverse_ray_map()
{N(-1): 3, N(1): 2}
sage: V.embedding_morphism()._defining_cone
1-d cone of Rational polyhedral fan in 2-d lattice N
"""
def __init__(self, parent, defining_cone, ray_map):
"""
The Python constructor.
EXAMPLES::
sage: P2 = toric_varieties.P2()
sage: P1 = P2.orbit_closure(P2.fan(1)[0])
sage: P1.embedding_morphism()
Scheme morphism:
From: 1-d toric variety covered by 2 affine patches
To: 2-d CPR-Fano toric variety covered by 3 affine patches
Defn: Defined by embedding the torus closure associated to the 1-d cone
of Rational polyhedral fan in 2-d lattice N.
"""
SchemeMorphism.__init__(self, parent)
self._defining_cone = defining_cone
self._ray_map = ray_map
def defining_cone(self):
r"""
Return the cone corresponding to the torus orbit.
OUTPUT:
A cone of the fan of the ambient toric variety.
EXAMPLES::
sage: P2 = toric_varieties.P2()
sage: cone = P2.fan(1)[0]
sage: P1 = P2.orbit_closure(cone)
sage: P1.embedding_morphism().defining_cone()
1-d cone of Rational polyhedral fan in 2-d lattice N
sage: _ is cone
True
"""
return self._defining_cone
@cached_method
def _reverse_ray_map(self):
"""
Reverse ``self._ray_map``.
OUTPUT:
Return a dictionary `{orbit ray generator : preimage ray
index}`. Note that the orbit ray generator need not be
primitive. Also, the preimage ray is not necessarily unique.
EXAMPLES::
sage: P2_112 = toric_varieties.P2_112()
sage: P1 = P2_112.orbit_closure(Cone([(1,0)]))
sage: f = P1.embedding_morphism()
sage: f._ray_map
{N(-1, -2): (-2), N(0, 1): (1), N(1, 0): (0)}
sage: f._reverse_ray_map()
{N(-2): 2, N(1): 1}
"""
orbit = self.parent().domain()
codomain_fan = self.parent().codomain().fan()
reverse_ray_dict = dict()
for n1, n2 in iteritems(self._ray_map):
ray_index = codomain_fan.rays().index(n1)
if n2.is_zero():
assert ray_index in self._defining_cone.ambient_ray_indices()
continue
n2 = orbit.fan().lattice()(n2)
n2.set_immutable()
reverse_ray_dict[n2] = ray_index
return reverse_ray_dict
def _repr_defn(self):
"""
Return a string representation of the definition of ``self``.
OUTPUT:
String.
EXAMPLES::
sage: P2 = toric_varieties.P2()
sage: V = P2.orbit_closure(P2.fan(1)[0]); V
1-d toric variety covered by 2 affine patches
sage: V.embedding_morphism()._repr_defn()
'Defined by embedding the torus closure associated to the 1-d cone of
Rational polyhedral fan in 2-d lattice N.'
"""
s = 'Defined by embedding the torus closure associated to the '
s += str(self._defining_cone)
s += '.'
return s
def as_polynomial_map(self):
"""
Express the morphism via homogeneous polynomials.
OUTPUT:
A :class:`SchemeMorphism_polynomial_toric_variety`. Raises a
``TypeError`` if the morphism cannot be written in terms of
homogeneous polynomials.
The defining polynomials are not necessarily unique. There are
choices if multiple ambient space ray generators project to
the same orbit ray generator, and one such choice is made
implicitly. The orbit embedding can be written as a polynomial
map if and only if each primitive orbit ray generator is the
image of at least one primitive ray generator of the ambient
toric variety.
EXAMPLES::
sage: P2 = toric_varieties.P2()
sage: V = P2.orbit_closure(P2.fan(1)[0]); V
1-d toric variety covered by 2 affine patches
sage: V.embedding_morphism().as_polynomial_map()
Scheme morphism:
From: 1-d toric variety covered by 2 affine patches
To: 2-d CPR-Fano toric variety covered by 3 affine patches
Defn: Defined on coordinates by sending [z0 : z1] to
[0 : z1 : z0]
If the toric variety is singular, then some orbit closure
embeddings cannot be written with homogeneous polynomials::
sage: P2_112 = toric_varieties.P2_112()
sage: P1 = P2_112.orbit_closure(Cone([(1,0)]))
sage: P1.embedding_morphism().as_polynomial_map()
Traceback (most recent call last):
...
TypeError: The embedding cannot be written with homogeneous polynomials.
"""
orbit = self.domain()
codomain_fan = self.codomain().fan()
R = orbit.coordinate_ring()
polys = [ R.one() ] * codomain_fan.nrays()
for i in self._defining_cone.ambient_ray_indices():
polys[i] = R.zero()
ray_index_map = self._reverse_ray_map()
for i, ray in enumerate(orbit.fan().rays()):
try:
ray_index = ray_index_map[ray]
except KeyError:
raise TypeError('The embedding cannot be written with homogeneous polynomials.')
polys[ray_index] = R.gen(i)
return SchemeMorphism_polynomial_toric_variety(self.parent(), polys)
def pullback_divisor(self, divisor):
r"""
Pull back a toric divisor.
INPUT:
- ``divisor`` -- a torus-invariant QQ-Cartier divisor on the
codomain of the embedding map.
OUTPUT:
A divisor on the domain of the embedding map (the orbit
closure) that is isomorphic to the pull-back divisor `f^*(D)`
but with possibly different linearization.
EXAMPLES::
sage: P2 = toric_varieties.P2()
sage: P1 = P2.orbit_closure(P2.fan(1)[0])
sage: f = P1.embedding_morphism()
sage: D = P2.divisor([1,2,3]); D
V(x) + 2*V(y) + 3*V(z)
sage: f.pullback_divisor(D)
4*V(z0) + 2*V(z1)
"""
from sage.schemes.toric.divisor import is_ToricDivisor
if not (is_ToricDivisor(divisor) and divisor.is_QQ_Cartier()):
raise ValueError('The divisor must be torus-invariant and QQ-Cartier.')
m = divisor.m(self._defining_cone)
values = []
codomain_rays = self.codomain().fan().rays()
for ray in self.domain().fan().rays():
ray = codomain_rays[self._reverse_ray_map()[ray]]
value = divisor.function_value(ray) - m*ray
values.append(value)
return self.domain().divisor(values)
############################################################################
# A morphism of toric varieties determined by a fan morphism
class SchemeMorphism_fan_toric_variety(SchemeMorphism, Morphism):
"""
Construct a morphism determined by a fan morphism
.. WARNING::
You should not create objects of this class directly. Use the
:meth:`~sage.schemes.generic.scheme.hom` method of
:class:`toric varieties
<sage.schemes.toric.variety.ToricVariety_field>`
instead.
INPUT:
- ``parent`` -- Hom-set whose domain and codomain are toric varieties.
- ``fan_morphism`` -- A morphism of fans whose domain and codomain
fans equal the fans of the domain and codomain in the ``parent``
Hom-set.
- ``check`` -- boolean (optional, default:``True``). Whether to
check the input for consistency.
.. WARNING::
A fibration is a dominant morphism; if you are interested in
these then you have to make sure that your fan morphism is
dominant. For example, this can be achieved by
:meth:`factoring the morphism
<sage.schemes.toric.morphism.SchemeMorphism_fan_toric_variety.factor>`. See
:class:`SchemeMorphism_fan_toric_variety_dominant` for
additional functionality for fibrations.
OUTPUT:
A :class:`~sage.schemes.toric.morphism.SchemeMorphism_fan_toric_variety`.
EXAMPLES::
sage: P1xP1 = toric_varieties.P1xP1()
sage: P1 = toric_varieties.P1()
sage: f = P1.hom(matrix([[1,0]]), P1xP1); f
Scheme morphism:
From: 1-d CPR-Fano toric variety covered by 2 affine patches
To: 2-d CPR-Fano toric variety covered by 4 affine patches
Defn: Defined by sending Rational polyhedral fan in 1-d lattice N
to Rational polyhedral fan in 2-d lattice N.
sage: type(f)
<class 'sage.schemes.toric.morphism.SchemeMorphism_fan_toric_variety'>
Slightly more explicit construction::
sage: P1xP1 = toric_varieties.P1xP1()
sage: P1 = toric_varieties.P1()
sage: hom_set = P1xP1.Hom(P1)
sage: fm = FanMorphism( matrix(ZZ,[[1],[0]]), P1xP1.fan(), P1.fan() )
sage: hom_set(fm)
Scheme morphism:
From: 2-d CPR-Fano toric variety covered by 4 affine patches
To: 1-d CPR-Fano toric variety covered by 2 affine patches
Defn: Defined by sending Rational polyhedral fan in 2-d lattice N
to Rational polyhedral fan in 1-d lattice N.
sage: P1xP1.hom(fm, P1)
Scheme morphism:
From: 2-d CPR-Fano toric variety covered by 4 affine patches
To: 1-d CPR-Fano toric variety covered by 2 affine patches
Defn: Defined by sending Rational polyhedral fan in 2-d lattice N
to Rational polyhedral fan in 1-d lattice N.
"""
def __init__(self, parent, fan_morphism, check=True):
r"""
See :class:`SchemeMorphism_polynomial_toric_variety` for documentation.
TESTS::
sage: P1xP1 = toric_varieties.P1xP1()
sage: P1 = toric_varieties.P1()
sage: hom_set = P1xP1.Hom(P1)
sage: fan_morphism = FanMorphism( matrix(ZZ,[[1],[0]]), P1xP1.fan(), P1.fan() )
sage: from sage.schemes.toric.morphism import SchemeMorphism_fan_toric_variety
sage: SchemeMorphism_fan_toric_variety(hom_set, fan_morphism)
Scheme morphism:
From: 2-d CPR-Fano toric variety covered by 4 affine patches
To: 1-d CPR-Fano toric variety covered by 2 affine patches
Defn: Defined by sending Rational polyhedral fan in 2-d lattice N
to Rational polyhedral fan in 1-d lattice N.
"""
SchemeMorphism.__init__(self, parent)
if check and self.domain().fan()!=fan_morphism.domain_fan():
raise ValueError('The fan morphism domain must be the fan of the domain.')
if check and self.codomain().fan()!=fan_morphism.codomain_fan():
raise ValueError('The fan morphism codomain must be the fan of the codomain.')
self._fan_morphism = fan_morphism
def _richcmp_(self, right, op):
r"""
Compare ``self`` and ``right``.
INPUT:
- ``right`` -- another toric morphism
OUTPUT:
- boolean
Comparison is done first by domain, then by codomain, then by
fan morphism.
TESTS::
sage: A2 = toric_varieties.A2()
sage: P3 = toric_varieties.P(3)
sage: m = matrix([(2,0,0), (1,1,0)])
sage: phi = A2.hom(m, P3)
sage: phi == phi
True
sage: phi == prod(phi.factor())
True
sage: phi == phi.factor()[0]
False
"""
if not isinstance(right, SchemeMorphism_fan_toric_variety):
return NotImplemented
lx = self.domain()
rx = right.domain()
if lx != rx:
return richcmp_not_equal(lx, rx, op)
lx = self.codomain()
rx = right.codomain()
if lx != rx:
return richcmp_not_equal(lx, rx, op)
return richcmp(self.fan_morphism(), right.fan_morphism(), op)
def _composition_(self, right, homset):
"""
Return the composition of ``self`` and ``right``.
INPUT:
- ``right`` -- a toric morphism defined by a fan morphism.
OUTPUT:
- a toric morphism.
EXAMPLES::
sage: A2 = toric_varieties.A2()
sage: P3 = toric_varieties.P(3)
sage: m = matrix([(2,0,0), (1,1,0)])
sage: phi = A2.hom(m, P3)
sage: phi1, phi2, phi3 = phi.factor()
sage: phi1 * phi2
Scheme morphism:
From: 2-d affine toric variety
To: 3-d CPR-Fano toric variety covered by 4 affine patches
Defn: Defined by sending Rational polyhedral fan in Sublattice
<N(1, 0, 0), N(0, 1, 0)> to Rational polyhedral fan in 3-d lattice N.
sage: phi1 * phi2 * phi3 == phi
True
"""
f = self.fan_morphism() * right.fan_morphism()
return homset(f, self.codomain())
def _repr_defn(self):
"""
Return a string representation of the definition of ``self``.
OUTPUT:
String.
EXAMPLES::
sage: P1xP1 = toric_varieties.P1xP1()
sage: P1 = toric_varieties.P1()
sage: f = P1xP1.hom(matrix([[1],[0]]), P1)
sage: f._repr_defn()
'Defined by sending Rational polyhedral fan in 2-d lattice N to Rational polyhedral fan in 1-d lattice N.'
"""
s = 'Defined by sending '
s += str(self.domain().fan())
s += ' to '
s += str(self.codomain().fan())
s += '.'
return s
def factor(self):
r"""
Factor ``self`` into injective * birational * surjective morphisms.
OUTPUT:
- a triple of toric morphisms `(\phi_i, \phi_b, \phi_s)`, such that
`\phi_s` is surjective, `\phi_b` is birational, `\phi_i` is injective,
and ``self`` is equal to `\phi_i \circ \phi_b \circ \phi_s`.
The intermediate varieties are universal in the following sense. Let
``self`` map `X` to `X'` and let `X_s`, `X_i` sit in between, that is,
.. MATH::
X
\twoheadrightarrow
X_s
\to
X_i
\hookrightarrow
X'.
Then any toric morphism from `X` coinciding with ``self`` on the maximal
torus factors through `X_s` and any toric morphism into `X'` coinciding
with ``self`` on the maximal torus factors through `X_i`. In particular,
`X_i` is the closure of the image of ``self`` in `X'`.
See
:meth:`~sage.geometry.fan_morphism.FanMorphism.factor`
for a description of the toric algorithm.
EXAMPLES:
We map an affine plane into a projective 3-space in such a way, that it
becomes "a double cover of a chart of the blow up of one of the
coordinate planes"::
sage: A2 = toric_varieties.A2()
sage: P3 = toric_varieties.P(3)
sage: m = matrix([(2,0,0), (1,1,0)])
sage: phi = A2.hom(m, P3)
sage: phi.as_polynomial_map()
Scheme morphism:
From: 2-d affine toric variety
To: 3-d CPR-Fano toric variety covered by 4 affine patches
Defn: Defined on coordinates by sending [x : y] to
[x^2*y : y : 1 : 1]
sage: phi.is_surjective(), phi.is_birational(), phi.is_injective()
(False, False, False)
sage: phi_i, phi_b, phi_s = phi.factor()
sage: phi_s.is_surjective(), phi_b.is_birational(), phi_i.is_injective()
(True, True, True)
sage: prod(phi.factor()) == phi
True
Double cover (surjective)::
sage: phi_s.as_polynomial_map()
Scheme morphism:
From: 2-d affine toric variety
To: 2-d affine toric variety
Defn: Defined on coordinates by sending [x : y] to
[x^2 : y]
Blowup chart (birational)::
sage: phi_b.as_polynomial_map()
Scheme morphism:
From: 2-d affine toric variety
To: 2-d toric variety covered by 3 affine patches
Defn: Defined on coordinates by sending [z0 : z1] to
[z0*z1 : z1 : 1]
Coordinate plane inclusion (injective)::
sage: phi_i.as_polynomial_map()
Scheme morphism:
From: 2-d toric variety covered by 3 affine patches
To: 3-d CPR-Fano toric variety covered by 4 affine patches
Defn: Defined on coordinates by sending [z0 : z1 : z2] to
[z0 : z1 : z2 : z2]
"""
phi_i, phi_b, phi_s = self.fan_morphism().factor()
from sage.schemes.toric.all import ToricVariety
X = self.domain()
X_s = ToricVariety(phi_s.codomain_fan())
X_i = ToricVariety(phi_i.domain_fan())
X_prime = self.codomain()
return X_i.hom(phi_i, X_prime), X_s.hom(phi_b, X_i), X.hom(phi_s, X_s)
def fan_morphism(self):
"""
Return the defining fan morphism.
OUTPUT:
A :class:`~sage.geometry.fan_morphism.FanMorphism`.
EXAMPLES::
sage: P1xP1 = toric_varieties.P1xP1()
sage: P1 = toric_varieties.P1()
sage: f = P1xP1.hom(matrix([[1],[0]]), P1)
sage: f.fan_morphism()
Fan morphism defined by the matrix
[1]
[0]
Domain fan: Rational polyhedral fan in 2-d lattice N
Codomain fan: Rational polyhedral fan in 1-d lattice N
"""
return self._fan_morphism
def as_polynomial_map(self):
"""
Express the morphism via homogeneous polynomials.
OUTPUT:
A :class:`SchemeMorphism_polynomial_toric_variety`. Raises a
``TypeError`` if the morphism cannot be written in terms of
homogeneous polynomials.
EXAMPLES::
sage: A1 = toric_varieties.A1()
sage: square = A1.hom(matrix([[2]]), A1)
sage: square.as_polynomial_map()
Scheme endomorphism of 1-d affine toric variety
Defn: Defined on coordinates by sending [z] to
[z^2]
sage: P1 = toric_varieties.P1()
sage: patch = A1.hom(matrix([[1]]), P1)
sage: patch.as_polynomial_map()
Scheme morphism:
From: 1-d affine toric variety
To: 1-d CPR-Fano toric variety covered by 2 affine patches
Defn: Defined on coordinates by sending [z] to
[z : 1]
"""
R = self.domain().coordinate_ring()
phi = self.fan_morphism()
polys = [R.one()] * self.codomain().ngens()
for rho, x in zip(phi.domain_fan(1), R.gens()):
ray = rho.ray(0)
sigma = phi.image_cone(rho)
degrees = sigma.rays().matrix().solve_left(phi(ray))
for i, d in zip(sigma.ambient_ray_indices(), degrees):
try:
d = ZZ(d)
except TypeError:
raise TypeError('The fan morphism cannot be written in '
'homogeneous polynomials.')
polys[i] *= x**d
if phi.domain_fan().virtual_rays():
raise NotImplementedError("polynomial representations for fans "
"with virtual rays are not implemented yet")
return SchemeMorphism_polynomial_toric_variety(self.parent(), polys)
def is_bundle(self):
r"""
Check if ``self`` is a bundle.
See :meth:`~sage.geometry.fan_morphism.FanMorphism.is_bundle`
for fan morphisms for details.
OUTPUT:
- ``True`` if ``self`` is a bundle, ``False`` otherwise.
EXAMPLES::
sage: P1xP1 = toric_varieties.P1xP1()
sage: P1 = toric_varieties.P1()
sage: P1xP1.hom(matrix([[1],[0]]), P1).is_bundle()
True
"""
return self.fan_morphism().is_bundle()
def is_fibration(self):
r"""
Check if ``self`` is a fibration.
See
:meth:`~sage.geometry.fan_morphism.FanMorphism.is_fibration`
for fan morphisms for details.
OUTPUT:
- ``True`` if ``self`` is a fibration, ``False`` otherwise.
EXAMPLES::
sage: P1xP1 = toric_varieties.P1xP1()
sage: P1 = toric_varieties.P1()
sage: P1xP1.hom(matrix([[1],[0]]), P1).is_fibration()
True
"""
return self.fan_morphism().is_fibration()
def is_injective(self):
r"""
Check if ``self`` is injective.
See
:meth:`~sage.geometry.fan_morphism.FanMorphism.is_injective`
for fan morphisms for a description of the toric algorithm.
OUTPUT:
Boolean. Whether ``self`` is injective.
EXAMPLES::
sage: P1xP1 = toric_varieties.P1xP1()
sage: P1 = toric_varieties.P1()
sage: P1xP1.hom(matrix([[1],[0]]), P1).is_injective()
False
sage: X = toric_varieties.A(2)
sage: m = identity_matrix(2)
sage: f = X.hom(m, X)
sage: f.is_injective()
True
sage: Y = ToricVariety(Fan([Cone([(1,0), (1,1)])]))
sage: f = Y.hom(m, X)
sage: f.is_injective()
False
"""
return self.fan_morphism().is_injective()
def is_surjective(self):
r"""
Check if ``self`` is surjective.
See
:meth:`~sage.geometry.fan_morphism.FanMorphism.is_surjective`
for fan morphisms for a description of the toric algorithm.
OUTPUT:
Boolean. Whether ``self`` is surjective.
EXAMPLES::
sage: P1xP1 = toric_varieties.P1xP1()
sage: P1 = toric_varieties.P1()
sage: P1xP1.hom(matrix([[1],[0]]), P1).is_surjective()
True
sage: X = toric_varieties.A(2)
sage: m = identity_matrix(2)
sage: f = X.hom(m, X)
sage: f.is_surjective()
True
sage: Y = ToricVariety(Fan([Cone([(1,0), (1,1)])]))
sage: f = Y.hom(m, X)
sage: f.is_surjective()
False
"""
return self.fan_morphism().is_surjective()
def is_birational(self):
r"""
Check if ``self`` is birational.
See
:meth:`~sage.geometry.fan_morphism.FanMorphism.is_birational`
for fan morphisms for a description of the toric algorithm.
OUTPUT:
Boolean. Whether ``self`` is birational.
EXAMPLES::
sage: dP8 = toric_varieties.dP8()
sage: P2 = toric_varieties.P2()
sage: dP8.hom(identity_matrix(2), P2).is_birational()
True
sage: X = toric_varieties.A(2)
sage: Y = ToricVariety(Fan([Cone([(1,0), (1,1)])]))
sage: m = identity_matrix(2)
sage: f = Y.hom(m, X)
sage: f.is_birational()
True
"""
return self.fan_morphism().is_birational()
def is_dominant(self):
r"""
Return whether ``self`` is dominant.
See
:meth:`~sage.geometry.fan_morphism.FanMorphism.is_dominant`
for fan morphisms for a description of the toric algorithm.
OUTPUT:
Boolean. Whether ``self`` is a dominant scheme morphism.
EXAMPLES::
sage: P1 = toric_varieties.P1()
sage: A1 = toric_varieties.A1()
sage: phi = A1.hom(identity_matrix(1), P1); phi
Scheme morphism:
From: 1-d affine toric variety
To: 1-d CPR-Fano toric variety covered by 2 affine patches
Defn: Defined by sending Rational polyhedral fan in 1-d lattice N
to Rational polyhedral fan in 1-d lattice N.
sage: phi.is_dominant()
True
sage: phi.is_surjective()
False
"""
return self.fan_morphism().is_dominant()
def pullback_divisor(self, divisor):
r"""
Pull back a toric divisor.
INPUT:
- ``divisor`` -- a torus-invariant QQ-Cartier divisor on the
codomain of ``self``.
OUTPUT:
The pull-back divisor `f^*(D)`.
EXAMPLES::
sage: A2_Z2 = toric_varieties.A2_Z2()
sage: A2 = toric_varieties.A2()
sage: f = A2.hom( matrix([[1,0],[1,2]]), A2_Z2)
sage: f.pullback_divisor(A2_Z2.divisor(0))
V(x)
sage: A1 = toric_varieties.A1()
sage: square = A1.hom(matrix([[2]]), A1)
sage: D = A1.divisor(0); D
V(z)
sage: square.pullback_divisor(D)
2*V(z)
"""
from sage.schemes.toric.divisor import is_ToricDivisor
if not (is_ToricDivisor(divisor) and divisor.is_QQ_Cartier()):
raise ValueError('The divisor must be torus-invariant and QQ-Cartier.')
fm = self.fan_morphism()
values = []
for ray in self.domain().fan().rays():
value = divisor.function_value(fm(ray))
values.append(value)
return self.domain().divisor(values)
############################################################################
# A morphism of toric varieties determined by a dominant fan morphism
class SchemeMorphism_fan_toric_variety_dominant(SchemeMorphism_fan_toric_variety):
"""
Construct a morphism determined by a dominant fan morphism.
A dominant morphism is one that is surjective onto a dense
subset. In the context of toric morphisms, this means that it is
onto the big torus orbit.
.. WARNING::
You should not create objects of this class directly. Use the
:meth:`~sage.schemes.generic.scheme.hom` method of
:class:`toric varieties
<sage.schemes.toric.variety.ToricVariety_field>`
instead.
INPUT:
See :class:`SchemeMorphism_fan_toric_variety`. The given fan
morphism :meth:`must be dominant
<sage.geometry.fan_morphism.FanMorphism.is_dominant>`.
OUTPUT:
A :class:`~sage.schemes.toric.morphism.SchemeMorphism_fan_toric_variety_dominant`.
EXAMPLES::
sage: P2 = toric_varieties.P2()
sage: dP8 = toric_varieties.dP8()
sage: f = dP8.hom(identity_matrix(2), P2); f
Scheme morphism:
From: 2-d CPR-Fano toric variety covered by 4 affine patches
To: 2-d CPR-Fano toric variety covered by 3 affine patches
Defn: Defined by sending Rational polyhedral fan in 2-d lattice N
to Rational polyhedral fan in 2-d lattice N.
sage: type(f)
<class 'sage.schemes.toric.morphism.SchemeMorphism_fan_toric_variety_dominant'>
"""
@cached_method
def fiber_generic(self):
"""
Return the generic fiber.
OUTPUT:
- a tuple `(X, n)`, where `X` is a :class:`toric variety
<sage.schemes.toric.variety.ToricVariety_field>` with the
embedding morphism into domain of ``self`` and `n` is an integer.
The fiber over the base point with homogeneous coordinates
`[1:1:\cdots:1]` consists of `n` disjoint toric varieties isomorphic to
`X`. Note that fibers of a dominant toric morphism are isomorphic over
all points of a fixed torus orbit of its codomain, in particular over
all points of the maximal torus, so it makes sense to talk about "the
generic" fiber.
The embedding of `X` is a toric morphism with
the :meth:`~sage.geometry.fan_morphism.FanMorphism.domain_fan`
being the
:meth:`~sage.geometry.fan_morphism.FanMorphism.kernel_fan` of
the defining fan morphism. By contrast, embeddings of fiber components
over lower-dimensional torus orbits of the image are not toric
morphisms. Use :meth:`fiber_component` for the latter
(non-generic) fibers.
EXAMPLES::
sage: P1xP1 = toric_varieties.P1xP1()
sage: P1 = toric_varieties.P1()
sage: fiber = P1xP1.hom(matrix([[1],[0]]), P1).fiber_generic()
sage: fiber
(1-d toric variety covered by 2 affine patches, 1)
sage: f = fiber[0].embedding_morphism(); f
Scheme morphism:
From: 1-d toric variety covered by 2 affine patches
To: 2-d CPR-Fano toric variety covered by 4 affine patches
Defn: Defined by sending Rational polyhedral fan in Sublattice <N(0, 1)> to
Rational polyhedral fan in 2-d lattice N.
sage: f.as_polynomial_map()
Scheme morphism:
From: 1-d toric variety covered by 2 affine patches
To: 2-d CPR-Fano toric variety covered by 4 affine patches
Defn: Defined on coordinates by sending [z0 : z1] to
[1 : 1 : z0 : z1]
sage: A1 = toric_varieties.A1()
sage: fan = Fan([(0,1,2)], [(1,1,0),(1,0,1),(1,-1,-1)])
sage: fan = fan.subdivide(new_rays=[(1,0,0)])
sage: f = ToricVariety(fan).hom(matrix([[1],[0],[0]]), A1)
sage: f.fiber_generic()
(2-d affine toric variety, 1)
sage: _[0].fan().generating_cones()
(0-d cone of Rational polyhedral fan in Sublattice <N(0, 1, 0), N(0, 0, 1)>,)
"""
from sage.schemes.toric.variety import ToricVariety
fm = self.fan_morphism()
X = ToricVariety(fm.kernel_fan())
m = X.fan().lattice().echelonized_basis_matrix()
N = fm.domain() # May be a sublattice as well
m *= N.basis_matrix().solve_right(identity_matrix(N.dimension()))
X._embedding_morphism = X.hom(m, self.domain())
return X, fm.index()
def fiber_component(self, domain_cone, multiplicity=False):
r"""
Return a fiber component corresponding to ``domain_cone``.
INPUT:
- ``domain_cone`` -- a cone of the domain fan of ``self``.
- ``multiplicity`` (default: ``False``) -- whether to return the number
of fiber components corresponding to ``domain_cone`` as well.
OUTPUT:
- either `X` or a tuple `(X, n)`, where `X` is a :class:`toric variety
<sage.schemes.toric.variety.ToricVariety_field>` with the
embedding morphism into domain of ``self`` and `n` is an integer.
Let `\phi: \Sigma \to \Sigma'` be the :class:`fan morphism
<sage.geometry.fan_morphism.FanMorphism>` corresponding to
``self``. Let `\sigma \in \Sigma` and `\sigma' \in \Sigma'` be
the :meth:`~sage.geometry.fan_morphism.FanMorphism.image_cone`
of `\sigma`. The fiber over any point of the torus orbit corresponding
to `\sigma'` consists of `n` isomorphic connected components with each
component being a union of toric varieties intersecting along
their torus invariant subvarieties. The latter correspond to
:meth:`~sage.geometry.fan_morphism.FanMorphism.preimage_cones` of
`\sigma'` and `X` is one of the `n` components corresponding to
`\sigma`. The irreducible components correspond to
:meth:`~sage.geometry.fan_morphism.FanMorphism.primitive_preimage_cones`.
EXAMPLES::
sage: polytope = LatticePolytope(
....: [(-3,0,-1,-1),(-1,2,-1,-1),(0,-1,0,0),(0,0,0,1),(0,0,1,0),
....: (0,1,0,0),(0,2,-1,-1),(1,0,0,0),(2,0,-1,-1)])
sage: coarse_fan = FaceFan(polytope)
sage: P2 = toric_varieties.P2()
sage: proj24 = matrix([[0,0],[1,0],[0,0],[0,1]])
sage: fm = FanMorphism(proj24, coarse_fan, P2.fan(), subdivide=True)
sage: fibration = ToricVariety(fm.domain_fan()).hom(fm, P2)
sage: primitive_cones = fibration.fan_morphism().primitive_preimage_cones(P2.fan(1)[0])
sage: primitive_cone = primitive_cones[0]
sage: fibration.fiber_component(primitive_cone)
2-d toric variety covered by 4 affine patches
sage: fibration.fiber_component(primitive_cone, True)
(2-d toric variety covered by 4 affine patches, 1)
sage: for primitive_cone in primitive_cones:
....: print(fibration.fiber_component(primitive_cone))
2-d toric variety covered by 4 affine patches
2-d toric variety covered by 3 affine patches
2-d toric variety covered by 3 affine patches
"""
domain_cone = self.domain().fan().embed(domain_cone)
if domain_cone.is_trivial():
if multiplicity:
return self.fiber_generic()
else:
return self.fiber_generic()[0]
embedding = SchemeMorphism_fan_fiber_component_toric_variety(self, domain_cone)
if multiplicity:
return embedding.domain(), \
self.fan_morphism().index(embedding.base_cone())
else:
return embedding.domain()
@cached_method
def fiber_dimension(self, codomain_cone):
r"""
Return the dimension of the fiber over a particular torus
orbit in the base.
INPUT:
- ``codomain_cone`` -- a cone `\sigma` of the codomain,
specifying a torus orbit `O(\sigma)`.
OUTPUT:
An integer. The dimension of the fiber over the torus orbit
corresponding to ``codomain_cone``. If the fiber is the empty
set, ``-1`` is returned. Note that all fibers over this torus
orbit are isomorphic, and therefore have the same dimension.
EXAMPLES::
sage: P1xP1 = toric_varieties.P1xP1()
sage: P1 = toric_varieties.P1()
sage: f = P1xP1.hom(matrix([[1],[0]]), P1)
sage: f.fiber_dimension(P1.fan(0)[0])
1
sage: f.fiber_dimension(P1.fan(1)[0])
1
sage: f.fiber_dimension(P1.fan(1)[1])
1
Here is a more complicated example that is not a flat fibration::
sage: A2_Z2 = toric_varieties.A2_Z2()
sage: O2_P1 = A2_Z2.resolve(new_rays=[(1,1)])
sage: blowup = O2_P1.hom(identity_matrix(2), A2_Z2)
sage: blowup.fiber_dimension(A2_Z2.fan(0)[0])
0
sage: blowup.fiber_dimension(A2_Z2.fan(1)[0])
0
sage: blowup.fiber_dimension(A2_Z2.fan(2)[0])
1
This corresponds to the three different fibers::
sage: blowup.fiber_generic()
(0-d affine toric variety, 1)
sage: blowup.fiber_component(Cone([(1,0)]))
0-d affine toric variety
sage: blowup.fiber_component(Cone([(1,1)]))
1-d toric variety covered by 2 affine patches
"""
dim = []
fm = self.fan_morphism()
base_dim = codomain_cone.dim()
for c in fm.primitive_preimage_cones(codomain_cone):
dim.append(base_dim - c.dim())
if dim:
return max(dim) + self.domain().dimension() - self.codomain().dimension()
else:
return ZZ(-1)
def fiber_graph(self, codomain_cone):
r"""
Return the fiber over a given torus orbit in the codomain.
INPUT:
- ``codomain_cone`` -- a cone `\sigma` of the codomain,
specifying a torus orbit `O(\sigma)`.
OUTPUT:
A graph whose nodes are the irreducible components of a connected
component of the fiber over a point of `O(\sigma)`. If two irreducible
components intersect, the
corresponding nodes of the graph are joined by an edge. Note that
irreducible components do not have to be of the same dimension.
.. SEEALSO::
:meth:`~SchemeMorphism_fan_toric_variety_dominant.fiber_component`.
EXAMPLES::
sage: polytope = Polyhedron(
....: [(-3,0,-1,-1),(-1,2,-1,-1),(0,-1,0,0),(0,0,0,1),(0,0,1,0),
....: (0,1,0,0),(0,2,-1,-1),(1,0,0,0),(2,0,-1,-1)])
sage: coarse_fan = FaceFan(polytope, lattice=ToricLattice(4))
sage: P2 = toric_varieties.P2()
sage: proj34 = block_matrix(2,1,[zero_matrix(2,2), identity_matrix(2)])
sage: fm = FanMorphism(proj34, coarse_fan, P2.fan(), subdivide=True)
sage: fibration = ToricVariety(fm.domain_fan()).hom(fm, P2)
sage: fibration.fiber_graph( P2.fan(0)[0] )
Graph on 1 vertex
sage: for c1 in P2.fan(1):
....: fibration.fiber_graph(c1)
Graph on 1 vertex
Graph on 1 vertex
Graph on 4 vertices
sage: fibration.fiber_graph(P2.fan(1)[2]).get_vertices()
{0: 2-d toric variety covered by 4 affine patches,
1: 2-d toric variety covered by 3 affine patches,
2: 2-d toric variety covered by 3 affine patches,
3: 2-d toric variety covered by 4 affine patches}
sage: fibration
Scheme morphism:
From: 4-d toric variety covered by 18 affine patches
To: 2-d CPR-Fano toric variety covered by 3 affine patches
Defn: Defined by sending Rational polyhedral fan in 4-d lattice N
to Rational polyhedral fan in 2-d lattice N.
"""
fm = self.fan_morphism()
prim = fm.primitive_preimage_cones(codomain_cone)
n = len(prim)
def is_union_in_fan(self, c0, c1):
indices = c0.ambient_ray_indices() + c1.ambient_ray_indices()
try:
fm.domain_fan().cone_containing(*indices)
return True
except ValueError:
return False
        m = matrix(ZZ, n, n, lambda i, j: is_union_in_fan(self, prim[i], prim[j]))
for i in range(n):
m[i, i] = 0
from sage.graphs.graph import Graph
graph = Graph(m, loops=False, multiedges=False)
for i in range(n):
graph.set_vertex(i, self.fiber_component(prim[i]))
return graph
############################################################################
# The embedding morphism of a fiber component
class SchemeMorphism_fan_fiber_component_toric_variety(SchemeMorphism):
"""
The embedding of a fiber component of a toric morphism.
Note that the embedding map of a fiber component of a toric morphism is
itself not a toric morphism!
INPUT:
- ``toric_morphism`` -- a toric morphism. The toric morphism whose
fiber component we are describing.
- ``defining_cone`` -- a cone of the fan of the domain of
``toric_morphism``. See
:meth:`~SchemeMorphism_fan_toric_variety_dominant.fiber_component` for
details.
EXAMPLES::
sage: polytope = Polyhedron(
....: [(-3,0,-1,-1),(-1,2,-1,-1),(0,-1,0,0),(0,0,0,1),(0,0,1,0),
....: (0,1,0,0),(0,2,-1,-1),(1,0,0,0),(2,0,-1,-1)])
sage: coarse_fan = FaceFan(polytope, lattice=ToricLattice(4))
sage: P2 = toric_varieties.P2()
sage: proj24 = matrix([[0,0],[1,0],[0,0],[0,1]])
sage: fm = FanMorphism(proj24, coarse_fan, P2.fan(), subdivide=True)
sage: fibration = ToricVariety(fm.domain_fan()).hom(fm, P2)
sage: primitive_cones = fibration.fan_morphism().primitive_preimage_cones(P2.fan(1)[0])
sage: primitive_cone = primitive_cones[0]
sage: fiber_component = fibration.fiber_component(primitive_cone)
sage: fiber_component
2-d toric variety covered by 4 affine patches
sage: fiber_component.embedding_morphism()
Scheme morphism:
From: 2-d toric variety covered by 4 affine patches
To: 4-d toric variety covered by 23 affine patches
Defn: Defined by embedding a fiber component corresponding to
1-d cone of Rational polyhedral fan in 4-d lattice N.
sage: fiber_component.embedding_morphism().as_polynomial_map()
Scheme morphism:
From: 2-d toric variety covered by 4 affine patches
To: 4-d toric variety covered by 23 affine patches
Defn: Defined on coordinates by sending [z0 : z1 : z2 : z3] to
[1 : 1 : 1 : 1 : z1 : 0 : 1 : z0 : 1 : 1 : 1 : z2 : z3 : 1 : 1]
sage: type(fiber_component.embedding_morphism())
<class 'sage.schemes.toric.morphism.SchemeMorphism_fan_fiber_component_toric_variety'>
"""
def __init__(self, toric_morphism, defining_cone):
"""
The Python constructor.
TESTS::
sage: polytope = Polyhedron(
....: [(-3,0,-1,-1),(-1,2,-1,-1),(0,-1,0,0),(0,0,0,1),(0,0,1,0),
....: (0,1,0,0),(0,2,-1,-1),(1,0,0,0),(2,0,-1,-1)])
sage: coarse_fan = FaceFan(polytope, lattice=ToricLattice(4))
sage: P2 = toric_varieties.P2()
sage: proj24 = matrix([[0,0],[1,0],[0,0],[0,1]])
sage: fm = FanMorphism(proj24, coarse_fan, P2.fan(), subdivide=True)
sage: fibration = ToricVariety(fm.domain_fan()).hom(fm, P2)
sage: primitive_cone = Cone([(-1, 2, -1, 0)])
sage: fibration.fiber_component(primitive_cone).embedding_morphism()
Scheme morphism:
From: 2-d toric variety covered by 3 affine patches
To: 4-d toric variety covered by 23 affine patches
Defn: Defined by embedding a fiber component corresponding to
1-d cone of Rational polyhedral fan in 4-d lattice N.
"""
fm = toric_morphism.fan_morphism()
self._fan_morphism = fm
defining_cone = fm.domain_fan().embed(defining_cone)
self._defining_cone = defining_cone
self._base_cone = fm.image_cone(defining_cone)
fc = self._make_fiber_component()
fc._embedding_morphism = self
parent = fc.Hom(toric_morphism.domain())
SchemeMorphism.__init__(self, parent)
def _repr_defn(self):
"""
Return a string representation of the definition of ``self``.
OUTPUT:
String.
EXAMPLES::
sage: P1xP1 = toric_varieties.P1xP1()
sage: P1 = toric_varieties.P1()
sage: fc = P1xP1.hom(matrix([[1],[0]]), P1).fiber_component(Cone([(1,0)]))
sage: fc.embedding_morphism()._repr_defn()
'Defined by embedding a fiber component corresponding to 1-d cone of Rational polyhedral fan in 2-d lattice N.'
"""
return 'Defined by embedding a fiber component corresponding to {}.'.format(self.defining_cone())
def as_polynomial_map(self):
"""
Express the embedding morphism via homogeneous polynomials.
OUTPUT:
A :class:`SchemeMorphism_polynomial_toric_variety`. Raises a
``ValueError`` if the morphism cannot be written in terms of
homogeneous polynomials.
EXAMPLES::
sage: polytope = Polyhedron(
....: [(-3,0,-1,-1),(-1,2,-1,-1),(0,-1,0,0),(0,0,0,1),(0,0,1,0),
....: (0,1,0,0),(0,2,-1,-1),(1,0,0,0),(2,0,-1,-1)])
sage: coarse_fan = FaceFan(polytope, lattice=ToricLattice(4))
sage: P2 = toric_varieties.P2()
sage: proj24 = matrix([[0,0],[1,0],[0,0],[0,1]])
sage: fm = FanMorphism(proj24, coarse_fan, P2.fan(), subdivide=True)
sage: fibration = ToricVariety(fm.domain_fan()).hom(fm, P2)
sage: primitive_cone = Cone([(0, 1, 0, 0)])
sage: f = fibration.fiber_component(primitive_cone).embedding_morphism()
sage: f.as_polynomial_map()
Scheme morphism:
From: 2-d toric variety covered by 4 affine patches
To: 4-d toric variety covered by 23 affine patches
Defn: Defined on coordinates by sending [z0 : z1 : z2 : z3] to
[1 : 1 : 1 : 1 : z1 : 0 : 1 : z0 : 1 : 1 : 1 : z2 : z3 : 1 : 1]
sage: primitive_cone = Cone([(-1, 2, -1, 0)])
sage: f = fibration.fiber_component(primitive_cone).embedding_morphism()
sage: f.as_polynomial_map()
Traceback (most recent call last):
...
ValueError: The morphism cannot be written using homogeneous polynomials.
"""
fc = self.domain()
toric_variety = self.codomain()
R = fc.coordinate_ring()
polys = [R.one()] * toric_variety.fan().nrays()
for i in self.defining_cone().ambient_ray_indices():
polys[i] = R.zero()
for ray, x in zip(fc.fan().rays(), R.gens()):
try:
ray_index = self._ray_index_map[ray]
except KeyError:
raise ValueError('The morphism cannot be written using homogeneous polynomials.')
polys[ray_index] = x
return SchemeMorphism_polynomial_toric_variety(self.parent(), polys)
def _make_fiber_component(self):
"""
Construct the fiber component as a toric variety.
OUTPUT:
The fiber component as a toric variety.
EXAMPLES::
sage: P1xP1 = toric_varieties.P1xP1()
sage: P1 = toric_varieties.P1()
sage: fc = P1xP1.hom(matrix([[1],[0]]), P1).fiber_component(Cone([(1,0)]))
sage: f = fc.embedding_morphism()
sage: f._ray_index_map # indirect doctest
{N(-1): 3, N(1): 2}
TESTS::
sage: A2_Z2 = toric_varieties.A2_Z2()
sage: O2_P1 = A2_Z2.resolve(new_rays=[(1,1)])
sage: blowup = O2_P1.hom(identity_matrix(2), A2_Z2)
sage: blowup.fiber_generic()
(0-d affine toric variety, 1)
sage: blowup.fiber_component(Cone([(1,0)]))
0-d affine toric variety
sage: blowup.fiber_component(Cone([(1,1)]))
1-d toric variety covered by 2 affine patches
sage: P1 = toric_varieties.P1()
sage: f = P1.hom(matrix([2]), P1)
sage: f.fiber_component(P1.fan(1)[0])
0-d affine toric variety
sage: f.fan_morphism().index(P1.fan(1)[0])
1
sage: f.fiber_generic()
(0-d affine toric variety, 2)
"""
fm = self._fan_morphism
defining_cone = self._defining_cone
base_cone = self._base_cone
ker = fm.kernel().basis()
m = fm.matrix() * base_cone.lattice().basis_matrix()
base_cone_preimg = [m.solve_left(r) for r in base_cone.rays()]
L = fm.domain_fan().lattice().span(ker+base_cone_preimg).saturation()
cone_L = Cone([L.coordinates(r) for r in defining_cone.rays()])
L_quotient = cone_L.sublattice_quotient()
def projection(ray):
ray_L = L.coordinates(ray)
return vector(ZZ, L_quotient(ray_L))
cones = []
star_rays = set()
for cone in fm.relative_star_generators(defining_cone):
star_rays.update(cone.rays())
projected_rays = [ projection(r) for r in cone.rays() ]
cones.append(Cone(projected_rays))
fiber_fan = Fan(cones)
ray_index_map = dict()
for ray in star_rays:
ray_index = fm.domain_fan().rays().index(ray)
projected_ray = fiber_fan.lattice()(projection(ray))
if projected_ray.is_zero():
assert ray in defining_cone.rays()
continue
projected_ray.set_immutable()
ray_index_map[projected_ray] = ray_index
self._ray_index_map = ray_index_map
from sage.schemes.toric.variety import ToricVariety
return ToricVariety(fiber_fan)
def defining_cone(self):
r"""
Return the cone corresponding to the fiber torus orbit.
OUTPUT:
A cone of the fan of the total space of the toric fibration.
EXAMPLES::
sage: P1xP1 = toric_varieties.P1xP1()
sage: P1 = toric_varieties.P1()
sage: fc = P1xP1.hom(matrix([[1],[0]]), P1).fiber_component(Cone([(1,0)]))
sage: f = fc.embedding_morphism()
sage: f.defining_cone().rays()
N(1, 0)
in 2-d lattice N
sage: f.base_cone().rays()
N(1)
in 1-d lattice N
"""
return self._defining_cone
def base_cone(self):
r"""
Return the base cone `\sigma`.
The fiber is constant over the base orbit closure `V(\sigma)`.
OUTPUT:
A cone of the base of the toric fibration.
EXAMPLES::
sage: P1xP1 = toric_varieties.P1xP1()
sage: P1 = toric_varieties.P1()
sage: fc = P1xP1.hom(matrix([[1],[0]]), P1).fiber_component(Cone([(1,0)]))
sage: f = fc.embedding_morphism()
sage: f.defining_cone().rays()
N(1, 0)
in 2-d lattice N
sage: f.base_cone().rays()
N(1)
in 1-d lattice N
"""
return self._base_cone
def _image_ray_multiplicity(self, fiber_ray):
"""
Find the image ray of ``fiber_ray`` with multiplicity in the relative star.
INPUT:
A ray of the domain fan (the fiber component).
OUTPUT:
A pair ``(codomain ray index, multiplicity)``
EXAMPLES::
sage: polytope = Polyhedron(
....: [(-3,0,-1,-1),(-1,2,-1,-1),(0,-1,0,0),(0,0,0,1),(0,0,1,0),
....: (0,1,0,0),(0,2,-1,-1),(1,0,0,0),(2,0,-1,-1)])
sage: coarse_fan = FaceFan(polytope, lattice=ToricLattice(4))
sage: P2 = toric_varieties.P2()
sage: proj24 = matrix([[0,0],[1,0],[0,0],[0,1]])
sage: fm = FanMorphism(proj24, coarse_fan, P2.fan(), subdivide=True)
sage: fibration = ToricVariety(fm.domain_fan()).hom(fm, P2)
sage: primitive_cone = Cone([(-1, 2, -1, 0)])
sage: fc = fibration.fiber_component(primitive_cone)
sage: f = fc.embedding_morphism()
sage: for r in fc.fan().rays():
....: print("{} {}".format(r, f._image_ray_multiplicity(r)))
N(0, 1) (5, 1)
N(1, -3) (9, 2)
N(-1, 2) (11, 1)
sage: f._ray_index_map
{N(-3, 4): 10, N(-1, 2): 11, N(0, 1): 5, N(1, 0): 4, N(2, -6): 9}
"""
try:
image_ray_index = self._ray_index_map[fiber_ray]
return (image_ray_index, 1)
except KeyError:
pass
multiplicity = None
image_ray_index = None
for ray, index in iteritems(self._ray_index_map):
d = gcd(ray)
if d * fiber_ray != ray:
continue
            if multiplicity is not None and d > multiplicity:
continue
multiplicity = d
image_ray_index = index
return (image_ray_index, multiplicity)
def pullback_divisor(self, divisor):
r"""
Pull back a toric divisor.
INPUT:
- ``divisor`` -- a torus-invariant QQ-Cartier divisor on the
codomain of the embedding map.
OUTPUT:
A divisor on the domain of the embedding map (irreducible
component of a fiber of a toric morphism) that is isomorphic
to the pull-back divisor `f^*(D)` but with possibly different
linearization.
EXAMPLES::
sage: A1 = toric_varieties.A1()
sage: fan = Fan([(0,1,2)], [(1,1,0),(1,0,1),(1,-1,-1)]).subdivide(new_rays=[(1,0,0)])
sage: f = ToricVariety(fan).hom(matrix([[1],[0],[0]]), A1)
sage: D = f.domain().divisor([1,1,3,4]); D
V(z0) + V(z1) + 3*V(z2) + 4*V(z3)
sage: fc = f.fiber_component(Cone([(1,1,0)]))
sage: fc.embedding_morphism().pullback_divisor(D)
3*V(z0) + 2*V(z2)
sage: fc = f.fiber_component(Cone([(1,0,0)]))
sage: fc.embedding_morphism().pullback_divisor(D)
-3*V(z0) - 3*V(z1) - V(z2)
"""
from sage.schemes.toric.divisor import is_ToricDivisor
if not (is_ToricDivisor(divisor) and divisor.is_QQ_Cartier()):
raise ValueError('The divisor must be torus-invariant and QQ-Cartier.')
m = divisor.m(self.defining_cone())
values = []
codomain_rays = self.codomain().fan().rays()
for ray in self.domain().fan().rays():
image_ray_index, multiplicity = self._image_ray_multiplicity(ray)
image_ray = codomain_rays[image_ray_index]
value = divisor.function_value(image_ray) - m*image_ray
value /= multiplicity
values.append(value)
return self.domain().divisor(values)
|
the-stack_106_22394 | #--------------------------------------------------------------------------------
# DESCRIPTION:
# a. This example uses the Keithley DAQ6510 to perform temperature
# scanning
# b. For storing results to the cloud, we introduce the streaming
# tools provided by Initial State
# i. To install the Python driver for the streaming tools
# open a command prompt or terminal
# ii. Issue the following command:
# aa. On Win10: pip install ISStreamer
# bb. On Linux: sudo pip install ISStreamer
# or sudo pip3 install ISStreamer
#--------------------------------------------------------------------------------
import socket
import struct
import math
import time
import Keithley_DMM6500_Sockets_Driver as kei
from ISStreamer.Streamer import Streamer
def writeToInitialState(ch101, ch110, ch115, ch120):
streamer.log("CH101", ch101)
streamer.log("CH110", ch110)
streamer.log("CH115", ch115)
streamer.log("CH120", ch120)
return
#===== MAIN PROGRAM STARTS HERE =====
ipAddress1 = "192.168.1.165"
port = 5025
timeout = 20.0
myFile = "dmm_functions.tsp"
bucketName = time.strftime("DAQ6510_Data_%Y-%m-%d_%H-%M-%S")
myAccessKey = "YOUR_ACCESS_KEY_GOES_HERE"
streamer = Streamer(bucket_name=bucketName,
access_key=myAccessKey)
DAQ6510 = kei.DMM6500()
myID = DAQ6510.Connect(ipAddress1, 5025, 20000, 1, 1)
DAQ6510.echoCmd = 0
scanCount = 360 * 40 # setting up for a 40-hour scan
scanInterval = 10.0 # for this setup, limit to no less than 5s intervals
print(myID)
t1 = time.time()
DAQ6510.LoadScriptFile(myFile)
DAQ6510.SendCmd("do_beep(1.0, 3500)")
DAQ6510.Reset()
DAQ6510.SetFunction_Temperature("101,110,115,120", DAQ6510.Transducer.TC, DAQ6510.TCType.K)
DAQ6510.SetScan_BasicAttributes("101,110,115,120", scanCount, scanInterval)
DAQ6510.Init()
startIndex = 1
endIndex = 4
chanCnt = 4
targetCnt = scanCount * 4
loopCnt = 1
accumCnt = DAQ6510.QueryCmd("print(defbuffer1.n)", 8)
while(endIndex < (targetCnt+1)):
myData = DAQ6510.GetScan_Data(chanCnt, startIndex, endIndex)
print("Scan {}: {}".format(loopCnt, myData))
myDataList = myData.split(",")
writeToInitialState(float(myDataList[0]), float(myDataList[1]), float(myDataList[2]), float(myDataList[3]))
startIndex += chanCnt
endIndex += chanCnt
loopCnt += 1
time.sleep(1.0)
streamer.close()
DAQ6510.Disconnect()
t2 = time.time()
# Notify the user of completion and the test time achieved.
print("done")
print("{0:.6f} s".format(t2-t1))
input("Press Enter to continue...")
exit()
|
the-stack_106_22395 | ################################################################################
# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
################################################################################
from datetime import datetime, timezone
from typing import Dict, List, Optional, Union
from app.db.schema import StatisticsModel
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session
from sqlalchemy.orm.exc import NoResultFound
def create_statistic(
db_session: Session, statistic_information: Dict = {}
) -> Union[StatisticsModel, IntegrityError]:
"""
Register new statistic entry.
Arguments:
db_session {Session} -- Database session.
statistic_information {Dict} -- New statistic information.
Returns:
Union[StatisticsModel, IntegrityError] -- Statistic instance that was added
to the database or an exception in case a statistic already exists.
"""
try:
statistic = StatisticsModel(**statistic_information)
db_session.add(statistic)
db_session.commit()
db_session.refresh(statistic)
return statistic
except IntegrityError:
db_session.rollback()
raise
def get_statistic(
db_session: Session, device_id: str, datetime: datetime
) -> Union[StatisticsModel, NoResultFound]:
"""
Get a specific statistic.
Arguments:
db_session {Session} -- Database session.
device_id {str} -- Jetson id which sent the information.
datetime {datetime} -- Datetime when the device registered the information.
Returns:
Union[StatisticsModel, NoResultFound] -- Statistic instance defined by device_id and datetime
or an exception in case there's no matching statistic.
"""
try:
return get_statistic_by_id_and_datetime(db_session, device_id, datetime)
except NoResultFound:
raise
def get_statistics(
db_session: Session, device_id: Optional[str] = None
) -> List[StatisticsModel]:
"""
Get all statistics.
Arguments:
db_session {Session} -- Database session.
device_id {Optional[str]} -- Device id.
Returns:
List[StatisticsModel] -- All statistic instances present in the database or
all statistics from a specific device.
"""
if device_id:
# Get all statistics from a specific device
query = db_session.query(StatisticsModel)
return query.filter(StatisticsModel.device_id == device_id).all()
else:
        # Get all statistics from the database
return db_session.query(StatisticsModel).all()
def get_statistics_from_to(
db_session: Session,
device_id: str,
from_date: Optional[str] = None,
to_date: Optional[str] = None,
) -> List[StatisticsModel]:
"""
Get all statistics within a datetime range.
Arguments:
db_session {Session} -- Database session.
device_id {str} -- Device id.
from_date {Optional[str]} -- Beginning of datetime range.
to_date {Optional[str]} -- End of datetime range.
Returns:
List[StatisticsModel] -- All statistic instances present in the database
within a given datetime range.
"""
query = db_session.query(StatisticsModel)
query = query.filter(StatisticsModel.device_id == device_id)
if to_date is None:
# By default, show information until the current day
to_date = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%S")
if from_date:
return query.filter(
StatisticsModel.datetime.between(from_date, to_date)
).all()
return query.filter(StatisticsModel.datetime <= to_date).all()
def update_statistic(
db_session: Session,
device_id: str,
datetime: datetime,
new_statistic_information: Dict = {},
) -> Union[StatisticsModel, NoResultFound]:
"""
Modify a specific statistic.
Arguments:
db_session {Session} -- Database session.
device_id {str} -- Jetson id which sent the information.
datetime {datetime} -- Datetime when the device registered the information.
new_statistic_information {Dict} -- New statistic information.
Returns:
Union[StatisticsModel, NoResultFound] -- Updated statistic instance defined by device_id
and datetime or an exception in case there's no matching statistic.
"""
try:
try:
# Remove device id as it can't be modified
del new_statistic_information["device_id"]
except KeyError:
pass
try:
# Remove datetime as it can't be modified
del new_statistic_information["datetime"]
except KeyError:
pass
statistic = get_statistic_by_id_and_datetime(
db_session, device_id, datetime
)
for key, value in new_statistic_information.items():
if hasattr(statistic, key):
setattr(statistic, key, value)
db_session.commit()
return statistic
except NoResultFound:
raise
def delete_statistic(
db_session: Session, device_id: str, datetime: datetime
) -> Union[StatisticsModel, NoResultFound]:
"""
Delete a specific statistic.
Arguments:
db_session {Session} -- Database session.
device_id {str} -- Jetson id which sent the information.
datetime {datetime} -- Datetime when the device registered the information.
Returns:
Union[StatisticsModel, NoResultFound] -- Statistic instance that was deleted
or an exception in case there's no matching statistic.
"""
try:
statistic = get_statistic_by_id_and_datetime(
db_session, device_id, datetime
)
db_session.delete(statistic)
db_session.commit()
return statistic
except NoResultFound:
raise
def get_statistic_by_id_and_datetime(
db_session: Session, device_id: str, datetime: datetime
) -> Union[StatisticsModel, NoResultFound]:
"""
Get a statistic using the table's primary keys.
Arguments:
db_session {Session} -- Database session.
device_id {str} -- Jetson id which sent the information.
datetime {datetime} -- Datetime when the device registered the information.
Returns:
Union[StatisticsModel, NoResultFound] -- Statistic instance defined by device_id and
datetime or an exception in case there's no matching statistic.
"""
statistic = db_session.query(StatisticsModel).get((device_id, datetime))
if not statistic:
raise NoResultFound()
return statistic
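# Usage sketch for the CRUD helpers above. The SQLite URL below is a
# stand-in for the real application database, and a real StatisticsModel
# may require more columns than the two primary keys supplied here.
if __name__ == "__main__":
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine("sqlite:///statistics_demo.db")
    StatisticsModel.metadata.create_all(engine)  # create the table for this sketch
    session = sessionmaker(bind=engine)()
    now = datetime.now(timezone.utc)
    create_statistic(session, {"device_id": "jetson-01", "datetime": now})
    print(get_statistic(session, "jetson-01", now))
    print(get_statistics(session, device_id="jetson-01"))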
|
the-stack_106_22396 | import os
from pathlib import Path
from torchaudio.datasets.libritts import LIBRITTS
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
get_whitenoise,
save_wav,
normalize_wav,
)
class TestLibriTTS(TempDirMixin, TorchaudioTestCase):
backend = 'default'
root_dir = None
data = []
utterance_ids = [
[19, 198, '000000', '000000'],
[26, 495, '000004', '000000'],
]
original_text = 'this is the original text.'
normalized_text = 'this is the normalized text.'
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
base_dir = os.path.join(cls.root_dir, 'LibriTTS', 'train-clean-100')
for i, utterance_id in enumerate(cls.utterance_ids):
filename = f'{"_".join(str(u) for u in utterance_id)}.wav'
file_dir = os.path.join(base_dir, str(utterance_id[0]), str(utterance_id[1]))
os.makedirs(file_dir, exist_ok=True)
path = os.path.join(file_dir, filename)
data = get_whitenoise(sample_rate=24000, duration=2, n_channels=1, dtype='int16', seed=i)
save_wav(path, data, 24000)
cls.data.append(normalize_wav(data))
original_text_filename = f'{"_".join(str(u) for u in utterance_id)}.original.txt'
path_original = os.path.join(file_dir, original_text_filename)
with open(path_original, 'w') as file_:
file_.write(cls.original_text)
normalized_text_filename = f'{"_".join(str(u) for u in utterance_id)}.normalized.txt'
path_normalized = os.path.join(file_dir, normalized_text_filename)
with open(path_normalized, 'w') as file_:
file_.write(cls.normalized_text)
def _test_libritts(self, dataset):
n_ites = 0
for i, (waveform,
sample_rate,
original_text,
normalized_text,
speaker_id,
chapter_id,
utterance_id) in enumerate(dataset):
expected_ids = self.utterance_ids[i]
expected_data = self.data[i]
self.assertEqual(expected_data, waveform, atol=5e-5, rtol=1e-8)
assert sample_rate == 24000
assert speaker_id == expected_ids[0]
assert chapter_id == expected_ids[1]
assert original_text == self.original_text
assert normalized_text == self.normalized_text
assert utterance_id == f'{"_".join(str(u) for u in expected_ids[-4:])}'
n_ites += 1
assert n_ites == len(self.utterance_ids)
def test_libritts_str(self):
dataset = LIBRITTS(self.root_dir)
self._test_libritts(dataset)
def test_libritts_path(self):
dataset = LIBRITTS(Path(self.root_dir))
self._test_libritts(dataset)
|
the-stack_106_22398 | import contextlib
from subprocess import list2cmdline
from rrmngmnt.executor import Executor
import six
class FakeFile(six.StringIO):
def __init__(self, *args, **kwargs):
six.StringIO.__init__(self, *args, **kwargs)
self.data = None
def __exit__(self, *args):
self.close()
def __enter__(self):
return self
def close(self):
self.seek(0)
self.data = self.read()
six.StringIO.close(self)
class ByteFakeFile(six.BytesIO):
def __init__(self, buf=six.b('')):
six.BytesIO.__init__(self, six.b(buf))
self.data = None
def __exit__(self, *args):
self.close()
def __enter__(self):
return self
def close(self):
self.seek(0)
self.data = self.read().decode("utf-8", errors="replace")
six.BytesIO.close(self)
class FakeExecutor(Executor):
cmd_to_data = None
files_content = {}
class Session(Executor.Session):
def __init__(self, executor, timeout=None, use_pkey=False):
super(FakeExecutor.Session, self).__init__(executor)
self._timeout = timeout
def open(self):
pass
def get_data(self, cmd):
cmd = list2cmdline(cmd)
try:
return self._executor.cmd_to_data[cmd]
except KeyError:
raise Exception("There are no data for '%s'" % cmd)
def get_file_data(self, name):
try:
data = self._executor.files_content[name]
except KeyError:
raise Exception("There is not such file %s" % name)
if isinstance(data, FakeFile):
data = data.data
return data
def command(self, cmd):
return FakeExecutor.Command(cmd, self)
def run_cmd(self, cmd, input_=None, timeout=None):
cmd = self.command(cmd)
return cmd.run(input_, timeout)
def open_file(self, name, mode):
try:
data = self.get_file_data(name)
except Exception:
if mode[0] not in ('w', 'a'):
raise
else:
data = ''
if len(mode) == 2 and mode[1] == 'b':
data = ByteFakeFile(data)
else:
data = FakeFile(data)
if mode[0] == 'w':
data.seek(0)
self._executor.files_content[name] = data
return data
class Command(Executor.Command):
def get_rc(self):
return self._rc
def run(self, input_, timeout=None):
with self.execute() as (in_, out, err):
if input_:
in_.write(input_)
self.out = out.read()
self.err = err.read()
return self.rc, self.out, self.err
@contextlib.contextmanager
def execute(self, bufsize=-1, timeout=None):
rc, out, err = self._ss.get_data(self.cmd)
yield six.StringIO(), six.StringIO(out), six.StringIO(err)
self._rc = rc
def __init__(self, user, address):
super(FakeExecutor, self).__init__(user)
self.address = address
def session(self, timeout=None):
return FakeExecutor.Session(self, timeout)
def run_cmd(self, cmd, input_=None, tcp_timeout=None, io_timeout=None):
with self.session(tcp_timeout) as session:
return session.run_cmd(cmd, input_, io_timeout)
if __name__ == "__main__":
from rrmngmnt import RootUser
u = RootUser('password')
    e = FakeExecutor(u, '127.0.0.1')  # address is a placeholder; FakeExecutor never opens a real connection
e.cmd_to_data = {'echo ahoj': (0, 'ahoj', '')}
print(e.run_cmd(['echo', 'ahoj']))
with e.session() as ss:
with ss.open_file('/tmp/a', 'w') as fh:
fh.write("ahoj")
print(e.files_content['/tmp/a'], e.files_content['/tmp/a'].data)
|
the-stack_106_22399 | # module corpus.py
#
# Copyright (c) 2015 Rafael Reis
#
"""
corpus module - Classes and functions to read and process corpus data.
"""
__version__="1.0"
__author__ = "Rafael Reis <[email protected]>"
import re
def groupByFeed(c):
feeds = []
p = c.next()
lastIndex = p.index
text = p.sentence.replace('\n', "")
feed = Feed()
feed.addPiece(p)
while p:
p = c.next()
if p:
if p.index != lastIndex:
#print(text, '\n')
feeds.append(feed)
text = p.sentence.replace('\n', "")
lastIndex = p.index
feed = Feed([])
else:
text += " " + p.sentence
feed.addPiece(p)
    feeds.append(feed)  # keep the final feed accumulated when the loop ends
    return feeds
class CorpusAd:
"""
Class that represents a corpus in the AD format.
In order to creat an object of this class, you must
pass a fileName to the constructor.
"""
def __init__(self, fileName=None, speechVerbs=None):
"""
Receives a fileName as an argument. The fileName must be
a valid corpus in the AD format.
"""
if not fileName:
            raise InvalidArgumentException('You must provide a valid fileName!')
self.isFloresta = False
if "Floresta" in fileName:
self.isFloresta = True
self.fileName = fileName
with open(fileName, 'r') as f:
self.raw = f.readlines()
self.i = 0 # Index of the last line read. It'll be used in the "next" method.
self.rawLen = len(self.raw)
if speechVerbs:
self.verbs = speechVerbs
else:
self.verbs = SpeechVerbs()
def next(self):
p = Piece()
begin = self.goToSentenceBegin()
#p.sentence = self.getSentenceDescription()
if not begin:
return None
lastNode = Node()
lastNode.child = []
while not self.isSentenceEnd():
if self.isSentenceDescription():
p.id, p.sentence = self.getSentenceDescription()
elif self.isSource():
p.index = self.getPieceIndex()
p.source = self.raw[self.i].replace('\n', '');
elif self.isValidLevel() or self.raw[self.i] == "\"\n" or self.raw[self.i] == ",\n":
currLevel = self.getCurrentLevel()
newNode = Node()
newNode.child = []
newNode.level = currLevel
newNode.line = self.i
newNode.raw = self.raw[self.i]
newNode.type, newNode.stype, newNode.txt = self.getInfo()
newNode.pos = self.getPos(newNode.stype)
speechVerb = self.getSpeechVerb(newNode.stype)
newNode.speechVerb = speechVerb
newNode.anterior = lastNode
lastNode.posterior = newNode
#print("########################")
#print("# raw: ", newNode.raw)
#print("# level: ", newNode.level)
# Node traversing depends on if Corpus is Floresta or Bosque
if self.isFloresta:
if currLevel > lastNode.level and lastNode.level != 0: # Child from lastNode
newNode.parent = lastNode
#print("DEBUG 1")
elif currLevel == lastNode.level: # Sibbling of lastNode
newNode.parent = lastNode.parent
newNode.previous = lastNode
lastNode.next = newNode
#print("DEBUG 2")
elif currLevel == 0 or lastNode.level == 0:
newNode.previous = lastNode
lastNode.next = newNode
#print("DEBUG 3")
elif lastNode.parent:
#print("DEBUG 4")
newNode.parent = lastNode.parent
while newNode.parent and newNode.parent.level >= newNode.level:
if newNode.parent.parent:
newNode.parent = newNode.parent.parent
else:
newNode.parent = None
if newNode.parent:
newNode.parent.child[-1].next = newNode
newNode.previous = newNode.parent.child[-1]
if newNode.parent:
#print("# parent: ", newNode.parent.raw)
newNode.parent.child.append(newNode)
else: # Corpus is Bosque
if currLevel > lastNode.level: # Child from lastNode
newNode.parent = lastNode
elif currLevel == lastNode.level: # Sibbling of lastNode
newNode.parent = lastNode.parent
newNode.previous = lastNode
lastNode.next = newNode
else: #currLevel < previousLevel
newNode.parent = lastNode.parent
while newNode.parent.level >= newNode.level:
newNode.parent = newNode.parent.parent
newNode.parent.child[-1].next = newNode
newNode.previous = newNode.parent.child[-1]
newNode.parent.child.append(newNode)
lastNode = newNode
p.nodes.append(newNode)
if speechVerb:
p.speechVerb = speechVerb
p.speechNodes.append(newNode)
#print('node: ' + str(newNode.line) + ' ' + newNode.txt + ' ' + str(len(newNode.child)))
self.i += 1
return p
def isSentenceBegin(self):
#return self.raw[self.i] == "<s>\n"
return re.match(r'^<s' , self.raw[self.i])
def isSentenceDescription(self):
return re.match(r'^C[FP]\d+-\d+' , self.raw[self.i])
def goToSentenceBegin(self):
while self.i < self.rawLen and not self.isSentenceBegin():
self.i += 1
if self.i >= self.rawLen:
return None
return 1
def getSentenceDescription(self):
m = re.search(r'^(?P<ID>C[FP]\d+-\d+)\w*( )?(?P<SENT>.+)$', self.raw[self.i])
i = m.group('ID')
s = m.group('SENT')
return i, s
def isSentenceEnd(self):
return self.raw[self.i] == "</s>\n"
def isValidLevel(self):
return re.match(r'^=+' , self.raw[self.i])
def isSource(self):
return re.match(r'^SOURCE:' , self.raw[self.i]) or re.match(r'^SOURCECETENFolha' , self.raw[self.i])
def getCurrentLevel(self):
levels = re.findall('=', self.raw[self.i])
return len(levels)
def getPieceIndex(self):
m = re.search(r' n=(?P<INDEX>\d+) ', self.raw[self.i]) or re.search(r' id=(?P<INDEX>\d+) ', self.raw[self.i])
if not m:
raise NameError('SOURCE: sem n=\d+ : ' + self.raw[self.i])
return int(m.group('INDEX'))
def getInfo(self):
info = re.sub(r'=+', "", self.raw[self.i]).replace("\n", "")
if len(info) == 1 or info.find(":") == -1:
return info, None, info
else:
#m = re.search(r'(?P<TYPE>.+):(?P<TAIL>.*)$', info)
m = re.search(r'(?P<TYPE>(.+?)):(?P<TAIL>.*)$', info)
txt = ''
if info.find(")") > 0:
n = re.search(r'\)([ \t]*)(?P<TEXT>[^ ]*)$', info)
txt = n.group('TEXT').strip()
return m.group('TYPE'), m.group('TAIL'), txt
def getSpeechVerb(self, s):
if s:
v = None
if ("v-fin" in s):
m = re.search(r'.*v-fin\(\'(?P<VERB>(\w|-)+)\'', s) or re.search(r'.*v-fin\(\"(?P<VERB>(\w|-)+)\"', s)
v = m.group('VERB')
if v and v in self.verbs.all:
return v
return ''
def getPos(self, tail):
pos = ''
if tail:
m = re.search(r'(?P<POS>\w+)\(', tail)
if m:
if tail.find("<n>") > 0:
pos = "N"
elif tail.find("<adv>") > 0:
pos = "ADV"
else:
pos = m.group('POS').upper()
else:
pos = tail
return pos
class SpeechVerbs:
def __init__(self):
self.verbs = self.loadSpeechVerbs()
self.all = self.verbs[0]
self.pattern1 = self.verbs[1]
self.pattern2 = self.verbs[2]
self.pattern3 = self.verbs[3]
self.pattern4 = self.verbs[4]
self.pattern5 = self.verbs[5]
self.pattern6 = self.verbs[6]
self.pattern7 = self.verbs[7]
def loadSpeechVerbs(self):
verbs = [[], [], [], [], [], [], [], []]
s = set()
i = 1
with open('verbos_dizer_ACDC.txt', 'r') as f:
for line in f:
if re.match(r'#(?P<INDEX>\d+)', line):
m = re.search(r'#(?P<INDEX>\d+)\n', line)
i = int(m.group('INDEX'))
else:
line = line.strip()
s.add(line)
verbs[i].append(line)
verbs[0] = list(s)
return verbs
def __len__(self):
return len(self.verbs)
class Feed:
    def __init__(self, pieces=None):
        # Use None as the default to avoid sharing one mutable list between instances.
        self.pieces = pieces if pieces is not None else []
def addPiece(self, piece):
self.pieces.append(piece)
class Piece:
def __init__(self):
self.start = 0
self.end = 0
self.sentence = ""
self.nodes = []
self.speechNodes = []
self.speechVerb = ""
self.index = 0
self.id = ""
self.indexSpeechVerb = 0
self.source = ""
self.roots = []
class Node:
def __init__(self, type=None, stype=None, child=[], parent=None, next=None, previous=None, line=None, level=0):
self.type = type
self.stype = stype
self.child = child
self.parent = parent
self.next = next
self.previous = previous
self.line = line
self.level = level
self.txt = ''
self.raw = ''
self.posterior = None
self.anterior = None
self.quote = False
self.author = False
self.dep = ''
self.nosubj = False
self.pattern = ''
def text(self):
t = self.txt
if t:
t = ' ' + t
for c in self.child:
t += c.text()
return t
def markQuote(self):
self.quote = True
for c in self.child:
c.markQuote()
def markNosubj(self):
self.nosubj = True
for c in self.child:
c.markNosubj()
def markAuthor(self):
self.author = True
for c in self.child:
c.markAuthor()
def markDep(self, label):
if self.dep == '':
self.dep = label
for c in self.child:
c.markDep(label)
def markPattern(self, label):
self.pattern = label
for c in self.child:
c.markPattern(label)
class InvalidArgumentException(Exception):
pass
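# Usage sketch: read a corpus in the AD ("arvores deitadas") format and group
# its pieces by feed. The corpus file name below is hypothetical, and the verb
# list 'verbos_dizer_ACDC.txt' must be in the working directory because
# SpeechVerbs() reads it when CorpusAd is constructed.
if __name__ == '__main__':
    corpus = CorpusAd('Bosque_CF_8.0.ad.txt')
    feeds = groupByFeed(corpus)
    print('Read {0} feeds'.format(len(feeds)))
    if feeds:
        print('First feed has {0} pieces'.format(len(feeds[0].pieces)))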
|
the-stack_106_22400 | # Copyright 2014, Doug Wiegley, A10 Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
from acos_client.v21 import base
class BaseSSL(base.BaseV21):
def get(self, name, **kwargs):
return self._post(("slb.template.%s.search" % self.template_type),
{'name': name}, **kwargs)
def _set(self, action, name, cert_name, key_name, **kwargs):
params = {
"%s_template" % self.template_type: {
"cert_name": cert_name,
"key_name": key_name,
"name": name
}
}
self._post(action, params, **kwargs)
def create(self, name, cert_name, key_name, **kwargs):
self._set(("slb.template.%s.create" % self.template_type),
name, cert_name, key_name, **kwargs)
def update(self, name, cert_name, key_name, **kwargs):
self._set(("slb.template.%s.update" % self.template_type),
name, cert_name, key_name, **kwargs)
def delete(self, name, **kwargs):
self._post(("slb.template.%s.delete" % self.template_type),
{"name": name}, **kwargs)
class ClientSSL(BaseSSL):
template_type = "client_ssl"
class ServerSSL(BaseSSL):
template_type = "server_ssl"
|
the-stack_106_22403 | from abc import ABC, abstractmethod
from color_palette import ColorPalette
from functools import reduce
from random import randint
class DoomFire(ABC):
def __init__(self, width, height, pixel_size = 4, decay_rate = 2, \
windforce = 1, fire_source_inc = (4, 6), \
fire_source_enabled = True, color_palette = ColorPalette()):
self.width = width
self.height = height
self.pixel_size = pixel_size
self.decay_rate = decay_rate
self.windforce = windforce
self.fire_source_inc = fire_source_inc
self.color_palette = color_palette
self.max_intensity = len(self.color_palette.get_colors()) - 1
self.pixels_array = [0] * self.width * self.height
self.fire_source_enabled = fire_source_enabled
if self.fire_source_enabled:
self.create_fire_source()
def create_fire_source(self):
self.pixels_array[-self.width:] = [self.max_intensity] * self.width
self.fire_source_enabled = True
def destroy_fire_source(self):
self.pixels_array[-self.width:] = [0] * self.width
self.fire_source_enabled = False
def has_fire_source(self):
return self.fire_source_enabled
def increase_fire_source(self):
fire_source_row = self.pixels_array[-self.width:]
for i, f in enumerate(fire_source_row):
if f == self.max_intensity:
continue
inc = randint(self.fire_source_inc[0], self.fire_source_inc[1])
fire_source_row[i] += inc if f + inc <= self.max_intensity else \
self.max_intensity - f
self.pixels_array[-self.width:] = fire_source_row
fire_source_row_sum = reduce(lambda x, y: x + y, fire_source_row)
if fire_source_row_sum > 0 and not self.fire_source_enabled:
self.fire_source_enabled = True
def decrease_fire_source(self):
fire_source_row = self.pixels_array[-self.width:]
for i, f in enumerate(fire_source_row):
if f == 0:
continue
dec = randint(self.fire_source_inc[0], self.fire_source_inc[1])
fire_source_row[i] -= dec if f - dec >= 0 else f
self.pixels_array[-self.width:] = fire_source_row
fire_source_row_sum = reduce(lambda x, y: x + y, fire_source_row)
if fire_source_row_sum == 0 and self.fire_source_enabled:
self.fire_source_enabled = False
def update(self):
for j in range(self.width):
for i in range(self.height - 1):
current_pixel_index = i * self.width + j
below_pixel_index = current_pixel_index + self.width
below_pixel_intensity = self.pixels_array[below_pixel_index]
decay = randint(0, self.decay_rate)
new_pixel_intensity = 0
if below_pixel_intensity - decay > 0:
new_pixel_intensity = below_pixel_intensity - decay
wind_direction = randint(-self.windforce, self.windforce)
# Checking if the wind direction exceeds the boundaries of
# pixels array and if it does, reverse it
if current_pixel_index + wind_direction >= \
len(self.pixels_array) or current_pixel_index + \
wind_direction < 0:
wind_direction = -wind_direction
pixel_neighbor_index = current_pixel_index + wind_direction
self.pixels_array[pixel_neighbor_index] = new_pixel_intensity
@abstractmethod
def render(self):
pass
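# Minimal concrete subclass sketch. DoomFire leaves render() abstract, so the
# ASCII renderer below is purely illustrative and not part of the original
# project; the character ramp is an arbitrary choice.
class AsciiDoomFire(DoomFire):
    CHARS = ' .:-=+*#%@'
    def render(self):
        # Map each intensity (0..max_intensity) onto the character ramp and
        # print the pixel buffer row by row.
        scale = (len(self.CHARS) - 1) / max(self.max_intensity, 1)
        for i in range(self.height):
            row = self.pixels_array[i * self.width:(i + 1) * self.width]
            print(''.join(self.CHARS[int(v * scale)] for v in row))
if __name__ == '__main__':
    fire = AsciiDoomFire(40, 20)
    for _ in range(30):
        fire.update()
    fire.render()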
|
the-stack_106_22406 | """
Created on Feb 9, 2016
@author: Chris Smith
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import os
from warnings import warn
import numpy as np
import h5py
from skimage.measure import block_reduce
from skimage.util import crop
from sidpy.sid import Translator
from sidpy.hdf.hdf_utils import write_simple_attrs
from pyUSID.io.write_utils import Dimension, calc_chunks
from pyUSID.io.hdf_utils import create_indexed_group, write_main_dataset
from .image import read_image
from .df_utils import dm4reader
from .df_utils.dm_utils import parse_dm4_parms, read_dm3
class OneViewTranslator(Translator):
"""
Translate Pytchography data from a set of images to an HDF5 file
"""
def __init__(self, *args, **kwargs):
super(OneViewTranslator, self).__init__(*args, **kwargs)
self.rebin = False
self.bin_factor = 1
self.h5_f = None
self.binning_func = self.__no_bin
self.bin_func = None
self.h5_main = None
self.root_image_list = list()
self.crop_method = 'percent'
self.crop_ammount = None
self.image_list_tag = None
def translate(self, h5_path, image_path, bin_factor=None, bin_func=np.mean, start_image=0, scan_size_x=None,
scan_size_y=None, crop_ammount=None, crop_method='percent'):
"""
        Basic method that adds Ptychography data to an existing hdf5 file
You must have already done the basic translation with BEodfTranslator
Parameters
----------------
h5_path : str
Absolute path to where the HDF5 file should be located
image_path : str
Absolute path to folder holding the image files
bin_factor : array_like of uint, optional
Downsampling factor for each dimension. Default is None.
bin_func : callable, optional
Function which will be called to calculate the return value
of each block. Function must implement an axis parameter,
i.e. numpy.mean. Ignored if bin_factor is None. Default is
numpy.mean.
start_image : int, optional
Integer denoting which image in the file path should be considered the starting
point. Default is 0, start with the first image on the list.
scan_size_x : int, optional
Number of Ronchigrams in the x direction. Default is None, value will be determined
from the number of images and `scan_size_y` if it is given.
scan_size_y : int, optional
Number of Ronchigrams in the y direction. Default is None, value will be determined
from the number of images and `scan_size_x` if it is given.
crop_ammount : uint or list of uints, optional
How much should be cropped from the original image. Can be a single unsigned
integer or a list of unsigned integers. A single integer will crop the same
ammount from all edges. A list of two integers will crop the x-dimension by
the first integer and the y-dimension by the second integer. A list of 4
integers will crop the image as [left, right, top, bottom].
crop_method : str, optional
Which cropping method should be used. How much of the image is removed is
determined by the value of `crop_ammount`.
'percent' - A percentage of the image is removed.
'absolute' - The specific number of pixel is removed.
Returns
----------
h5_main : h5py.Dataset
HDF5 Dataset object that contains the flattened images
"""
# Open the hdf5 file and delete any contents
if os.path.exists(h5_path):
os.remove(h5_path)
h5_f = h5py.File(h5_path, 'w')
self.h5_f = h5_f
self.crop_method = crop_method
self.crop_ammount = crop_ammount
'''
Get the list of all files with the .tif extension and
the number of files in the list
'''
image_path = os.path.abspath(image_path)
root_file_list, file_list = self._parse_file_path(image_path)
size, image_parms = self._getimageparms(file_list[0])
usize, vsize = size
self.image_list_tag = image_parms.pop('Image_Tag', None)
tmp, _ = read_image(file_list[0])
if crop_ammount is not None:
tmp = self.crop_ronc(tmp)
usize, vsize = tmp.shape
num_files = len(file_list)
if scan_size_x is None and scan_size_y is None:
scan_size_x = int(np.sqrt(num_files))
scan_size_y = int(num_files/scan_size_x)
elif scan_size_x is None:
scan_size_x = int(num_files/scan_size_y)
elif scan_size_y is None:
scan_size_y = int(num_files/scan_size_x)
'''
Check if a bin_factor is given. Set up binning objects if it is.
'''
if bin_factor is not None:
self.rebin = True
if isinstance(bin_factor, int):
self.bin_factor = (bin_factor, bin_factor)
elif len(bin_factor) == 2:
self.bin_factor = tuple(bin_factor)
else:
raise ValueError('Input parameter `bin_factor` must be a length 2 array_like or an integer.\n' +
'{} was given.'.format(bin_factor))
usize = int(usize / self.bin_factor[0])
vsize = int(vsize / self.bin_factor[1])
self.binning_func = block_reduce
self.bin_func = bin_func
num_files = scan_size_x * scan_size_y
h5_main, h5_mean_spec, h5_ronch = self._setupH5(usize, vsize, np.float32,
scan_size_x, scan_size_y,
image_parms)
for root_file in root_file_list:
print('Saving the root image located at {}.'.format(root_file))
self._create_root_image(root_file)
self._read_data(file_list[start_image:start_image + num_files],
h5_main, h5_mean_spec, h5_ronch, image_path)
self.h5_f.close()
return
def _create_root_image(self, image_path):
"""
Create the Groups and Datasets for a single root image
Parameters
----------
image_path : str
Path to the image file
Returns
-------
None
"""
image, image_parms = read_dm3(image_path)
if image.ndim == 3:
image = np.sum(image, axis=0)
'''
Create the Measurement and Channel Groups to hold the
image Datasets
'''
meas_grp = create_indexed_group(self.h5_f, 'Measurement')
chan_grp = create_indexed_group(meas_grp, 'Channel')
'''
Set the Measurement Group attributes
'''
usize, vsize = image.shape
image_parms['image_size_u'] = usize
image_parms['image_size_v'] = vsize
image_parms['translator'] = 'OneView'
image_parms['num_pixels'] = image.size
write_simple_attrs(meas_grp, image_parms)
'''
Build Spectroscopic and Position dimensions
'''
spec_desc = Dimension('Image', 'a.u.', [1])
pos_desc = [Dimension('X', 'pixel', np.arange(image.shape[0])),
Dimension('Y', 'pixel', np.arange(image.shape[1]))]
h5_image = write_main_dataset(chan_grp, np.reshape(image, (-1, 1)), 'Raw_Data',
'Intensity', 'a.u.',
pos_desc, spec_desc)
self.root_image_list.append(h5_image)
def _read_data(self, file_list, h5_main, h5_mean_spec, h5_ronch, image_path):
"""
Iterates over the images in `file_list`, reading each image and downsampling if
        requested, and writes the flattened image to file. Also builds the Mean_Ronchigram
and the Spectroscopic_Mean datasets at the same time.
Parameters
----------
file_list : list of str
List of all files in `image_path` that will be read
h5_main : h5py.Dataset
Dataset which will hold the Ronchigrams
h5_mean_spec : h5py.Dataset
Dataset which will hold the Spectroscopic Mean
h5_ronch : h5py.Dataset
Dataset which will hold the Mean Ronchigram
image_path : str
            Absolute file path to the directory which holds the images
Returns
-------
None
"""
mean_ronch = np.zeros(h5_ronch.shape, dtype=np.float32)
num_files = len(file_list)
for ifile, thisfile in enumerate(file_list):
selected = (ifile + 1) % int(round(num_files / 16)) == 0
if selected:
print('Processing file...{}% - reading: {}'.format(round(100 * ifile / num_files), thisfile))
image, _ = read_image(os.path.join(image_path, thisfile), get_parms=False, header=self.image_list_tag)
# image, _ = read_image(os.path.join(image_path, thisfile), get_parms=False)
image = self.crop_ronc(image)
image = self.binning_func(image, self.bin_factor, self.bin_func)
image = image.flatten()
h5_main[ifile, :] = image
h5_mean_spec[ifile] = np.mean(image)
mean_ronch += image
self.h5_f.flush()
h5_ronch[:] = mean_ronch / num_files
self.h5_f.flush()
def crop_ronc(self, ronc):
"""
        Crop the input Ronchigram by the specified amount using the specified method.
Parameters
----------
ronc : numpy.array
Input image to be cropped.
Returns
-------
cropped_ronc : numpy.array
Cropped image
"""
if self.crop_ammount is None:
return ronc
crop_ammount = self.crop_ammount
crop_method = self.crop_method
if crop_method == 'percent':
crop_ammount = np.round(np.atleast_2d(crop_ammount)/100.0*ronc.shape)
crop_ammount = tuple([tuple(row) for row in crop_ammount.astype(np.uint32)])
elif crop_method == 'absolute':
if isinstance(crop_ammount, int):
pass
elif len(crop_ammount) == 2:
crop_ammount = ((crop_ammount[0],), (crop_ammount[1],))
elif len(crop_ammount) == 4:
crop_ammount = ((crop_ammount[0], crop_ammount[1]), (crop_ammount[2], crop_ammount[3]))
else:
raise ValueError('The crop_ammount should be an integer or list of 2 or 4 integers.')
else:
raise ValueError('Allowed values of crop_method are percent and absolute.')
cropped_ronc = crop(ronc, crop_ammount)
if any([dim == 0 for dim in cropped_ronc.shape]):
warn("Requested crop ammount is greater than the image size. No cropping will be done.")
return ronc
return cropped_ronc
@staticmethod
def downSampRoncVec(ronch_vec, binning_factor):
"""
Downsample the image by taking the mean over nearby values
Parameters
----------
ronch_vec : ndarray
Image data
binning_factor : int
factor to reduce the size of the image by
Returns
-------
ronc_mat3_mean : ndarray
Flattened downsampled image
"""
ccd_pix = int(np.sqrt(ronch_vec.size))
ronc_mat = ronch_vec.reshape(ccd_pix, ccd_pix)
        ronc_mat2 = ronc_mat.reshape(ccd_pix, ccd_pix // binning_factor, binning_factor)
        ronc_mat2_mean = ronc_mat2.mean(2)  # take the mean along the 3rd dimension
        ronc_mat3 = ronc_mat2_mean.reshape(ccd_pix // binning_factor, binning_factor, -1)
ronc_mat3_mean = ronc_mat3.mean(1)
return ronc_mat3_mean.reshape(-1)
@staticmethod
def _parse_file_path(image_folder):
"""
Returns a list of all files in the directory given by path
Parameters
---------------
        image_folder : string / unicode
absolute path to directory containing files
Returns
----------
file_list : list of strings
names of all files in directory located at path
"""
file_list = list()
root_file_list = list()
allowed_image_types = ['.dm3', '.dm4', '.jpg', '.png', '.tif',
'.tiff', '.jpeg', '.bmp']
for root, dirs, files in os.walk(image_folder):
for thisfile in files:
_, ext = os.path.splitext(thisfile)
if ext not in allowed_image_types:
continue
if root == image_folder:
root_file_list.append(os.path.join(image_folder, thisfile))
else:
file_list.append(os.path.join(root, thisfile))
return root_file_list, file_list
@staticmethod
def _getimageparms(image):
"""
Returns the x and y size of the image in pixels
Parameters
------------
image : string / unicode
absolute path to the dm4 file
Returns
-----------
size : unsigned integer
            x and y dimensions of the image
parms : dict
Image parameters from the dm4 file
"""
dm4_file = dm4reader.DM4File.open(image)
tags = dm4_file.read_directory()
parms = parse_dm4_parms(dm4_file, tags, '')
u_size = parms['Root_ImageList_SubDir_000_ImageData_Dimensions_Tag_000']
v_size = parms['Root_ImageList_SubDir_000_ImageData_Dimensions_Tag_001']
size = u_size, v_size
return size, parms
def _setupH5(self, usize, vsize, data_type, scan_size_x, scan_size_y, image_parms):
"""
Setup the HDF5 file in which to store the data including creating
the Position and Spectroscopic datasets
Parameters
----------
usize : int
Number of pixel columns in the images
vsize : int
Number of pixel rows in the images
data_type : type
Data type to save image as
scan_size_x : int
Number of images in the x dimension
scan_size_y : int
Number of images in the y dimension
image_parms : dict
Dictionary of parameters
Returns
-------
h5_main : h5py.Dataset
HDF5 Dataset that the images will be written into
h5_mean_spec : h5py.Dataset
HDF5 Dataset that the mean over all positions will be written
into
h5_ronch : h5py.Dataset
            HDF5 Dataset that the mean over all Spectroscopic steps will be
written into
"""
num_pixels = usize * vsize
num_files = scan_size_x * scan_size_y
root_parms = dict()
root_parms['data_type'] = 'PtychographyData'
main_parms = {'num_images': num_files,
'image_size_u': usize,
'image_size_v': vsize,
'num_pixels': num_pixels,
'translator': 'Ptychography',
'scan_size_x': scan_size_x,
'scan_size_y': scan_size_y}
main_parms.update(image_parms)
# Create the hdf5 data Group
write_simple_attrs(self.h5_f, root_parms)
meas_grp = create_indexed_group(self.h5_f, 'Measurement')
write_simple_attrs(meas_grp, main_parms)
chan_grp = create_indexed_group(meas_grp, 'Channel')
# Build the Position and Spectroscopic Datasets
spec_desc = [Dimension('U', 'pixel', np.arange(usize)),
Dimension('V', 'pixel', np.arange(vsize))]
pos_desc = [Dimension('X', 'pixel', np.arange(scan_size_x)),
Dimension('Y', 'pixel', np.arange(scan_size_y))]
ds_chunking = calc_chunks([num_files, num_pixels],
data_type(0).itemsize,
unit_chunks=(1, num_pixels))
# Allocate space for Main_Data and Pixel averaged Data
h5_main = write_main_dataset(chan_grp, (num_files, num_pixels), 'Raw_Data',
'Intensity', 'a.u.',
pos_desc, spec_desc,
chunks=ds_chunking, dtype=data_type)
h5_ronch= chan_grp.create_dataset('Mean_Ronchigram', shape=[num_pixels], dtype=np.float32)
h5_mean_spec = chan_grp.create_dataset('Spectroscopic_Mean', shape=[num_files], dtype=np.float32)
self.h5_f.flush()
return h5_main, h5_mean_spec, h5_ronch
@staticmethod
def __no_bin(image, *args, **kwargs):
"""
Does absolutely nothing to the image. Exists so that we can have
a bin function to call whether we actually rebin the image or not.
Parameters
----------
image : ndarray
Image
args:
Argument list
kwargs:
Keyword argument list
Returns
-------
image : ndarray
The input image
"""
return image
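# Usage sketch; the output path, image folder and scan dimensions below are
# hypothetical. It assumes the sidpy Translator base class can be instantiated
# without arguments and that the folder holds one Ronchigram image per scan
# position (with any root images sitting in the top-level directory).
if __name__ == '__main__':
    translator = OneViewTranslator()
    translator.translate('ptychography_data.h5', '/path/to/ronchigram/folder',
                         bin_factor=2, scan_size_x=64, scan_size_y=64)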
|
the-stack_106_22407 | import requests
from bs4 import BeautifulSoup
from .classes import work
from .utility import file_management, parse_paths
from .classes import author
from .utility import pdf_generation
"""
ACADEMIA.EDU WEB SCRAPER and AUTOMATIC (Basic) PDF CV GENERATOR
A PYTHON-A-THON 2016 PROJECT by ZACHARY CACERES
www.python-a-thon.com
"""
def get_user():
url = input("Please paste the URL of the Academia.edu page: ")
return url.strip()
def query_website(url):
r = requests.get('{0}'.format(url))
return r.text
def convert_html_to_soup(site_text):
html = site_text
soup = BeautifulSoup(html, 'html.parser')
return soup
def extract_prof_data(html_results_file, soup):
name = None
affiliation = None
interests = None
portrait_url = None
bio = None
# Get Professor Name
prof_name = soup.select(parse_paths.PROF_NAME_PATH)
for p in prof_name:
name = p.text
#Get Affiliations
affiliations = soup.select(parse_paths.PROF_AFFILIATION_PATH)
for aff in affiliations:
affiliation = aff.text
#Get Research Interests
research_interests = soup.select(parse_paths.PROF_RESEARCH_INTERESTS_PATH)
for interest in research_interests:
interests = interest.text
# Get Url to Portrait
portrait = soup.select(parse_paths.PROF_PORTRAIT_PATH)
for p in portrait:
if p.has_attr('src'):
portrait_url = p['src']
print("PORTRAIT: {0}".format(portrait_url))
# Get Biography
biography = soup.select(parse_paths.PROF_BIO_PATH)
for b in biography:
bio = b.text
# Makes a new Author object
make_author(html_results_file, name, affiliation, interests, portrait_url, bio)
return name
def extract_work_data(soup):
# Lists to assemble top ten works on Academia.edu
total_titles = []
total_abstracts = []
total_download_urls = []
# Get Total Works
total_works = soup.select(parse_paths.WORKS_PATH)
print("–––––– AUTHOR HAS {0} TOTAL WORKS ––––––––".format(len(total_works)))
for work in total_works:
# Get Titles
title = work.select(parse_paths.TITLES_PATH)
for t in title: # Number of titles determines number of works on the author's page
total_titles.append(t.text)
# Get Abstracts
abstract = work.select(parse_paths.ABSTRACT_UNTRUNCATED_PATH)
"""
Looks through list of elements at work abstracts and download_urls path in
parse_paths module. If element is found, append text to total_abstracts and
total_download_urls list if not, append None type so that lists are created with
proper number of elements.
"""
if len(abstract) == 0:
total_abstracts.append(None)
else:
for a in abstract:
total_abstracts.append(a.text)
# Get Download URLS
download_urls = work.select(parse_paths.DOWNLOAD_URL_PATH)
if len(download_urls) == 0:
total_download_urls.append(None)
else:
for link in download_urls:
if link.has_attr('href'):
total_download_urls.append(link['href'])
else:
total_download_urls.append(None)
print("WARNING: Link was found but has NO HREF!")
print("––––– AUTHOR OVERVIEW –––––––")
print("Total Titles is: {0}".format(len(total_titles)))
print("Total Abstracts length is: {0}".format(len(total_abstracts)))
print("Total Download URLs is: {0}".format(len(total_download_urls)))
generate_work_lists(total_titles, total_abstracts, total_download_urls)
def generate_work_lists(total_titles, total_abstracts, total_download_urls):
counter = 0
for t in total_titles:
print("Making work at index {0} with title {1}".format(counter, t))
temp_abs = total_abstracts[counter]
temp_download = total_download_urls[counter]
make_work(t, temp_abs, temp_download)
counter += 1
def make_author(html_results_file, name, affiliation, interests, portrait_url, bio):
prof = author.Author(name, affiliation, interests, portrait_url, bio) # Construct new author object
file_management.add_prof_data_to_text_file(html_results_file, prof)
new_work = []
def make_work(title, abstract, download_url):
    new_work.append(work.Work(title, abstract, download_url))
    print("NEW WORK is {0}".format(len(new_work)))
def process_input_to_request_pdf(url=None):
    if not url:
        url = get_user()
    cv = process_pdf_request(url)
    return cv
def process_pdf_request(url):
html_results_file = file_management.create_text_file()
site_text = query_website(url)
soup = convert_html_to_soup(site_text)
prof_name = extract_prof_data(html_results_file, soup)
extract_work_data(soup)
file_management.add_work_data_to_text_file(html_results_file, new_work)
html_results_file.close() # File must be closed for PDFKit to print to file correctly
final_cv = pdf_generation.generate_pdf(prof_name)
return final_cv
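# Hedged usage sketch (not part of the original script): the module is normally
# driven through process_input_to_request_pdf(), either interactively (no URL, so
# get_user() prompts for one) or with a profile URL passed in. The URL below is a
# placeholder, and the call performs live HTTP requests.
def _example_generate_cv():
    cv = process_input_to_request_pdf('https://university.academia.edu/SomeAuthor')
    print("Generated CV: {0}".format(cv))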
if __name__ == '__main__':
process_input_to_request_pdf()
|
the-stack_106_22408 | import os
import tensorflow as tf
class LogSaver:
    def __init__(self, logs_path, model_name, dataset_name, mode):
        if not os.path.isdir(logs_path):
            os.makedirs(logs_path)
        self.train_writer = tf.summary.create_file_writer(
            '{}/{}/{}/{}/train/'.format(logs_path, dataset_name, model_name, mode))
        self.valid_writer = tf.summary.create_file_writer(
            '{}/{}/{}/{}/valid/'.format(logs_path, dataset_name, model_name, mode))
def log_train(self, loss, acc, bce, global_step):
with self.train_writer.as_default():
tf.summary.scalar('loss', loss, step=global_step)
tf.summary.scalar('acc', acc, step=global_step)
tf.summary.scalar('entropy', bce, step=global_step)
def log_valid(self, acc, bce, global_step):
with self.valid_writer.as_default():
tf.summary.scalar('acc', acc, step=global_step)
tf.summary.scalar('entropy', bce, step=global_step)
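# Hedged usage sketch (not part of the original module): wiring LogSaver into a
# training loop. The paths, names and metric values below are placeholders.
def _example_logging():
    log_saver = LogSaver('logs', 'my_model', 'my_dataset', 'supervised')
    for global_step in range(1, 4):
        # In a real loop these values would come from the model and loss function.
        loss, acc, bce = 0.5, 0.8, 0.4
        log_saver.log_train(loss, acc, bce, global_step)
        log_saver.log_valid(acc, bce, global_step)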
|
the-stack_106_22411 | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import atexit
import concurrent.futures
import json
import logging
import threading
import traceback
from collections import OrderedDict
from contextlib import contextmanager
from types import MappingProxyType, TracebackType
from typing import (
Any,
Callable,
Iterator,
MutableSequence,
Optional,
Sequence,
Tuple,
Type,
Union,
)
from opentelemetry import context as context_api
from opentelemetry import trace as trace_api
from opentelemetry.sdk import util
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import sampling
from opentelemetry.sdk.util import BoundedDict, BoundedList
from opentelemetry.sdk.util.instrumentation import InstrumentationInfo
from opentelemetry.trace import SpanContext
from opentelemetry.trace.propagation import SPAN_KEY
from opentelemetry.trace.status import (
EXCEPTION_STATUS_FIELD,
Status,
StatusCanonicalCode,
)
from opentelemetry.util import time_ns, types
logger = logging.getLogger(__name__)
MAX_NUM_ATTRIBUTES = 1000
MAX_NUM_EVENTS = 1000
MAX_NUM_LINKS = 1000
VALID_ATTR_VALUE_TYPES = (bool, str, int, float)
class SpanProcessor:
"""Interface which allows hooks for SDK's `Span` start and end method
invocations.
Span processors can be registered directly using
:func:`TracerProvider.add_span_processor` and they are invoked
in the same order as they were registered.
"""
def on_start(
self,
span: "Span",
parent_context: Optional[context_api.Context] = None,
) -> None:
"""Called when a :class:`opentelemetry.trace.Span` is started.
This method is called synchronously on the thread that starts the
span, therefore it should not block or throw an exception.
Args:
span: The :class:`opentelemetry.trace.Span` that just started.
parent_context: The parent context of the span that just started.
"""
def on_end(self, span: "Span") -> None:
"""Called when a :class:`opentelemetry.trace.Span` is ended.
This method is called synchronously on the thread that ends the
span, therefore it should not block or throw an exception.
Args:
span: The :class:`opentelemetry.trace.Span` that just ended.
"""
def shutdown(self) -> None:
"""Called when a :class:`opentelemetry.sdk.trace.Tracer` is shutdown.
"""
def force_flush(self, timeout_millis: int = 30000) -> bool:
"""Export all ended spans to the configured Exporter that have not yet
been exported.
Args:
timeout_millis: The maximum amount of time to wait for spans to be
exported.
Returns:
False if the timeout is exceeded, True otherwise.
"""
class SynchronousMultiSpanProcessor(SpanProcessor):
"""Implementation of class:`SpanProcessor` that forwards all received
events to a list of span processors sequentially.
The underlying span processors are called in sequential order as they were
added.
"""
def __init__(self):
# use a tuple to avoid race conditions when adding a new span and
# iterating through it on "on_start" and "on_end".
self._span_processors = () # type: Tuple[SpanProcessor, ...]
self._lock = threading.Lock()
def add_span_processor(self, span_processor: SpanProcessor) -> None:
"""Adds a SpanProcessor to the list handled by this instance."""
with self._lock:
self._span_processors = self._span_processors + (span_processor,)
def on_start(
self,
span: "Span",
parent_context: Optional[context_api.Context] = None,
) -> None:
for sp in self._span_processors:
sp.on_start(span, parent_context=parent_context)
def on_end(self, span: "Span") -> None:
for sp in self._span_processors:
sp.on_end(span)
def shutdown(self) -> None:
"""Sequentially shuts down all underlying span processors.
"""
for sp in self._span_processors:
sp.shutdown()
def force_flush(self, timeout_millis: int = 30000) -> bool:
"""Sequentially calls force_flush on all underlying
:class:`SpanProcessor`
Args:
timeout_millis: The maximum amount of time over all span processors
to wait for spans to be exported. In case the first n span
processors exceeded the timeout followup span processors will be
skipped.
Returns:
True if all span processors flushed their spans within the
given timeout, False otherwise.
"""
deadline_ns = time_ns() + timeout_millis * 1000000
for sp in self._span_processors:
current_time_ns = time_ns()
if current_time_ns >= deadline_ns:
return False
if not sp.force_flush((deadline_ns - current_time_ns) // 1000000):
return False
return True
class ConcurrentMultiSpanProcessor(SpanProcessor):
"""Implementation of :class:`SpanProcessor` that forwards all received
events to a list of span processors in parallel.
Calls to the underlying span processors are forwarded in parallel by
submitting them to a thread pool executor and waiting until each span
processor finished its work.
Args:
num_threads: The number of threads managed by the thread pool executor
and thus defining how many span processors can work in parallel.
"""
def __init__(self, num_threads: int = 2):
# use a tuple to avoid race conditions when adding a new span and
# iterating through it on "on_start" and "on_end".
self._span_processors = () # type: Tuple[SpanProcessor, ...]
self._lock = threading.Lock()
self._executor = concurrent.futures.ThreadPoolExecutor(
max_workers=num_threads
)
def add_span_processor(self, span_processor: SpanProcessor) -> None:
"""Adds a SpanProcessor to the list handled by this instance."""
with self._lock:
self._span_processors = self._span_processors + (span_processor,)
def _submit_and_await(
self,
func: Callable[[SpanProcessor], Callable[..., None]],
*args: Any,
**kwargs: Any
):
futures = []
for sp in self._span_processors:
future = self._executor.submit(func(sp), *args, **kwargs)
futures.append(future)
for future in futures:
future.result()
def on_start(
self,
span: "Span",
parent_context: Optional[context_api.Context] = None,
) -> None:
self._submit_and_await(
lambda sp: sp.on_start, span, parent_context=parent_context
)
def on_end(self, span: "Span") -> None:
self._submit_and_await(lambda sp: sp.on_end, span)
def shutdown(self) -> None:
"""Shuts down all underlying span processors in parallel."""
self._submit_and_await(lambda sp: sp.shutdown)
def force_flush(self, timeout_millis: int = 30000) -> bool:
"""Calls force_flush on all underlying span processors in parallel.
Args:
timeout_millis: The maximum amount of time to wait for spans to be
exported.
Returns:
True if all span processors flushed their spans within the given
timeout, False otherwise.
"""
futures = []
for sp in self._span_processors: # type: SpanProcessor
future = self._executor.submit(sp.force_flush, timeout_millis)
futures.append(future)
timeout_sec = timeout_millis / 1e3
done_futures, not_done_futures = concurrent.futures.wait(
futures, timeout_sec
)
if not_done_futures:
return False
for future in done_futures:
if not future.result():
return False
return True
class EventBase(abc.ABC):
def __init__(self, name: str, timestamp: Optional[int] = None) -> None:
self._name = name
if timestamp is None:
self._timestamp = time_ns()
else:
self._timestamp = timestamp
@property
def name(self) -> str:
return self._name
@property
def timestamp(self) -> int:
return self._timestamp
@property
@abc.abstractmethod
def attributes(self) -> types.Attributes:
pass
class Event(EventBase):
"""A text annotation with a set of attributes.
Args:
name: Name of the event.
attributes: Attributes of the event.
timestamp: Timestamp of the event. If `None` it will filled
automatically.
"""
def __init__(
self,
name: str,
attributes: types.Attributes = None,
timestamp: Optional[int] = None,
) -> None:
super().__init__(name, timestamp)
self._attributes = attributes
@property
def attributes(self) -> types.Attributes:
return self._attributes
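# Illustrative note (not part of the upstream SDK source): events are usually
# attached through Span.add_event(), e.g. span.add_event("cache-miss",
# {"key": "user:42"}), rather than constructed directly.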
def _is_valid_attribute_value(value: types.AttributeValue) -> bool:
"""Checks if attribute value is valid.
An attribute value is valid if it is one of the valid types. If the value
is a sequence, it is only valid if all items in the sequence are of valid
type, not a sequence, and are of the same type.
"""
if isinstance(value, Sequence):
if len(value) == 0:
return True
first_element_type = type(value[0])
if first_element_type not in VALID_ATTR_VALUE_TYPES:
logger.warning(
"Invalid type %s in attribute value sequence. Expected one of "
"%s or a sequence of those types",
first_element_type.__name__,
[valid_type.__name__ for valid_type in VALID_ATTR_VALUE_TYPES],
)
return False
for element in list(value)[1:]:
if not isinstance(element, first_element_type):
logger.warning(
"Mixed types %s and %s in attribute value sequence",
first_element_type.__name__,
type(element).__name__,
)
return False
elif not isinstance(value, VALID_ATTR_VALUE_TYPES):
logger.warning(
"Invalid type %s for attribute value. Expected one of %s or a "
"sequence of those types",
type(value).__name__,
[valid_type.__name__ for valid_type in VALID_ATTR_VALUE_TYPES],
)
return False
return True
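# Illustrative behaviour note (not part of the upstream SDK source): plain values
# and homogeneous sequences of one valid type pass the check, mixed or nested
# values do not, e.g.
#   _is_valid_attribute_value("db.statement")   -> True
#   _is_valid_attribute_value(("GET", "POST"))  -> True
#   _is_valid_attribute_value([1, "one"])       -> False (mixed element types)
#   _is_valid_attribute_value({"key": "value"}) -> False (mappings are rejected)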
def _filter_attribute_values(attributes: types.Attributes):
if attributes:
for attr_key, attr_value in list(attributes.items()):
if _is_valid_attribute_value(attr_value):
if isinstance(attr_value, MutableSequence):
attributes[attr_key] = tuple(attr_value)
else:
attributes.pop(attr_key)
def _create_immutable_attributes(attributes):
return MappingProxyType(attributes.copy() if attributes else {})
class Span(trace_api.Span):
"""See `opentelemetry.trace.Span`.
Users should create `Span` objects via the `Tracer` instead of this
constructor.
Args:
name: The name of the operation this span represents
context: The immutable span context
parent: This span's parent's `opentelemetry.trace.SpanContext`, or
None if this is a root span
sampler: The sampler used to create this span
trace_config: TODO
resource: Entity producing telemetry
attributes: The span's attributes to be exported
events: Timestamped events to be exported
links: Links to other spans to be exported
span_processor: `SpanProcessor` to invoke when starting and ending
this `Span`.
"""
def __new__(cls, *args, **kwargs):
if cls is Span:
raise TypeError("Span must be instantiated via a tracer.")
return super().__new__(cls)
def __init__(
self,
name: str,
context: trace_api.SpanContext,
parent: Optional[trace_api.SpanContext] = None,
sampler: Optional[sampling.Sampler] = None,
trace_config: None = None, # TODO
resource: Resource = Resource.create({}),
attributes: types.Attributes = None, # TODO
events: Sequence[Event] = None, # TODO
links: Sequence[trace_api.Link] = (),
kind: trace_api.SpanKind = trace_api.SpanKind.INTERNAL,
span_processor: SpanProcessor = SpanProcessor(),
instrumentation_info: InstrumentationInfo = None,
set_status_on_exception: bool = True,
) -> None:
self.name = name
self.context = context
self.parent = parent
self.sampler = sampler
self.trace_config = trace_config
self.resource = resource
self.kind = kind
self._set_status_on_exception = set_status_on_exception
self.span_processor = span_processor
self.status = None
self._lock = threading.Lock()
_filter_attribute_values(attributes)
if not attributes:
self.attributes = self._new_attributes()
else:
self.attributes = BoundedDict.from_map(
MAX_NUM_ATTRIBUTES, attributes
)
self.events = self._new_events()
if events:
for event in events:
_filter_attribute_values(event.attributes)
# pylint: disable=protected-access
event._attributes = _create_immutable_attributes(
event.attributes
)
self.events.append(event)
if links is None:
self.links = self._new_links()
else:
self.links = BoundedList.from_seq(MAX_NUM_LINKS, links)
self._end_time = None # type: Optional[int]
self._start_time = None # type: Optional[int]
self.instrumentation_info = instrumentation_info
@property
def start_time(self):
return self._start_time
@property
def end_time(self):
return self._end_time
def __repr__(self):
return '{}(name="{}", context={})'.format(
type(self).__name__, self.name, self.context
)
@staticmethod
def _new_attributes():
return BoundedDict(MAX_NUM_ATTRIBUTES)
@staticmethod
def _new_events():
return BoundedList(MAX_NUM_EVENTS)
@staticmethod
def _new_links():
return BoundedList(MAX_NUM_LINKS)
@staticmethod
def _format_context(context):
x_ctx = OrderedDict()
x_ctx["trace_id"] = trace_api.format_trace_id(context.trace_id)
x_ctx["span_id"] = trace_api.format_span_id(context.span_id)
x_ctx["trace_state"] = repr(context.trace_state)
return x_ctx
@staticmethod
def _format_attributes(attributes):
if isinstance(attributes, BoundedDict):
return attributes._dict # pylint: disable=protected-access
if isinstance(attributes, MappingProxyType):
return attributes.copy()
return attributes
@staticmethod
def _format_events(events):
f_events = []
for event in events:
f_event = OrderedDict()
f_event["name"] = event.name
f_event["timestamp"] = util.ns_to_iso_str(event.timestamp)
f_event["attributes"] = Span._format_attributes(event.attributes)
f_events.append(f_event)
return f_events
@staticmethod
def _format_links(links):
f_links = []
for link in links:
f_link = OrderedDict()
f_link["context"] = Span._format_context(link.context)
f_link["attributes"] = Span._format_attributes(link.attributes)
f_links.append(f_link)
return f_links
def to_json(self, indent=4):
parent_id = None
if self.parent is not None:
if isinstance(self.parent, Span):
ctx = self.parent.context
parent_id = trace_api.format_span_id(ctx.span_id)
elif isinstance(self.parent, SpanContext):
parent_id = trace_api.format_span_id(self.parent.span_id)
start_time = None
if self.start_time:
start_time = util.ns_to_iso_str(self.start_time)
end_time = None
if self.end_time:
end_time = util.ns_to_iso_str(self.end_time)
if self.status is not None:
status = OrderedDict()
status["canonical_code"] = str(self.status.canonical_code.name)
if self.status.description:
status["description"] = self.status.description
f_span = OrderedDict()
f_span["name"] = self.name
f_span["context"] = self._format_context(self.context)
f_span["kind"] = str(self.kind)
f_span["parent_id"] = parent_id
f_span["start_time"] = start_time
f_span["end_time"] = end_time
if self.status is not None:
f_span["status"] = status
f_span["attributes"] = self._format_attributes(self.attributes)
f_span["events"] = self._format_events(self.events)
f_span["links"] = self._format_links(self.links)
f_span["resource"] = self.resource.attributes
return json.dumps(f_span, indent=indent)
def get_span_context(self):
return self.context
def set_attribute(self, key: str, value: types.AttributeValue) -> None:
with self._lock:
if not self.is_recording():
return
has_ended = self.end_time is not None
if has_ended:
logger.warning("Setting attribute on ended span.")
return
if not key:
logger.warning("invalid key (empty or null)")
return
if _is_valid_attribute_value(value):
# Freeze mutable sequences defensively
if isinstance(value, MutableSequence):
value = tuple(value)
if isinstance(value, bytes):
try:
value = value.decode()
except ValueError:
logger.warning("Byte attribute could not be decoded.")
return
with self._lock:
self.attributes[key] = value
def _add_event(self, event: EventBase) -> None:
with self._lock:
if not self.is_recording():
return
has_ended = self.end_time is not None
if has_ended:
logger.warning("Calling add_event() on an ended span.")
return
self.events.append(event)
def add_event(
self,
name: str,
attributes: types.Attributes = None,
timestamp: Optional[int] = None,
) -> None:
_filter_attribute_values(attributes)
attributes = _create_immutable_attributes(attributes)
self._add_event(
Event(
name=name,
attributes=attributes,
timestamp=time_ns() if timestamp is None else timestamp,
)
)
def start(
self,
start_time: Optional[int] = None,
parent_context: Optional[context_api.Context] = None,
) -> None:
with self._lock:
if not self.is_recording():
return
has_started = self.start_time is not None
if not has_started:
self._start_time = (
start_time if start_time is not None else time_ns()
)
if has_started:
logger.warning("Calling start() on a started span.")
return
self.span_processor.on_start(self, parent_context=parent_context)
def end(self, end_time: Optional[int] = None) -> None:
with self._lock:
if not self.is_recording():
return
if self.start_time is None:
raise RuntimeError("Calling end() on a not started span.")
has_ended = self.end_time is not None
if not has_ended:
if self.status is None:
self.status = Status(canonical_code=StatusCanonicalCode.OK)
self._end_time = (
end_time if end_time is not None else time_ns()
)
if has_ended:
logger.warning("Calling end() on an ended span.")
return
self.span_processor.on_end(self)
def update_name(self, name: str) -> None:
with self._lock:
has_ended = self.end_time is not None
if has_ended:
logger.warning("Calling update_name() on an ended span.")
return
self.name = name
def is_recording(self) -> bool:
return True
def set_status(self, status: trace_api.Status) -> None:
with self._lock:
has_ended = self.end_time is not None
if has_ended:
logger.warning("Calling set_status() on an ended span.")
return
self.status = status
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
"""Ends context manager and calls `end` on the `Span`."""
if (
self.status is None
and self._set_status_on_exception
and exc_val is not None
):
self.set_status(
Status(
canonical_code=StatusCanonicalCode.UNKNOWN,
description="{}: {}".format(exc_type.__name__, exc_val),
)
)
super().__exit__(exc_type, exc_val, exc_tb)
def record_exception(self, exception: Exception) -> None:
"""Records an exception as a span event."""
try:
stacktrace = traceback.format_exc()
except Exception: # pylint: disable=broad-except
# workaround for python 3.4, format_exc can raise
# an AttributeError if the __context__ on
# an exception is None
stacktrace = "Exception occurred on stacktrace formatting"
self.add_event(
name="exception",
attributes={
"exception.type": exception.__class__.__name__,
"exception.message": str(exception),
"exception.stacktrace": stacktrace,
},
)
class _Span(Span):
"""Protected implementation of `opentelemetry.trace.Span`.
This constructor should only be used internally.
"""
class Tracer(trace_api.Tracer):
"""See `opentelemetry.trace.Tracer`.
Args:
name: The name of the tracer.
shutdown_on_exit: Register an atexit hook to shut down the tracer when
the application exits.
"""
def __init__(
self,
source: "TracerProvider",
instrumentation_info: InstrumentationInfo,
) -> None:
self.source = source
self.instrumentation_info = instrumentation_info
def start_as_current_span(
self,
name: str,
context: Optional[context_api.Context] = None,
kind: trace_api.SpanKind = trace_api.SpanKind.INTERNAL,
attributes: types.Attributes = None,
links: Sequence[trace_api.Link] = (),
record_exception: bool = True,
) -> Iterator[trace_api.Span]:
span = self.start_span(name, context, kind, attributes, links)
return self.use_span(
span, end_on_exit=True, record_exception=record_exception
)
def start_span( # pylint: disable=too-many-locals
self,
name: str,
context: Optional[context_api.Context] = None,
kind: trace_api.SpanKind = trace_api.SpanKind.INTERNAL,
attributes: types.Attributes = None,
links: Sequence[trace_api.Link] = (),
start_time: Optional[int] = None,
set_status_on_exception: bool = True,
) -> trace_api.Span:
parent_span_context = trace_api.get_current_span(
context
).get_span_context()
if parent_span_context is not None and not isinstance(
parent_span_context, trace_api.SpanContext
):
raise TypeError(
"parent_span_context must be a SpanContext or None."
)
if parent_span_context is None or not parent_span_context.is_valid:
parent_span_context = None
trace_id = self.source.ids_generator.generate_trace_id()
trace_flags = None
trace_state = None
else:
trace_id = parent_span_context.trace_id
trace_flags = parent_span_context.trace_flags
trace_state = parent_span_context.trace_state
# The sampler decides whether to create a real or no-op span at the
# time of span creation. No-op spans do not record events, and are not
# exported.
# The sampler may also add attributes to the newly-created span, e.g.
# to include information about the sampling result.
sampling_result = self.source.sampler.should_sample(
parent_span_context, trace_id, name, attributes, links,
)
trace_flags = (
trace_api.TraceFlags(trace_api.TraceFlags.SAMPLED)
if sampling_result.decision.is_sampled()
else trace_api.TraceFlags(trace_api.TraceFlags.DEFAULT)
)
span_context = trace_api.SpanContext(
trace_id,
self.source.ids_generator.generate_span_id(),
is_remote=False,
trace_flags=trace_flags,
trace_state=trace_state,
)
# Only record if is_recording() is true
if sampling_result.decision.is_recording():
# pylint:disable=protected-access
span = _Span(
name=name,
context=span_context,
parent=parent_span_context,
sampler=self.source.sampler,
resource=self.source.resource,
attributes=sampling_result.attributes.copy(),
span_processor=self.source._active_span_processor,
kind=kind,
links=links,
instrumentation_info=self.instrumentation_info,
set_status_on_exception=set_status_on_exception,
)
span.start(start_time=start_time, parent_context=context)
else:
span = trace_api.DefaultSpan(context=span_context)
return span
@contextmanager
def use_span(
self,
span: trace_api.Span,
end_on_exit: bool = False,
record_exception: bool = True,
) -> Iterator[trace_api.Span]:
try:
token = context_api.attach(context_api.set_value(SPAN_KEY, span))
try:
yield span
finally:
context_api.detach(token)
except Exception as error: # pylint: disable=broad-except
# pylint:disable=protected-access
if isinstance(span, Span):
if record_exception:
span.record_exception(error)
if span.status is None and span._set_status_on_exception:
span.set_status(
Status(
canonical_code=getattr(
error,
EXCEPTION_STATUS_FIELD,
StatusCanonicalCode.UNKNOWN,
),
description="{}: {}".format(
type(error).__name__, error
),
)
)
raise
finally:
if end_on_exit:
span.end()
class TracerProvider(trace_api.TracerProvider):
def __init__(
self,
sampler: sampling.Sampler = sampling.DEFAULT_ON,
resource: Resource = Resource.create({}),
shutdown_on_exit: bool = True,
active_span_processor: Union[
SynchronousMultiSpanProcessor, ConcurrentMultiSpanProcessor
] = None,
ids_generator: trace_api.IdsGenerator = None,
):
self._active_span_processor = (
active_span_processor or SynchronousMultiSpanProcessor()
)
if ids_generator is None:
self.ids_generator = trace_api.RandomIdsGenerator()
else:
self.ids_generator = ids_generator
self.resource = resource
self.sampler = sampler
self._atexit_handler = None
if shutdown_on_exit:
self._atexit_handler = atexit.register(self.shutdown)
def get_tracer(
self,
instrumenting_module_name: str,
instrumenting_library_version: str = "",
) -> "trace_api.Tracer":
if not instrumenting_module_name: # Reject empty strings too.
instrumenting_module_name = "ERROR:MISSING MODULE NAME"
logger.error("get_tracer called with missing module name.")
return Tracer(
self,
InstrumentationInfo(
instrumenting_module_name, instrumenting_library_version
),
)
def add_span_processor(self, span_processor: SpanProcessor) -> None:
"""Registers a new :class:`SpanProcessor` for this `TracerProvider`.
The span processors are invoked in the same order they are registered.
"""
# no lock here because add_span_processor is thread safe for both
# SynchronousMultiSpanProcessor and ConcurrentMultiSpanProcessor.
self._active_span_processor.add_span_processor(span_processor)
def shutdown(self):
"""Shut down the span processors added to the tracer."""
self._active_span_processor.shutdown()
if self._atexit_handler is not None:
atexit.unregister(self._atexit_handler)
self._atexit_handler = None
def force_flush(self, timeout_millis: int = 30000) -> bool:
"""Requests the active span processor to process all spans that have not
yet been processed.
By default force flush is called sequentially on all added span
processors. This means that span processors further back in the list
have less time to flush their spans.
To have span processors flush their spans in parallel it is possible to
initialize the tracer provider with an instance of
`ConcurrentMultiSpanProcessor` at the cost of using multiple threads.
Args:
timeout_millis: The maximum amount of time to wait for spans to be
processed.
Returns:
False if the timeout is exceeded, True otherwise.
"""
return self._active_span_processor.force_flush(timeout_millis)
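# Hedged end-to-end sketch (not part of the upstream SDK source): creating a
# provider, registering a (no-op) span processor and recording one span. A real
# setup would register an exporting processor from opentelemetry.sdk.trace.export
# instead of the bare SpanProcessor used here for illustration.
def _example_tracing():
    tracer_provider = TracerProvider()
    tracer_provider.add_span_processor(SpanProcessor())
    tracer = tracer_provider.get_tracer(__name__)
    with tracer.start_as_current_span("parent-operation") as span:
        span.set_attribute("component", "example")
        span.add_event("work-started", {"items": 3})
    tracer_provider.force_flush()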
|
the-stack_106_22412 | import vcr
from time import sleep
from unittest import TestCase
from contentful_management.environment import Environment
from contentful_management.errors import NotFoundError
from .test_helper import CLIENT, PLAYGROUND_SPACE
BASE_ENVIRONMENT_ITEM = {
'sys': {
'id': 'foo',
'type': 'Environment',
'space': {
'sys': {
'id': 'foobar',
'type': 'Link',
'linkType': 'Space'
}
}
},
'name': 'foo'
}
class EnvironmentTest(TestCase):
def test_entry(self):
entry = Environment(BASE_ENVIRONMENT_ITEM)
self.assertEqual(str(entry), "<Environment[foo] id='foo'>")
def test_entry_to_json(self):
entry = Environment(BASE_ENVIRONMENT_ITEM)
self.assertEqual(entry.to_json(), BASE_ENVIRONMENT_ITEM)
def test_entry_to_link(self):
entry = Environment(BASE_ENVIRONMENT_ITEM)
self.assertEqual(entry.to_link().to_json(), {
'sys': {
'id': 'foo',
'type': 'Link',
'linkType': 'Environment'
}
})
@vcr.use_cassette('fixtures/environment/all.yaml')
def test_environments(self):
environments = CLIENT.environments(PLAYGROUND_SPACE).all()
self.assertEqual(str(environments[0]), "<Environment[master] id='master'>")
@vcr.use_cassette('fixtures/environment/no_id_create.yaml')
def test_create_environment_without_id_raises_404(self):
with self.assertRaises(NotFoundError):
CLIENT.environments(PLAYGROUND_SPACE).create(None, {
'name': 'SDK Tests - No ID'
})
@vcr.use_cassette('fixtures/environment/create.yaml')
def test_create_environment_with_id(self):
environment = CLIENT.environments(PLAYGROUND_SPACE).create('sdk_tests', {
'name': 'SDK Tests'
})
self.assertEqual(environment.name, 'SDK Tests')
self.assertEqual(environment.id, 'sdk_tests')
@vcr.use_cassette('fixtures/environment/create_different_source.yaml')
def test_create_environment_with_different_source(self):
master = CLIENT.environments(PLAYGROUND_SPACE).find('master')
self.assertNotEqual(len(master.entries().all()), 0)
non_master_source = CLIENT.environments(PLAYGROUND_SPACE).create('non-master-py', {
'name': 'Non Master - Python',
'source_environment_id': 'source'
})
sleep(5) # Need to sleep to ensure environment is ready
non_master_source.reload()
self.assertEqual(len(non_master_source.entries().all()), 0)
@vcr.use_cassette('fixtures/environment/find.yaml')
def test_update_environment(self):
environment = CLIENT.environments(PLAYGROUND_SPACE).find('sdk_tests')
self.assertEqual(environment.name, 'SDK Tests')
with vcr.use_cassette('fixtures/environment/update.yaml'):
environment.name = 'something else'
environment.save()
self.assertEqual(environment.name, 'something else')
@vcr.use_cassette('fixtures/environment/find.yaml')
def test_delete_environment(self):
environment = CLIENT.environments(PLAYGROUND_SPACE).find('sdk_tests')
with vcr.use_cassette('fixtures/environment/delete.yaml'):
environment.delete()
with vcr.use_cassette('fixtures/environment/not_found.yaml'):
with self.assertRaises(NotFoundError):
CLIENT.environments(PLAYGROUND_SPACE).find('sdk_tests')
@vcr.use_cassette('fixtures/environment/delete.yaml')
def test_delete_environment_directly_from_client_proxy(self):
CLIENT.environments(PLAYGROUND_SPACE).delete('sdk_tests')
@vcr.use_cassette('fixtures/environment/find_2.yaml')
def test_fetch_entries_from_an_environment(self):
environment = CLIENT.environments(PLAYGROUND_SPACE).find('testing')
with vcr.use_cassette('fixtures/environment/all_entries.yaml'):
entries = environment.entries().all()
self.assertEqual(str(entries[0]), "<Entry[cat] id='IJLRrADsqq2AmwcugoYeK'>")
@vcr.use_cassette('fixtures/environment/find_2.yaml')
def test_environment_proxies(self):
environment = CLIENT.environments(PLAYGROUND_SPACE).find('testing')
self.assertEqual(str(environment.entries()), "<EnvironmentEntriesProxy space_id='facgnwwgj5fe' environment_id='testing'>")
self.assertEqual(str(environment.assets()), "<EnvironmentAssetsProxy space_id='facgnwwgj5fe' environment_id='testing'>")
self.assertEqual(str(environment.content_types()), "<EnvironmentContentTypesProxy space_id='facgnwwgj5fe' environment_id='testing'>")
self.assertEqual(str(environment.locales()), "<EnvironmentLocalesProxy space_id='facgnwwgj5fe' environment_id='testing'>")
self.assertEqual(str(environment.ui_extensions()), "<EnvironmentUIExtensionsProxy space_id='facgnwwgj5fe' environment_id='testing'>")
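# Hedged note (not part of the test suite): CLIENT and PLAYGROUND_SPACE come from
# test_helper; a minimal stand-in, using a placeholder CMA token, might look like
#   import contentful_management
#   CLIENT = contentful_management.Client('<cma_access_token>')
#   PLAYGROUND_SPACE = 'facgnwwgj5fe'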
|
the-stack_106_22413 | import json
from collections import Counter
import re
from VQA.PythonHelperTools.vqaTools.vqa import VQA
import random
import numpy as np
from VQAGenerator import VQAGenerator
import VQAModel
from matplotlib import pyplot as plt
import os
from PIL import Image, ImageOps
from keras.models import load_model
from random import randint
from Environment import DATADIR
from typing import NamedTuple
import time
from VQAConfig import VQAConfig
def trainConfig(config: VQAConfig,recompile=False):
training_generator = VQAGenerator(True,False, config)
if config.modelIdentifier:
model = load_model(DATADIR+'/Results/'+config.testName+'/model_'+config.modelIdentifier+'_'+str(config.epoch)+'.keras')
else:
model = VQAModel.createModel(training_generator.questionLength, training_generator.answerLength, training_generator.gloveEncoding(), config)
# model.get_layer('noise_layer_image').stddev = config.noise
# model.get_layer('noise_layer_question').stddev = config.noise
prediction_generator = VQAGenerator(False,True, config)
eval_model = VQAModel.evalModel(model)
img = prediction_generator.getImage(0)
if not os.path.exists(DATADIR+'/Results/'+config.testName):
os.mkdir(DATADIR+'/Results/'+config.testName)
t = time.localtime()
timestamp = time.strftime('%b-%d-%Y_%H%M', t)
result_str =str(timestamp)+"\n\n"+ str(config) +'\n\n'
with open(DATADIR+'/Results/'+config.testName+'/results-'+timestamp+'.txt', "w") as text_file:
text_file.write(result_str)
if recompile:
model.compile(optimizer=config.optimizer, loss=config.loss)
best = 0
for i in range(config.epoch+1,config.stop):
# if i == 1 and config.loss=='binary_crossentropy':
# model.compile(optimizer="adam", loss="categorical_crossentropy")
# if i == 3 and config.loss=='binary_crossentropy':
# model.compile(optimizer=config.optimizer, loss=config.loss)
model.fit_generator(training_generator, epochs=1,workers=6)
model.save(DATADIR+'/Results/'+config.testName+'/model_'+timestamp+"_"+str(i)+'.keras')
print("Test set")
prediction = eval_model.predict_generator(prediction_generator,workers=6, steps= None if i>6 else 128)
test_accuracy, results = prediction_generator.evaluate(prediction)
print("Training set")
training_generator.predict = True
prediction = eval_model.predict_generator(training_generator,workers=6, steps=128)
train_accuracy, _ = training_generator.evaluate(prediction)
training_generator.predict = False
result_str += "{0:2d}, {1:6.4f}, {2:6.4f}\n".format(i,test_accuracy,train_accuracy)
with open(DATADIR+'/Results/'+config.testName+'/results-'+timestamp+'.txt', "w") as text_file:
text_file.write(result_str)
with open(DATADIR+'/Results/'+config.testName+'/answers-'+timestamp+"_"+str(i)+'.json', 'w') as fp:
json.dump(results, fp)
if test_accuracy > best:
print("best")
best = test_accuracy
if __name__ == '__main__':
trainConfig(VQAConfig(
testName='test_name',
gloveName='glove.42B.300d',
gloveSize=300,
dropout=True,
augmentations=None,
stop=30,
gatedTanh=True,
initializer="he_normal",
batchNorm=False,
embedding='gru',
imageFeaturemapSize=24,
imageFeatureChannels=1536,
predictNormalizer='sigmoid',
loss='categorical_crossentropy',
optimizer='adam',
)
    )
|
the-stack_106_22414 | """
Classes allowing "generic" relations through ContentType and object-id fields.
"""
from __future__ import unicode_literals
from collections import defaultdict
from functools import partial
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.db import models, router, DEFAULT_DB_ALIAS
from django.db.models import signals
from django.db.models.fields.related import ForeignObject, ForeignObjectRel
from django.db.models.related import PathInfo
from django.db.models.sql.where import Constraint
from django.forms import ModelForm, ALL_FIELDS
from django.forms.models import (BaseModelFormSet, modelformset_factory, save_instance,
modelform_defines_fields)
from django.contrib.admin.options import InlineModelAdmin, flatten_fieldsets
from django.contrib.contenttypes.models import ContentType
from django.utils import six
from django.utils.deprecation import RenameMethodsBase
from django.utils.encoding import smart_text
class RenameGenericForeignKeyMethods(RenameMethodsBase):
renamed_methods = (
('get_prefetch_query_set', 'get_prefetch_queryset', DeprecationWarning),
)
class GenericForeignKey(six.with_metaclass(RenameGenericForeignKeyMethods)):
"""
Provides a generic relation to any object through content-type/object-id
fields.
"""
def __init__(self, ct_field="content_type", fk_field="object_id", for_concrete_model=True):
self.ct_field = ct_field
self.fk_field = fk_field
self.for_concrete_model = for_concrete_model
def contribute_to_class(self, cls, name):
self.name = name
self.model = cls
self.cache_attr = "_%s_cache" % name
cls._meta.add_virtual_field(self)
# For some reason I don't totally understand, using weakrefs here doesn't work.
signals.pre_init.connect(self.instance_pre_init, sender=cls, weak=False)
# Connect myself as the descriptor for this field
setattr(cls, name, self)
def instance_pre_init(self, signal, sender, args, kwargs, **_kwargs):
"""
Handles initializing an object with the generic FK instead of
content-type/object-id fields.
"""
if self.name in kwargs:
value = kwargs.pop(self.name)
kwargs[self.ct_field] = self.get_content_type(obj=value)
kwargs[self.fk_field] = value._get_pk_val()
def get_content_type(self, obj=None, id=None, using=None):
if obj is not None:
return ContentType.objects.db_manager(obj._state.db).get_for_model(
obj, for_concrete_model=self.for_concrete_model)
elif id:
return ContentType.objects.db_manager(using).get_for_id(id)
else:
# This should never happen. I love comments like this, don't you?
raise Exception("Impossible arguments to GFK.get_content_type!")
def get_prefetch_queryset(self, instances):
# For efficiency, group the instances by content type and then do one
# query per model
fk_dict = defaultdict(set)
# We need one instance for each group in order to get the right db:
instance_dict = {}
ct_attname = self.model._meta.get_field(self.ct_field).get_attname()
for instance in instances:
# We avoid looking for values if either ct_id or fkey value is None
ct_id = getattr(instance, ct_attname)
if ct_id is not None:
fk_val = getattr(instance, self.fk_field)
if fk_val is not None:
fk_dict[ct_id].add(fk_val)
instance_dict[ct_id] = instance
ret_val = []
for ct_id, fkeys in fk_dict.items():
instance = instance_dict[ct_id]
ct = self.get_content_type(id=ct_id, using=instance._state.db)
ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys))
# For doing the join in Python, we have to match both the FK val and the
# content type, so we use a callable that returns a (fk, class) pair.
def gfk_key(obj):
ct_id = getattr(obj, ct_attname)
if ct_id is None:
return None
else:
model = self.get_content_type(id=ct_id,
using=obj._state.db).model_class()
return (model._meta.pk.get_prep_value(getattr(obj, self.fk_field)),
model)
return (ret_val,
lambda obj: (obj._get_pk_val(), obj.__class__),
gfk_key,
True,
self.cache_attr)
def is_cached(self, instance):
return hasattr(instance, self.cache_attr)
def __get__(self, instance, instance_type=None):
if instance is None:
return self
try:
return getattr(instance, self.cache_attr)
except AttributeError:
rel_obj = None
# Make sure to use ContentType.objects.get_for_id() to ensure that
# lookups are cached (see ticket #5570). This takes more code than
# the naive ``getattr(instance, self.ct_field)``, but has better
# performance when dealing with GFKs in loops and such.
f = self.model._meta.get_field(self.ct_field)
ct_id = getattr(instance, f.get_attname(), None)
if ct_id:
ct = self.get_content_type(id=ct_id, using=instance._state.db)
try:
rel_obj = ct.get_object_for_this_type(pk=getattr(instance, self.fk_field))
except ObjectDoesNotExist:
pass
setattr(instance, self.cache_attr, rel_obj)
return rel_obj
def __set__(self, instance, value):
ct = None
fk = None
if value is not None:
ct = self.get_content_type(obj=value)
fk = value._get_pk_val()
setattr(instance, self.ct_field, ct)
setattr(instance, self.fk_field, fk)
setattr(instance, self.cache_attr, value)
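# Hedged illustration (not part of this module): the usual shape of a model that
# uses GenericForeignKey, pairing a ContentType foreign key with an object-id
# field. The model and field names below are placeholders.
#
#     class TaggedItem(models.Model):
#         tag = models.SlugField()
#         content_type = models.ForeignKey(ContentType)
#         object_id = models.PositiveIntegerField()
#         content_object = GenericForeignKey('content_type', 'object_id')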
class GenericRelation(ForeignObject):
"""Provides an accessor to generic related objects (e.g. comments)"""
def __init__(self, to, **kwargs):
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
kwargs['rel'] = GenericRel(
self, to, related_name=kwargs.pop('related_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),)
# Override content-type/object-id field names on the related class
self.object_id_field_name = kwargs.pop("object_id_field", "object_id")
self.content_type_field_name = kwargs.pop("content_type_field", "content_type")
self.for_concrete_model = kwargs.pop("for_concrete_model", True)
kwargs['blank'] = True
kwargs['editable'] = False
kwargs['serialize'] = False
# This construct is somewhat of an abuse of ForeignObject. This field
# represents a relation from pk to object_id field. But, this relation
# isn't direct, the join is generated reverse along foreign key. So,
# the from_field is object_id field, to_field is pk because of the
# reverse join.
super(GenericRelation, self).__init__(
to, to_fields=[],
from_fields=[self.object_id_field_name], **kwargs)
def resolve_related_fields(self):
self.to_fields = [self.model._meta.pk.name]
return [(self.rel.to._meta.get_field_by_name(self.object_id_field_name)[0],
self.model._meta.pk)]
def get_reverse_path_info(self):
opts = self.rel.to._meta
target = opts.get_field_by_name(self.object_id_field_name)[0]
return [PathInfo(self.model._meta, opts, (target,), self.rel, True, False)]
def get_choices_default(self):
return super(GenericRelation, self).get_choices(include_blank=False)
def value_to_string(self, obj):
qs = getattr(obj, self.name).all()
return smart_text([instance._get_pk_val() for instance in qs])
def get_joining_columns(self, reverse_join=False):
if not reverse_join:
# This error message is meant for the user, and from user
# perspective this is a reverse join along the GenericRelation.
raise ValueError('Joining in reverse direction not allowed.')
return super(GenericRelation, self).get_joining_columns(reverse_join)
def contribute_to_class(self, cls, name):
super(GenericRelation, self).contribute_to_class(cls, name, virtual_only=True)
# Save a reference to which model this class is on for future use
self.model = cls
# Add the descriptor for the relation
setattr(cls, self.name, ReverseGenericRelatedObjectsDescriptor(self, self.for_concrete_model))
def contribute_to_related_class(self, cls, related):
pass
def set_attributes_from_rel(self):
pass
def get_internal_type(self):
return "ManyToManyField"
def get_content_type(self):
"""
Returns the content type associated with this field's model.
"""
return ContentType.objects.get_for_model(self.model,
for_concrete_model=self.for_concrete_model)
def get_extra_restriction(self, where_class, alias, remote_alias):
field = self.rel.to._meta.get_field_by_name(self.content_type_field_name)[0]
contenttype_pk = self.get_content_type().pk
cond = where_class()
cond.add((Constraint(remote_alias, field.column, field), 'exact', contenttype_pk), 'AND')
return cond
def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
"""
Return all objects related to ``objs`` via this ``GenericRelation``.
"""
return self.rel.to._base_manager.db_manager(using).filter(**{
"%s__pk" % self.content_type_field_name:
ContentType.objects.db_manager(using).get_for_model(
self.model, for_concrete_model=self.for_concrete_model).pk,
"%s__in" % self.object_id_field_name:
[obj.pk for obj in objs]
})
class ReverseGenericRelatedObjectsDescriptor(object):
"""
This class provides the functionality that makes the related-object
managers available as attributes on a model class, for fields that have
multiple "remote" values and have a GenericRelation defined in their model
(rather than having another model pointed *at* them). In the example
"article.publications", the publications attribute is a
ReverseGenericRelatedObjectsDescriptor instance.
"""
def __init__(self, field, for_concrete_model=True):
self.field = field
self.for_concrete_model = for_concrete_model
def __get__(self, instance, instance_type=None):
if instance is None:
return self
# Dynamically create a class that subclasses the related model's
# default manager.
rel_model = self.field.rel.to
superclass = rel_model._default_manager.__class__
RelatedManager = create_generic_related_manager(superclass)
qn = connection.ops.quote_name
content_type = ContentType.objects.db_manager(instance._state.db).get_for_model(
instance, for_concrete_model=self.for_concrete_model)
join_cols = self.field.get_joining_columns(reverse_join=True)[0]
manager = RelatedManager(
model = rel_model,
instance = instance,
source_col_name = qn(join_cols[0]),
target_col_name = qn(join_cols[1]),
content_type = content_type,
content_type_field_name = self.field.content_type_field_name,
object_id_field_name = self.field.object_id_field_name,
prefetch_cache_name = self.field.attname,
)
return manager
def __set__(self, instance, value):
manager = self.__get__(instance)
manager.clear()
for obj in value:
manager.add(obj)
def create_generic_related_manager(superclass):
"""
Factory function for a manager that subclasses 'superclass' (which is a
Manager) and adds behavior for generic related objects.
"""
class GenericRelatedObjectManager(superclass):
def __init__(self, model=None, instance=None, symmetrical=None,
source_col_name=None, target_col_name=None, content_type=None,
content_type_field_name=None, object_id_field_name=None,
prefetch_cache_name=None):
super(GenericRelatedObjectManager, self).__init__()
self.model = model
self.content_type = content_type
self.symmetrical = symmetrical
self.instance = instance
self.source_col_name = source_col_name
self.target_col_name = target_col_name
self.content_type_field_name = content_type_field_name
self.object_id_field_name = object_id_field_name
self.prefetch_cache_name = prefetch_cache_name
self.pk_val = self.instance._get_pk_val()
self.core_filters = {
'%s__pk' % content_type_field_name: content_type.id,
'%s__exact' % object_id_field_name: instance._get_pk_val(),
}
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
db = self._db or router.db_for_read(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).get_queryset().using(db).filter(**self.core_filters)
def get_prefetch_queryset(self, instances):
db = self._db or router.db_for_read(self.model, instance=instances[0])
query = {
'%s__pk' % self.content_type_field_name: self.content_type.id,
'%s__in' % self.object_id_field_name:
set(obj._get_pk_val() for obj in instances)
}
qs = super(GenericRelatedObjectManager, self).get_queryset().using(db).filter(**query)
# We (possibly) need to convert object IDs to the type of the
# instances' PK in order to match up instances:
object_id_converter = instances[0]._meta.pk.to_python
return (qs,
lambda relobj: object_id_converter(getattr(relobj, self.object_id_field_name)),
lambda obj: obj._get_pk_val(),
False,
self.prefetch_cache_name)
def add(self, *objs):
for obj in objs:
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected" % self.model._meta.object_name)
setattr(obj, self.content_type_field_name, self.content_type)
setattr(obj, self.object_id_field_name, self.pk_val)
obj.save()
add.alters_data = True
def remove(self, *objs):
db = router.db_for_write(self.model, instance=self.instance)
for obj in objs:
obj.delete(using=db)
remove.alters_data = True
def clear(self):
db = router.db_for_write(self.model, instance=self.instance)
for obj in self.all():
obj.delete(using=db)
clear.alters_data = True
def create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).using(db).create(**kwargs)
create.alters_data = True
return GenericRelatedObjectManager
class GenericRel(ForeignObjectRel):
def __init__(self, field, to, related_name=None, limit_choices_to=None):
super(GenericRel, self).__init__(field, to, related_name, limit_choices_to)
class BaseGenericInlineFormSet(BaseModelFormSet):
"""
A formset for generic inline objects to a parent.
"""
def __init__(self, data=None, files=None, instance=None, save_as_new=None,
prefix=None, queryset=None, **kwargs):
opts = self.model._meta
self.instance = instance
self.rel_name = '-'.join((
opts.app_label, opts.model_name,
self.ct_field.name, self.ct_fk_field.name,
))
if self.instance is None or self.instance.pk is None:
qs = self.model._default_manager.none()
else:
if queryset is None:
queryset = self.model._default_manager
qs = queryset.filter(**{
self.ct_field.name: ContentType.objects.get_for_model(
self.instance, for_concrete_model=self.for_concrete_model),
self.ct_fk_field.name: self.instance.pk,
})
super(BaseGenericInlineFormSet, self).__init__(
queryset=qs, data=data, files=files,
prefix=prefix,
**kwargs
)
@classmethod
def get_default_prefix(cls):
opts = cls.model._meta
return '-'.join((opts.app_label, opts.model_name,
cls.ct_field.name, cls.ct_fk_field.name,
))
def save_new(self, form, commit=True):
kwargs = {
self.ct_field.get_attname(): ContentType.objects.get_for_model(
self.instance, for_concrete_model=self.for_concrete_model).pk,
self.ct_fk_field.get_attname(): self.instance.pk,
}
new_obj = self.model(**kwargs)
return save_instance(form, new_obj, commit=commit)
def generic_inlineformset_factory(model, form=ModelForm,
formset=BaseGenericInlineFormSet,
ct_field="content_type", fk_field="object_id",
fields=None, exclude=None,
extra=3, can_order=False, can_delete=True,
max_num=None,
formfield_callback=None, validate_max=False,
for_concrete_model=True):
"""
Returns a ``GenericInlineFormSet`` for the given kwargs.
You must provide ``ct_field`` and ``fk_field`` if they are different from
the defaults ``content_type`` and ``object_id`` respectively.
"""
opts = model._meta
# if there is no field called `ct_field` let the exception propagate
ct_field = opts.get_field(ct_field)
if not isinstance(ct_field, models.ForeignKey) or ct_field.rel.to != ContentType:
raise Exception("fk_name '%s' is not a ForeignKey to ContentType" % ct_field)
fk_field = opts.get_field(fk_field) # let the exception propagate
if exclude is not None:
exclude = list(exclude)
exclude.extend([ct_field.name, fk_field.name])
else:
exclude = [ct_field.name, fk_field.name]
FormSet = modelformset_factory(model, form=form,
formfield_callback=formfield_callback,
formset=formset,
extra=extra, can_delete=can_delete, can_order=can_order,
fields=fields, exclude=exclude, max_num=max_num,
validate_max=validate_max)
FormSet.ct_field = ct_field
FormSet.ct_fk_field = fk_field
FormSet.for_concrete_model = for_concrete_model
return FormSet
class GenericInlineModelAdmin(InlineModelAdmin):
ct_field = "content_type"
ct_fk_field = "object_id"
formset = BaseGenericInlineFormSet
def get_formset(self, request, obj=None, **kwargs):
if self.declared_fieldsets:
fields = flatten_fieldsets(self.declared_fieldsets)
else:
fields = None
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
exclude.extend(self.get_readonly_fields(request, obj))
if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# GenericInlineModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
exclude = exclude or None
can_delete = self.can_delete and self.has_delete_permission(request, obj)
defaults = {
"ct_field": self.ct_field,
"fk_field": self.ct_fk_field,
"form": self.form,
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
"formset": self.formset,
"extra": self.extra,
"can_delete": can_delete,
"can_order": False,
"fields": fields,
"max_num": self.max_num,
"exclude": exclude
}
defaults.update(kwargs)
if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
defaults['fields'] = ALL_FIELDS
return generic_inlineformset_factory(self.model, **defaults)
class GenericStackedInline(GenericInlineModelAdmin):
template = 'admin/edit_inline/stacked.html'
class GenericTabularInline(GenericInlineModelAdmin):
template = 'admin/edit_inline/tabular.html'
|
the-stack_106_22416 | # content of conftest.py
from kueventparser import events
def make_test_event(uri: str) -> events.Event:
"""テスト用のEventクラス作成関数.
Args:
uri('str'): URI
Returns:
list: list for event
"""
import datetime as dt
import pytz
from bs4 import BeautifulSoup
# jst の設定
jst = pytz.timezone('Asia/Tokyo')
# soupでassertion用のデータを抜き出す.
data_soup = BeautifulSoup(open(uri, encoding="utf-8"), "lxml-xml",
from_encoding="utf-8")
title = data_soup.title.string
url = data_soup.url.string
location = data_soup.location.string
s = data_soup.start_date
s_date = dt.date(int(s.year.text), int(s.month.text), int(s.day.text))
e = data_soup.end_date
e_date = dt.date(int(e.year.text), int(e.month.text), int(e.day.text))
start = dt.time(9, 0, tzinfo=jst)
end = dt.time(21, 30, tzinfo=jst)
description = ""
for string in data_soup.description.stripped_strings:
description += string
description += "\n"
# Eventの作成.
event = events.Event(title=title, url=url, location=location,
description=description, start_date=s_date, end_date=e_date, start=start,
end=end)
return event
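# Hedged sketch (not part of the original conftest) of the XML fixture layout
# make_test_event() expects; the tag names mirror the parsing above, the values
# are placeholders.
#
#     <event>
#       <title>Sample Event</title>
#       <url>https://example.com/event</url>
#       <location>Kyoto</location>
#       <start_date><year>2018</year><month>4</month><day>1</day></start_date>
#       <end_date><year>2018</year><month>4</month><day>2</day></end_date>
#       <description>Details about the event.</description>
#     </event>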
|
the-stack_106_22419 | import os
import random
import StringIO
from datetime import datetime
import dj_database_url
from fabric.api import cd, env, execute, get, local, put, require, run, settings, shell_env, task
from fabric.context_managers import quiet
from fabric.operations import prompt
from gitric import api as gitric
# This is the definition of your environments. Every item of the ENVIRONMENTS
# dict will be made available as a fabric task and the properties you put in a
# particular environment will be made available in the `env` variable.
ENVIRONMENTS = {
'prod': {
'root': '/var/www/django_app/prod/',
'hosts': ['root@myhost'],
# You can set settings that will be automatically deployed when running
# the `bootstrap` command
# 'settings': {
# 'ALLOWED_HOSTS': 'www.myhost.com',
# }
},
'dev': {
'root': '/var/www/django_app/staging/',
'hosts': ['root@myhost'],
# You can set settings that will be automatically deployed when running
# the `bootstrap` command
# 'settings': {
# 'ALLOWED_HOSTS': 'www.myhost.com',
# }
}
}
env.project_name = 'django_app'
def ls(path):
"""
Return the list of the files in the given directory, omitting . and ...
"""
with cd(path), quiet():
files = run('for i in *; do echo $i; done')
files_list = files.replace('\r', '').split('\n')
return files_list
def git_push(commit):
"""
Push the current tree to the remote server and reset the remote git
repository to the given commit. The commit can be any git object, be it a
hash, a tag or a branch.
"""
gitric.git_seed(get_project_root(), commit)
gitric.git_reset(get_project_root(), 'master')
def get_project_root():
"""
Return the path to the root of the project on the remote server.
"""
return os.path.join(env.root, env.project_name)
def get_virtualenv_root():
"""
Return the path to the virtual environment on the remote server.
"""
return os.path.join(env.root, 'venv')
def get_backups_root():
"""
Return the path to the backups directory on the remote server.
"""
return os.path.join(env.root, 'backups')
def run_in_virtualenv(cmd, args):
"""
Run the given command from the remote virtualenv.
"""
return run('%s %s' % (os.path.join(get_virtualenv_root(), 'bin', cmd),
args))
def run_pip(args):
"""
Run the pip command in the remote virtualenv.
"""
return run_in_virtualenv('pip', args)
def run_python(args):
"""
Run the python command in the remote virtualenv.
"""
return run_in_virtualenv('python', args)
def install_requirements():
"""
Install the requirements from the base.txt file to the remote virtualenv.
"""
with cd(get_project_root()):
run_pip("install -r requirements/base.txt")
def migrate_database():
with cd(get_project_root()):
run_python("manage.py migrate")
def collect_static():
"""
Collect static files to the STATIC_ROOT directory.
"""
with cd(get_project_root()):
run_python("manage.py collectstatic --noinput")
def restart_process():
"""
Restart the WSGI process by touching the wsgi.py file.
"""
run('touch %s' % os.path.join(get_project_root(), env.project_name,
'wsgi.py'))
def generate_secret_key():
"""
Generate a random secret key, suitable to be used as a SECRET_KEY setting.
"""
return ''.join(
[random.SystemRandom().choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50)]
)
def create_structure():
"""
Create the basic directory structure on the remote server.
"""
run('mkdir -p %s' % env.root)
with cd(env.root):
run('mkdir -p static backups')
run('python3 -m venv venv')
@task
def sync_settings():
"""
Copy all settings defined in the environment to the server.
"""
for setting, value in env.settings.items():
set_setting(setting, value=value)
def set_setting(setting_key, value=None, description=None):
"""
Sets the given setting to the given value on the remote server. If the
value is not provided, the user will be prompted for it.
TODO: use the description parameter to display a help text.
"""
if value is None:
value = prompt("Please provide value for setting %s: " % setting_key)
with cd(os.path.join(get_project_root(), 'envdir')):
put(StringIO.StringIO(value), setting_key)
@task
def bootstrap():
"""
Deploy the project for the first time. This will create the directory
structure, push the project and set the basic settings.
This task needs to be called alongside an environment task, eg. ``fab prod
bootstrap``.
"""
create_structure()
execute(git_push, commit='master')
required_settings = set(['DATABASE_URL', 'MEDIA_ROOT', 'STATIC_ROOT',
'MEDIA_URL', 'STATIC_URL', 'ALLOWED_HOSTS'])
if hasattr(env, 'settings'):
for setting, value in env.settings.items():
set_setting(setting, value=value)
# Ask for settings that are required but were not set in the parameters
# file
for setting in required_settings - set(env.settings.keys()):
set_setting(setting)
set_setting('DJANGO_SETTINGS_MODULE',
value='%s.settings.base' % env.project_name)
set_setting('SECRET_KEY', value=generate_secret_key())
execute(install_requirements)
execute(collect_static)
execute(migrate_database)
execute(restart_process)
@task
def compile_assets():
local('npm install')
local('npm run build')
local(
"rsync -e 'ssh -p {port}' -r --exclude *.map --exclude *.swp static/ "
"{user}@{host}:{path}".format(host=env.host,
user=env.user,
port=env.port,
path=os.path.join(get_project_root(), 'static')))
@task
def deploy(tag):
require('root', 'project_name')
execute(git_push, commit='@')
dump_db(get_backups_root())
execute(install_requirements)
execute(compile_assets)
execute(collect_static)
execute(migrate_database)
execute(restart_process)
execute(clean_old_database_backups, nb_backups_to_keep=10)
def dump_db(destination):
"""
Dump the database to the given directory and return the path to the file created. This creates a gzipped SQL file.
"""
with cd(get_project_root()), quiet():
db_credentials = run('cat envdir/DATABASE_URL')
db_credentials_dict = dj_database_url.parse(db_credentials)
if not is_supported_db_engine(db_credentials_dict['ENGINE']):
raise NotImplementedError(
"The dump_db task doesn't support the remote database engine"
)
outfile = os.path.join(destination, datetime.now().strftime('%Y-%m-%d_%H%M%S.sql.gz'))
with shell_env(PGPASSWORD=db_credentials_dict['PASSWORD'].replace('$', '\$')):
run('pg_dump -O -x -h {host} -U {user} {db}|gzip > {outfile}'.format(
host=db_credentials_dict['HOST'],
user=db_credentials_dict['USER'],
db=db_credentials_dict['NAME'],
outfile=outfile))
return outfile
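# Sketch of what the dj_database_url.parse() call above returns for a typical
# (made-up) URL -- the exact ENGINE string may vary with the library version:
#
#   dj_database_url.parse('postgres://app:secret@db.example.com:5432/appdb')
#   # -> {'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'appdb',
#   #     'USER': 'app', 'PASSWORD': 'secret', 'HOST': 'db.example.com', 'PORT': 5432}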
@task
def fetch_db(destination='.'):
"""
Dump the database on the remote host and retrieve it locally.
The destination parameter controls where the dump should be stored locally.
"""
require('root')
dump_path = dump_db('~')
get(dump_path, destination)
run('rm %s' % dump_path)
return os.path.basename(dump_path)
@task
def import_db(dump_file=None):
"""
Restore the given database dump.
The dump must be a gzipped SQL dump. If the dump_file parameter is not set,
the database will be dumped and retrieved from the remote host.
"""
with open('envdir/DATABASE_URL', 'r') as db_credentials_file:
db_credentials = db_credentials_file.read()
db_credentials_dict = dj_database_url.parse(db_credentials)
if not is_supported_db_engine(db_credentials_dict['ENGINE']):
raise NotImplementedError(
"The import_db task doesn't support your database engine"
)
if dump_file is None:
dump_file = fetch_db()
db_info = {
'host': db_credentials_dict['HOST'],
'user': db_credentials_dict['USER'],
'db': db_credentials_dict['NAME'],
'db_dump': dump_file
}
with shell_env(PGPASSWORD=db_credentials_dict['PASSWORD']):
with settings(warn_only=True):
local('dropdb -h {host} -U {user} {db}'.format(**db_info))
local('createdb -h {host} -U {user} {db}'.format(**db_info))
local('gunzip -c {db_dump}|psql -h {host} -U {user} {db}'.format(
**db_info
))
@task
def clean_old_database_backups(nb_backups_to_keep):
"""
Remove old database backups from the system and keep `nb_backups_to_keep`.
"""
backups = ls(get_backups_root())
backups = sorted(backups, reverse=True)
if len(backups) > nb_backups_to_keep:
backups_to_delete = backups[nb_backups_to_keep:]
for backup_to_delete in backups_to_delete:
run('rm "%s"' % os.path.join(get_backups_root(), backup_to_delete))
print("%d backups deleted." % len(backups_to_delete))
else:
print("No backups to delete.")
def is_supported_db_engine(engine):
return engine == 'django.db.backends.postgresql_psycopg2'
# Environment handling stuff
############################
def get_environment_func(key, value):
def load_environment():
env.update(value)
env.environment = key
load_environment.__name__ = key
load_environment.__doc__ = "Definition of the %s environment." % key
return load_environment
def load_environments(environments):
for (key, values) in environments.items():
globals()[key] = task(get_environment_func(key, values))
load_environments(ENVIRONMENTS)
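# With the environments defined above, tasks are run together with an
# environment task, for example (Fabric 1.x syntax, values are placeholders):
#
#   fab prod bootstrap
#   fab dev deploy:1.2.3
#   fab prod fetch_db:/tmp/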
|
the-stack_106_22421 | # Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to transferring ownership of volumes.
"""
import hashlib
import hmac
import os
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import six
from cinder.db import base
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import quota
from cinder.volume import api as volume_api
from cinder.volume import utils as volume_utils
volume_transfer_opts = [
cfg.IntOpt('volume_transfer_salt_length', default=8,
help='The number of characters in the salt.'),
cfg.IntOpt('volume_transfer_key_length', default=16,
help='The number of characters in the '
'autogenerated auth key.'), ]
CONF = cfg.CONF
CONF.register_opts(volume_transfer_opts)
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
class API(base.Base):
"""API for interacting volume transfers."""
def __init__(self, db_driver=None):
self.volume_api = volume_api.API()
super(API, self).__init__(db_driver)
def get(self, context, transfer_id):
rv = self.db.transfer_get(context, transfer_id)
return dict(rv)
def delete(self, context, transfer_id):
"""Make the RPC call to delete a volume transfer."""
volume_api.check_policy(context, 'delete_transfer')
transfer = self.db.transfer_get(context, transfer_id)
volume_ref = self.db.volume_get(context, transfer.volume_id)
volume_utils.notify_about_volume_usage(context, volume_ref,
"transfer.delete.start")
if volume_ref['status'] != 'awaiting-transfer':
LOG.error(_LE("Volume in unexpected state"))
self.db.transfer_destroy(context, transfer_id)
volume_utils.notify_about_volume_usage(context, volume_ref,
"transfer.delete.end")
def get_all(self, context, filters=None):
filters = filters or {}
volume_api.check_policy(context, 'get_all_transfers')
if context.is_admin and 'all_tenants' in filters:
transfers = self.db.transfer_get_all(context)
else:
transfers = self.db.transfer_get_all_by_project(context,
context.project_id)
return transfers
def _get_random_string(self, length):
"""Get a random hex string of the specified length."""
rndstr = ""
# Note that the string returned by this function must contain only
# characters that the recipient can enter on their keyboard. The
        # function sha224().hexdigest() achieves this by generating a hash
        # which will only contain hexadecimal digits.
while len(rndstr) < length:
rndstr += hashlib.sha224(os.urandom(255)).hexdigest()
return rndstr[0:length]
def _get_crypt_hash(self, salt, auth_key):
"""Generate a random hash based on the salt and the auth key."""
if not isinstance(salt, (six.binary_type, six.text_type)):
salt = str(salt)
if isinstance(salt, six.text_type):
salt = salt.encode('utf-8')
if not isinstance(auth_key, (six.binary_type, six.text_type)):
auth_key = str(auth_key)
if isinstance(auth_key, six.text_type):
auth_key = auth_key.encode('utf-8')
return hmac.new(salt, auth_key, hashlib.sha1).hexdigest()
def create(self, context, volume_id, display_name):
"""Creates an entry in the transfers table."""
volume_api.check_policy(context, 'create_transfer')
LOG.info(_LI("Generating transfer record for volume %s"), volume_id)
volume_ref = self.db.volume_get(context, volume_id)
if volume_ref['status'] != "available":
raise exception.InvalidVolume(reason=_("status must be available"))
volume_utils.notify_about_volume_usage(context, volume_ref,
"transfer.create.start")
# The salt is just a short random string.
salt = self._get_random_string(CONF.volume_transfer_salt_length)
auth_key = self._get_random_string(CONF.volume_transfer_key_length)
crypt_hash = self._get_crypt_hash(salt, auth_key)
# TODO(ollie): Transfer expiry needs to be implemented.
transfer_rec = {'volume_id': volume_id,
'display_name': display_name,
'salt': salt,
'crypt_hash': crypt_hash,
'expires_at': None}
try:
transfer = self.db.transfer_create(context, transfer_rec)
except Exception:
LOG.error(_LE("Failed to create transfer record "
"for %s"), volume_id)
raise
volume_utils.notify_about_volume_usage(context, volume_ref,
"transfer.create.end")
return {'id': transfer['id'],
'volume_id': transfer['volume_id'],
'display_name': transfer['display_name'],
'auth_key': auth_key,
'created_at': transfer['created_at']}
def accept(self, context, transfer_id, auth_key):
"""Accept a volume that has been offered for transfer."""
# We must use an elevated context to see the volume that is still
# owned by the donor.
volume_api.check_policy(context, 'accept_transfer')
transfer = self.db.transfer_get(context.elevated(), transfer_id)
crypt_hash = self._get_crypt_hash(transfer['salt'], auth_key)
if crypt_hash != transfer['crypt_hash']:
msg = (_("Attempt to transfer %s with invalid auth key.") %
transfer_id)
LOG.error(msg)
raise exception.InvalidAuthKey(reason=msg)
volume_id = transfer['volume_id']
vol_ref = self.db.volume_get(context.elevated(), volume_id)
volume_utils.notify_about_volume_usage(context, vol_ref,
"transfer.accept.start")
try:
reservations = QUOTAS.reserve(context, volumes=1,
gigabytes=vol_ref['size'])
except exception.OverQuota as e:
overs = e.kwargs['overs']
usages = e.kwargs['usages']
quotas = e.kwargs['quotas']
def _consumed(name):
return (usages[name]['reserved'] + usages[name]['in_use'])
if 'gigabytes' in overs:
msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
"%(s_size)sG volume (%(d_consumed)dG of "
"%(d_quota)dG already consumed)")
LOG.warning(msg, {'s_pid': context.project_id,
's_size': vol_ref['size'],
'd_consumed': _consumed('gigabytes'),
'd_quota': quotas['gigabytes']})
raise exception.VolumeSizeExceedsAvailableQuota(
requested=vol_ref['size'],
consumed=_consumed('gigabytes'),
quota=quotas['gigabytes'])
elif 'volumes' in overs:
msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
"volume (%(d_consumed)d volumes "
"already consumed)")
LOG.warning(msg, {'s_pid': context.project_id,
'd_consumed': _consumed('volumes')})
raise exception.VolumeLimitExceeded(allowed=quotas['volumes'])
try:
donor_id = vol_ref['project_id']
donor_reservations = QUOTAS.reserve(context.elevated(),
project_id=donor_id,
volumes=-1,
gigabytes=-vol_ref['size'])
except Exception:
donor_reservations = None
LOG.exception(_LE("Failed to update quota donating volume"
" transfer id %s"), transfer_id)
try:
# Transfer ownership of the volume now, must use an elevated
# context.
self.volume_api.accept_transfer(context,
vol_ref,
context.user_id,
context.project_id)
self.db.transfer_accept(context.elevated(),
transfer_id,
context.user_id,
context.project_id)
QUOTAS.commit(context, reservations)
if donor_reservations:
QUOTAS.commit(context, donor_reservations, project_id=donor_id)
LOG.info(_LI("Volume %s has been transferred."), volume_id)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, reservations)
if donor_reservations:
QUOTAS.rollback(context, donor_reservations,
project_id=donor_id)
vol_ref = self.db.volume_get(context, volume_id)
volume_utils.notify_about_volume_usage(context, vol_ref,
"transfer.accept.end")
return {'id': transfer_id,
'display_name': transfer['display_name'],
'volume_id': vol_ref['id']}
|
the-stack_106_22422 | import pytest
import cftime
from datetime import datetime
from unittest.mock import Mock, call, patch, sentinel
import unittest
import iris
import numpy as np
from forest.drivers import gridded_forecast
class Test_empty_image(unittest.TestCase):
def test(self):
result = gridded_forecast.empty_image()
self.assertEqual(result.keys(), {'x', 'y', 'dw', 'dh', 'image', 'name',
'units', 'valid', 'initial', 'length',
'level'})
for value in result.values():
self.assertEqual(value, [])
@patch('forest.drivers.gridded_forecast._to_datetime')
class Test_coordinates(unittest.TestCase):
def test_surface_and_times(self, to_datetime):
valid = datetime(2019, 10, 10, 9)
initial = datetime(2019, 10, 10, 3)
to_datetime.side_effect = [valid, initial]
result = gridded_forecast.coordinates(sentinel.valid, sentinel.initial,
[], None)
self.assertEqual(to_datetime.mock_calls, [call(sentinel.valid),
call(sentinel.initial)])
self.assertEqual(result, {'valid': [valid], 'initial': [initial],
'length': ['T+6'], 'level': ['Surface']})
def test_surface_no_pressures(self, to_datetime):
result = gridded_forecast.coordinates(None, None, [], 950)
self.assertEqual(result['level'], ['Surface'])
def test_surface_no_pressure(self, to_datetime):
result = gridded_forecast.coordinates(None, None, [1000, 900], None)
self.assertEqual(result['level'], ['Surface'])
def test_pressure(self, to_datetime):
result = gridded_forecast.coordinates(None, None, [1000, 900], 900)
self.assertEqual(result['level'], ['900 hPa'])
class Test_is_valid_cube(unittest.TestCase):
def setUp(self):
lon = iris.coords.DimCoord(range(5), 'longitude')
lat = iris.coords.DimCoord(range(4), 'latitude')
time = iris.coords.DimCoord(range(3), 'time')
other = iris.coords.DimCoord(range(2), long_name='other')
frt = iris.coords.AuxCoord(range(1), 'forecast_reference_time')
cube = iris.cube.Cube(np.empty((2, 3, 4, 5)), 'air_temperature',
dim_coords_and_dims=[(other, 0), (time, 1),
(lat, 2), (lon, 3)],
aux_coords_and_dims=[(frt, ())])
self.cube = cube
def test_ok(self):
cube = self.cube[0]
self.assertTrue(gridded_forecast._is_valid_cube(cube))
def test_1d(self):
cube = self.cube[0, 0, 0]
self.assertFalse(gridded_forecast._is_valid_cube(cube))
def test_4d(self):
cube = self.cube
self.assertFalse(gridded_forecast._is_valid_cube(cube))
def test_2d_missing_time_coord(self):
cube = self.cube[0, 0]
cube.remove_coord(cube.coord('time'))
self.assertFalse(gridded_forecast._is_valid_cube(cube))
def test_missing_frt_coord(self):
cube = self.cube[0]
cube.remove_coord(cube.coord('forecast_reference_time'))
self.assertFalse(gridded_forecast._is_valid_cube(cube))
def test_missing_dim_coord(self):
cube = self.cube[0]
cube.remove_coord(cube.dim_coords[0])
self.assertFalse(gridded_forecast._is_valid_cube(cube))
def test_invalid_dim_coord(self):
cube = self.cube[0]
cube.dim_coords[2].rename('projection_x_coordinate')
self.assertFalse(gridded_forecast._is_valid_cube(cube))
def test_transposed(self):
cube = self.cube[0]
cube.transpose()
self.assertFalse(gridded_forecast._is_valid_cube(cube))
class Test_load(unittest.TestCase):
@patch('forest.drivers.gridded_forecast._is_valid_cube')
@patch('iris.load')
def test_all_unique(self, load, is_valid_cube):
cube1 = Mock(**{'name.return_value': 'foo'})
cube2 = Mock(**{'name.return_value': 'bar'})
load.return_value = [cube1, cube2]
is_valid_cube.return_value = True
result = gridded_forecast._load(sentinel.pattern)
load.assert_called_once_with(sentinel.pattern)
self.assertEqual(is_valid_cube.mock_calls, [call(cube1), call(cube2)])
self.assertEqual(result, {'foo': cube1, 'bar': cube2})
@patch('forest.drivers.gridded_forecast._is_valid_cube')
@patch('iris.load')
def test_duplicate_name(self, load, is_valid_cube):
cube1 = Mock(**{'name.return_value': 'foo'})
cube2 = Mock(**{'name.return_value': 'foo'})
load.return_value = [cube1, cube2]
is_valid_cube.return_value = True
result = gridded_forecast._load(sentinel.pattern)
load.assert_called_once_with(sentinel.pattern)
self.assertEqual(is_valid_cube.mock_calls, [call(cube1), call(cube2)])
self.assertEqual(result, {'foo (1)': cube1, 'foo (2)': cube2})
@patch('forest.drivers.gridded_forecast._is_valid_cube')
@patch('iris.load')
def test_none_valid(self, load, is_valid_cube):
load.return_value = ['foo', 'bar']
is_valid_cube.return_value = False
with self.assertRaises(AssertionError):
gridded_forecast._load(None)
class Test_ImageLoader(unittest.TestCase):
def test_init(self):
result = gridded_forecast.ImageLoader(sentinel.label, sentinel.cubes)
self.assertEqual(result._label, sentinel.label)
self.assertEqual(result._cubes, sentinel.cubes)
@patch('forest.drivers.gridded_forecast.empty_image')
@patch('iris.Constraint')
@patch('forest.drivers.gridded_forecast._to_datetime')
def test_empty(self, to_datetime, constraint, empty_image):
original_cube = Mock()
original_cube.extract.return_value = None
image_loader = gridded_forecast.ImageLoader(sentinel.label,
{'foo': original_cube})
to_datetime.return_value = sentinel.valid_datetime
constraint.return_value = sentinel.constraint
empty_image.return_value = sentinel.empty_image
result = image_loader.image(
Mock(variable='foo', valid_time=sentinel.valid))
to_datetime.assert_called_once_with(sentinel.valid)
constraint.assert_called_once_with(time=sentinel.valid_datetime)
original_cube.extract.assert_called_once_with(sentinel.constraint)
self.assertEqual(result, sentinel.empty_image)
@patch('forest.drivers.gridded_forecast.coordinates')
@patch('forest.geo.stretch_image')
@patch('iris.Constraint')
@patch('forest.drivers.gridded_forecast._to_datetime')
def test_image(self, to_datetime, constraint, stretch_image, coordinates):
cube = Mock()
cube.coord.side_effect = [Mock(points=sentinel.longitudes),
Mock(points=sentinel.latitudes)]
cube.units.__str__ = lambda self: 'my-units'
original_cube = Mock()
original_cube.extract.return_value = cube
image_loader = gridded_forecast.ImageLoader('my-label',
{'foo': original_cube})
to_datetime.return_value = sentinel.valid_datetime
constraint.return_value = sentinel.constraint
stretch_image.return_value = {'stretched_image': True}
coordinates.return_value = {'coordinates': True}
state = Mock(variable='foo',
valid_time=sentinel.valid,
initial_time=sentinel.initial,
pressures=sentinel.pressures,
pressure=sentinel.pressure)
result = image_loader.image(state)
self.assertEqual(cube.coord.mock_calls, [call('longitude'),
call('latitude')])
stretch_image.assert_called_once_with(sentinel.longitudes,
sentinel.latitudes, cube.data)
coordinates.assert_called_once_with(sentinel.valid, sentinel.initial,
sentinel.pressures,
sentinel.pressure)
self.assertEqual(result, {'stretched_image': True, 'coordinates': True,
'name': ['my-label'], 'units': ['my-units']})
class Test_Navigator(unittest.TestCase):
def test_init(self):
result = gridded_forecast.Navigator(sentinel.cubes)
self.assertEqual(result._cubes, sentinel.cubes)
def test_variables(self):
navigator = Mock(_cubes={'one': 1, 'two': 2, 'three': 3})
result = gridded_forecast.Navigator.variables(navigator, None)
self.assertEqual(list(sorted(result)), ['one', 'three', 'two'])
def test_initial_times(self):
cube1 = Mock()
cube1.coord.return_value.cell().point = '2019-10-18'
cube2 = Mock()
cube2.coord.return_value.cell().point = '2019-10-19'
cube3 = Mock()
cube3.coord.return_value.cell().point = '2019-10-18'
navigator = Mock()
navigator._cubes.values.return_value = [cube1, cube2, cube3]
result = gridded_forecast.Navigator.initial_times(navigator, None,
None)
navigator._cubes.values.assert_called_once_with()
cube1.coord.assert_called_once_with('forecast_reference_time')
cube2.coord.assert_called_once_with('forecast_reference_time')
cube3.coord.assert_called_once_with('forecast_reference_time')
self.assertEqual(list(sorted(result)), ['2019-10-18', '2019-10-19'])
def test_valid_times(self):
cube1 = Mock()
cube1.coord.return_value.cells.return_value = [Mock(point='p1'),
Mock(point='p2')]
navigator = Mock()
navigator._cubes = {'first': cube1, 'second': None}
result = gridded_forecast.Navigator.valid_times(navigator, None,
'first', None)
cube1.coord.assert_called_once_with('time')
self.assertEqual(result, ['p1', 'p2'])
def test_pressures(self):
cube1 = Mock()
cube1.coord.return_value.cells.return_value = [Mock(point='p1'),
Mock(point='p2')]
navigator = Mock()
navigator._cubes = {'first': cube1, 'second': None}
result = gridded_forecast.Navigator.pressures(navigator, None,
'first', None)
cube1.coord.assert_called_once_with('pressure')
self.assertEqual(result, ['p1', 'p2'])
def test_pressures_empty(self):
cube1 = Mock()
cube1.coord.side_effect = iris.exceptions.CoordinateNotFoundError
navigator = Mock()
navigator._cubes = {'first': cube1, 'second': None}
result = gridded_forecast.Navigator.pressures(navigator, None,
'first', None)
cube1.coord.assert_called_once_with('pressure')
self.assertEqual(result, [])
|
the-stack_106_22423 | # (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from xml.etree.ElementTree import ParseError
import requests
from lxml import etree
from six import ensure_text
from checks import AgentCheck
from utils.util import _is_affirmative
from . import metrics, validation
class IbmWasCheck(AgentCheck):
SERVICE_CHECK_CONNECT = "ibm_was.can_connect"
METRIC_PREFIX = 'ibm_was'
def __init__(self, name, init_config, instance, aggregator=None):
super(IbmWasCheck, self).__init__(name, init_config, instance, aggregator)
self.instance = instance
self.metric_type_mapping = {
'AverageStatistic': self.gauge,
'BoundedRangeStatistic': self.gauge,
'CountStatistic': self.monotonic_count,
'DoubleStatistic': self.rate,
'RangeStatistic': self.gauge,
'TimeStatistic': self.gauge,
}
self.url = self.instance.get('servlet_url')
self.custom_queries = self.instance.get('custom_queries', [])
self.custom_queries_units_gauge = set(self.instance.get('custom_queries_units_gauge', []))
self.custom_tags = self.instance.get('tags', [])
self.collect_stats = self.setup_configured_stats()
self.nested_tags, self.metric_categories = self.append_custom_queries()
self.custom_stats = set(self.nested_tags)
self.service_check_tags = self.custom_tags + ['url:{}'.format(self.url)]
# parse HTTP options
username = self.instance.get('username')
password = self.instance.get('password')
tls_verify = _is_affirmative(self.instance.get('tls_verify', True))
tls_cert = self.instance.get('tls_cert')
tls_private_key = self.instance.get('tls_private_key')
tls_ca_cert = self.instance.get('tls_ca_cert')
# http://docs.python-requests.org/en/master/user/authentication/
auth = None
if username and password:
auth = (username, password)
# http://docs.python-requests.org/en/master/user/advanced/#ssl-cert-verification
verify = True
if isinstance(tls_ca_cert, str):
verify = tls_ca_cert
elif not tls_verify:
verify = False
# http://docs.python-requests.org/en/master/user/advanced/#client-side-certificates
cert = None
if isinstance(tls_cert, str):
if isinstance(tls_private_key, str):
cert = (tls_cert, tls_private_key)
else:
cert = tls_cert
self.http_options = {'auth': auth, 'cert': cert, 'verify': verify}
def check(self, _):
if not self.url:
raise ValueError("Please specify a servlet_url in the configuration file")
data = self.make_request()
try:
server_data_xml = etree.fromstring(data)
except ParseError as e:
self.submit_service_checks(AgentCheck.CRITICAL)
self.log.error("Unable to parse the XML response: {}".format(e))
return
node_list = self.get_node_from_root(server_data_xml, "Node")
for node in node_list:
server_list = self.get_node_from_root(node, 'Server')
node_tags = list(self.custom_tags)
node_tags.append('node:{}'.format(node.get('name')))
for server in server_list:
server_tags = ['server:{}'.format(server.get('name'))]
server_tags.extend(node_tags)
for category, prefix in self.metric_categories.items():
self.log.debug("Collecting %s stats", category)
if self.collect_stats.get(category):
stats = self.get_node_from_name(server, category)
self.process_stats(stats, prefix, server_tags)
def get_node_from_name(self, xml_data, path):
# XMLPath returns a list, but there should only be one element here since the function starts
# the search within a given Node/Server
data = xml_data.xpath('.//Stat[normalize-space(@name)="{}"]'.format(path))
if len(data):
return data[0]
else:
self.warning('Error finding {} stats in XML output.'.format(path))
return []
def get_node_from_root(self, xml_data, path):
return xml_data.findall(path)
def process_stats(self, stats, prefix, tags, recursion_level=0):
"""
        The XML will have Stat nodes and nodes that contain the metrics themselves.
        This code recursively goes through each Stat node to properly set up tags,
        where each Stat will have a different tag key depending on the context.
"""
for child in stats:
if child.tag in metrics.METRIC_VALUE_FIELDS:
self.submit_metrics(child, prefix, tags)
elif child.tag in metrics.CATEGORY_FIELDS:
tag_list = self.nested_tags.get(prefix)
if tag_list and len(tag_list) > recursion_level:
recursion_tags = tags + ['{}:{}'.format(tag_list[recursion_level], child.get('name'))]
else:
recursion_tags = tags
self.process_stats(child, prefix, recursion_tags, recursion_level + 1)
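    # Illustrative, simplified shape of the PerfServlet XML walked by
    # process_stats() above (element and attribute names are an assumption
    # based on this check's metric mappings, not a verbatim sample):
    #
    #   <Stat name="threadPoolModule">
    #     <Stat name="WebContainer">
    #       <CountStatistic name="CreateCount" count="42" unit="N/A"/>
    #     </Stat>
    #   </Stat>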
def submit_metrics(self, child, prefix, tags):
value = child.get(metrics.METRIC_VALUE_FIELDS[child.tag])
metric_name = self.normalize(
ensure_text(child.get('name')), prefix='{}.{}'.format(self.METRIC_PREFIX, prefix), fix_case=True
)
tag = child.tag
if (
child.get('unit') in self.custom_queries_units_gauge
and prefix in self.custom_stats
and tag == 'CountStatistic'
):
tag = 'TimeStatistic'
self.metric_type_mapping[tag](metric_name, value, tags=tags)
# creates new JVM metrics correctly as gauges
if prefix == "jvm":
jvm_metric_name = "{}_gauge".format(metric_name)
self.gauge(jvm_metric_name, value, tags=tags)
def make_request(self):
try:
resp = requests.get(self.url, **self.http_options)
resp.raise_for_status()
self.submit_service_checks(AgentCheck.OK)
except (requests.HTTPError, requests.ConnectionError) as e:
self.warning(
"Couldn't connect to URL: {} with exception: {}. Please verify the address is reachable".format(self.url, e)
)
self.submit_service_checks(AgentCheck.CRITICAL)
raise e
return resp.content
def submit_service_checks(self, value):
self.gauge(self.SERVICE_CHECK_CONNECT, 1 if value == AgentCheck.OK else 0, tags=list(self.service_check_tags))
self.service_check(self.SERVICE_CHECK_CONNECT, value, tags=list(self.service_check_tags))
def append_custom_queries(self):
custom_recursion_tags = {}
custom_metric_categories = {}
for query in self.custom_queries:
validation.validate_query(query)
custom_metric_categories[query['stat']] = query['metric_prefix']
custom_recursion_tags[query['metric_prefix']] = [key for key in query.get('tag_keys', [])]
self.collect_stats[query['stat']] = True
return (
dict(metrics.NESTED_TAGS, **custom_recursion_tags),
dict(metrics.METRIC_CATEGORIES, **custom_metric_categories),
)
def setup_configured_stats(self):
collect_stats = {}
for category, prefix in metrics.METRIC_CATEGORIES.items():
if _is_affirmative(self.instance.get('collect_{}_stats'.format(prefix), True)):
collect_stats[category] = True
return collect_stats
|
the-stack_106_22425 | #!/usr/bin/env python
import sys
import os
from setuptools import setup, find_packages, __version__
v = sys.version_info
if sys.version_info < (3, 5):
msg = "FAIL: Requires Python 3.5 or later, " \
"but setup.py was run using {}.{}.{}"
v = sys.version_info
print(msg.format(v.major, v.minor, v.micro))
# noinspection PyPackageRequirements
print("NOTE: Installation failed. Run setup.py using python3")
sys.exit(1)
try:
SETUP_DIRNAME = os.path.dirname(__file__)
except NameError:
# We're probably being frozen, and __file__ triggered this NameError
# Work around this
SETUP_DIRNAME = os.path.dirname(sys.argv[0])
if SETUP_DIRNAME != '':
os.chdir(SETUP_DIRNAME)
SETUP_DIRNAME = os.path.abspath(SETUP_DIRNAME)
METADATA = os.path.join(SETUP_DIRNAME, 'indy_node', '__metadata__.py')
# Load the metadata using exec() so we don't trigger an import of
# ioflo.__init__
exec(compile(open(METADATA).read(), METADATA, 'exec'))
BASE_DIR = os.path.join(os.path.expanduser("~"), ".indy")
LOG_DIR = os.path.join(BASE_DIR, "log")
CONFIG_FILE = os.path.join(BASE_DIR, "indy_config.py")
tests_require = ['pytest', 'pytest-xdist', 'python3-indy==1.3.1-dev-403']
setup(
name='indy-node-dev',
version=__version__,
description='Indy node',
url='https://github.com/hyperledger/indy-node',
author=__author__,
author_email='[email protected]',
license=__license__,
keywords='Indy Node',
packages=find_packages(exclude=['docs', 'docs*']) + [
'data'],
package_data={
'': ['*.txt', '*.md', '*.rst', '*.json', '*.conf', '*.html',
'*.css', '*.ico', '*.png', 'LICENSE', 'LEGAL', '*.indy']},
include_package_data=True,
data_files=[(
(BASE_DIR, ['data/nssm_original.exe'])
)],
install_requires=['indy-plenum-dev==1.2.305',
'indy-anoncreds-dev==1.0.32',
'python-dateutil',
'timeout-decorator'],
setup_requires=['pytest-runner'],
extras_require={
'tests': tests_require
},
tests_require=tests_require,
scripts=['scripts/indy',
'scripts/change_node_ha',
'scripts/add_new_node',
'scripts/reset_client',
'scripts/start_indy_node',
'scripts/start_node_control_tool',
'scripts/clear_node.py',
'scripts/get_keys',
'scripts/generate_indy_pool_transactions',
'scripts/init_indy_keys',
'scripts/upgrade_indy_node_ubuntu1604.sh',
'scripts/upgrade_indy_node_ubuntu1604_test.sh',
'scripts/upgrade_indy_node.bat',
'scripts/upgrade_indy_node_test.bat',
'scripts/restart_indy_node_ubuntu1604.sh',
'scripts/restart_indy_node.bat',
'scripts/restart_sovrin_node_ubuntu1604.sh',
'scripts/complete_rebranding_upgrade_ubuntu1604.sh',
'scripts/install_indy_node.bat',
'scripts/delete_indy_node.bat',
'scripts/restart_upgrade_agent.bat',
'scripts/install_nssm.bat',
'scripts/read_ledger',
'scripts/test_some_write_keys_others_read_them',
'scripts/test_users_write_and_read_own_keys',
'scripts/validator-info',
'scripts/init_bls_keys',
'scripts/enable_bls',
'scripts/create_dirs.sh',
'scripts/indy_old_cli_export_dids',
'scripts/setup_iptables',
'scripts/setup_indy_node_iptables']
)
|
the-stack_106_22428 | from collections import deque
import gym
import gym_minigrid
import numpy as np
import sys
import unittest
import ray
from ray import tune
from ray.rllib.agents.callbacks import DefaultCallbacks
import ray.rllib.agents.ppo as ppo
from ray.rllib.utils.test_utils import check_learning_achieved, \
framework_iterator
from ray.rllib.utils.numpy import one_hot
from ray.tune import register_env
class MyCallBack(DefaultCallbacks):
def __init__(self):
super().__init__()
self.deltas = []
def on_postprocess_trajectory(self, *, worker, episode, agent_id,
policy_id, policies, postprocessed_batch,
original_batches, **kwargs):
pos = np.argmax(postprocessed_batch["obs"], -1)
x, y = pos % 8, pos // 8
self.deltas.extend((x**2 + y**2)**0.5)
def on_sample_end(self, *, worker, samples, **kwargs):
print("mean. distance from origin={}".format(np.mean(self.deltas)))
self.deltas = []
class OneHotWrapper(gym.core.ObservationWrapper):
def __init__(self, env, vector_index, framestack):
super().__init__(env)
self.framestack = framestack
# 49=7x7 field of vision; 11=object types; 6=colors; 3=state types.
# +4: Direction.
self.single_frame_dim = 49 * (11 + 6 + 3) + 4
self.init_x = None
self.init_y = None
self.x_positions = []
self.y_positions = []
self.x_y_delta_buffer = deque(maxlen=100)
self.vector_index = vector_index
self.frame_buffer = deque(maxlen=self.framestack)
for _ in range(self.framestack):
self.frame_buffer.append(np.zeros((self.single_frame_dim, )))
self.observation_space = gym.spaces.Box(
0.0,
1.0,
shape=(self.single_frame_dim * self.framestack, ),
dtype=np.float32)
def observation(self, obs):
# Debug output: max-x/y positions to watch exploration progress.
if self.step_count == 0:
for _ in range(self.framestack):
self.frame_buffer.append(np.zeros((self.single_frame_dim, )))
if self.vector_index == 0:
if self.x_positions:
max_diff = max(
np.sqrt((np.array(self.x_positions) - self.init_x)**2 +
(np.array(self.y_positions) - self.init_y)**2))
self.x_y_delta_buffer.append(max_diff)
print("100-average dist travelled={}".format(
np.mean(self.x_y_delta_buffer)))
self.x_positions = []
self.y_positions = []
self.init_x = self.agent_pos[0]
self.init_y = self.agent_pos[1]
# Are we carrying the key?
# if self.carrying is not None:
# print("Carrying KEY!!")
self.x_positions.append(self.agent_pos[0])
self.y_positions.append(self.agent_pos[1])
# One-hot the last dim into 11, 6, 3 one-hot vectors, then flatten.
objects = one_hot(obs[:, :, 0], depth=11)
colors = one_hot(obs[:, :, 1], depth=6)
states = one_hot(obs[:, :, 2], depth=3)
# Is the door we see open?
# for x in range(7):
# for y in range(7):
# if objects[x, y, 4] == 1.0 and states[x, y, 0] == 1.0:
# print("Door OPEN!!")
all_ = np.concatenate([objects, colors, states], -1)
all_flat = np.reshape(all_, (-1, ))
direction = one_hot(
np.array(self.agent_dir), depth=4).astype(np.float32)
single_frame = np.concatenate([all_flat, direction])
self.frame_buffer.append(single_frame)
return np.concatenate(self.frame_buffer)
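# For reference: with the 7x7 partial view above, `objects`, `colors` and
# `states` have shapes (7, 7, 11), (7, 7, 6) and (7, 7, 3), so each
# `single_frame` has length 49 * (11 + 6 + 3) + 4 = 984, matching
# `self.single_frame_dim`.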
def env_maker(config):
name = config.get("name", "MiniGrid-Empty-5x5-v0")
framestack = config.get("framestack", 4)
env = gym.make(name)
# Only use image portion of observation (discard goal and direction).
env = gym_minigrid.wrappers.ImgObsWrapper(env)
env = OneHotWrapper(
env,
config.vector_index if hasattr(config, "vector_index") else 0,
framestack=framestack)
return env
register_env("mini-grid", env_maker)
CONV_FILTERS = [[16, [11, 11], 3], [32, [9, 9], 3], [64, [5, 5], 3]]
class TestCuriosity(unittest.TestCase):
@classmethod
def setUpClass(cls):
ray.init(num_cpus=3)
@classmethod
def tearDownClass(cls):
ray.shutdown()
def test_curiosity_on_frozen_lake(self):
config = ppo.DEFAULT_CONFIG.copy()
# A very large frozen-lake that's hard for a random policy to solve
# due to 0.0 feedback.
config["env"] = "FrozenLake-v1"
config["env_config"] = {
"desc": [
"SFFFFFFF",
"FFFFFFFF",
"FFFFFFFF",
"FFFFFFFF",
"FFFFFFFF",
"FFFFFFFF",
"FFFFFFFF",
"FFFFFFFG",
],
"is_slippery": False
}
# Print out observations to see how far we already get inside the Env.
config["callbacks"] = MyCallBack
# Limit horizon to make it really hard for non-curious agent to reach
# the goal state.
config["horizon"] = 16
# Local only.
config["num_workers"] = 0
config["lr"] = 0.001
num_iterations = 10
for _ in framework_iterator(config, frameworks=("tf", "torch")):
# W/ Curiosity. Expect to learn something.
config["exploration_config"] = {
"type": "Curiosity",
"eta": 0.2,
"lr": 0.001,
"feature_dim": 128,
"feature_net_config": {
"fcnet_hiddens": [],
"fcnet_activation": "relu",
},
"sub_exploration": {
"type": "StochasticSampling",
}
}
trainer = ppo.PPOTrainer(config=config)
learnt = False
for i in range(num_iterations):
result = trainer.train()
print(result)
if result["episode_reward_max"] > 0.0:
print("Reached goal after {} iters!".format(i))
learnt = True
break
trainer.stop()
self.assertTrue(learnt)
# Disable this check for now. Add too much flakyness to test.
# if fw == "tf":
# # W/o Curiosity. Expect to learn nothing.
# print("Trying w/o curiosity (not expected to learn).")
# config["exploration_config"] = {
# "type": "StochasticSampling",
# }
# trainer = ppo.PPOTrainer(config=config)
# rewards_wo = 0.0
# for _ in range(num_iterations):
# result = trainer.train()
# rewards_wo += result["episode_reward_mean"]
# print(result)
# trainer.stop()
# self.assertTrue(rewards_wo == 0.0)
# print("Did not reach goal w/o curiosity!")
def test_curiosity_on_partially_observable_domain(self):
config = ppo.DEFAULT_CONFIG.copy()
config["env"] = "mini-grid"
config["env_config"] = {
# Also works with:
# - MiniGrid-MultiRoom-N4-S5-v0
# - MiniGrid-MultiRoom-N2-S4-v0
"name": "MiniGrid-Empty-8x8-v0",
"framestack": 1, # seems to work even w/o framestacking
}
config["horizon"] = 15 # Make it impossible to reach goal by chance.
config["num_envs_per_worker"] = 4
config["model"]["fcnet_hiddens"] = [256, 256]
config["model"]["fcnet_activation"] = "relu"
config["num_sgd_iter"] = 8
config["num_workers"] = 0
config["exploration_config"] = {
"type": "Curiosity",
# For the feature NN, use a non-LSTM fcnet (same as the one
# in the policy model).
"eta": 0.1,
"lr": 0.0003, # 0.0003 or 0.0005 seem to work fine as well.
"feature_dim": 64,
# No actual feature net: map directly from observations to feature
# vector (linearly).
"feature_net_config": {
"fcnet_hiddens": [],
"fcnet_activation": "relu",
},
"sub_exploration": {
"type": "StochasticSampling",
}
}
min_reward = 0.001
stop = {
"training_iteration": 25,
"episode_reward_mean": min_reward,
}
for _ in framework_iterator(config, frameworks="torch"):
# To replay:
# trainer = ppo.PPOTrainer(config=config)
# trainer.restore("[checkpoint file]")
# env = env_maker(config["env_config"])
# s = env.reset()
# for _ in range(10000):
# s, r, d, _ = env.step(trainer.compute_single_action(s))
# if d:
# s = env.reset()
# env.render()
results = tune.run("PPO", config=config, stop=stop, verbose=1)
check_learning_achieved(results, min_reward)
iters = results.trials[0].last_result["training_iteration"]
print("Reached in {} iterations.".format(iters))
# config_wo = config.copy()
# config_wo["exploration_config"] = {"type": "StochasticSampling"}
# stop_wo = stop.copy()
# stop_wo["training_iteration"] = iters
# results = tune.run(
# "PPO", config=config_wo, stop=stop_wo, verbose=1)
# try:
# check_learning_achieved(results, min_reward)
# except ValueError:
# print("Did not learn w/o curiosity (expected).")
# else:
# raise ValueError("Learnt w/o curiosity (not expected)!")
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
the-stack_106_22430 | # -*- coding: utf-8 -*-
import re, json, glob, argparse
from gensim.corpora import WikiCorpus, Dictionary
from gensim.utils import to_unicode
"""
Creates a corpus from Wikipedia dump file.
Inspired by:
https://www.kdnuggets.com/2017/11/building-wikipedia-text-corpus-nlp.html
"""
def make_corpus(in_f, out_f):
"""Convert Wikipedia xml dump file to text corpus"""
output = open(out_f, 'w', encoding = "utf-8")
wiki = WikiCorpus(in_f, tokenizer_func=tokenize, dictionary=Dictionary())
i = 0
for text in wiki.get_texts():
output.write(bytes(' '.join(text), 'utf-8').decode('utf-8') + '\n')
i = i + 1
if (i % 10000 == 0):
print('Processed ' + str(i) + ' articles')
output.close()
print('Processing complete!')
WIKI_REMOVE_CHARS = re.compile("'+|(=+.{2,30}=+)|__TOC__|(ファイル:).+|:(en|de|it|fr|es|kr|zh|no|fi):|\n", re.UNICODE)
WIKI_SPACE_CHARS = re.compile("(\\s|゙|゚| )+", re.UNICODE)
EMAIL_PATTERN = re.compile("(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", re.UNICODE)
URL_PATTERN = re.compile("(ftp|http|https)?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+", re.UNICODE)
WIKI_REMOVE_TOKEN_CHARS = re.compile("(\\*$|:$|^파일:.+|^;)", re.UNICODE)
MULTIPLE_SPACES = re.compile(' +', re.UNICODE)
def tokenize(content, token_min_len=2, token_max_len=100, lower=True):
content = re.sub(EMAIL_PATTERN, ' ', content) # remove email pattern
content = re.sub(URL_PATTERN, ' ', content) # remove url pattern
content = re.sub(WIKI_REMOVE_CHARS, ' ', content) # remove unnecessary chars
content = re.sub(WIKI_SPACE_CHARS, ' ', content)
content = re.sub(MULTIPLE_SPACES, ' ', content)
tokens = content.replace(", )", "").split(" ")
result = []
for token in tokens:
if not token.startswith('_'):
token_candidate = to_unicode(re.sub(WIKI_REMOVE_TOKEN_CHARS, '', token))
else:
token_candidate = ""
if len(token_candidate) > 0:
result.append(token_candidate)
return result
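# Rough illustration of tokenize() on a made-up input: the URL and the __TOC__
# marker are stripped and the remaining whitespace-separated tokens are kept,
# e.g. tokenize("지미 카터 http://example.com __TOC__ 대통령") returns
# ['지미', '카터', '대통령'].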
def process_nsmc(corpus_path, output_fname, process_json=True, with_label=True):
if process_json:
file_paths = glob.glob(corpus_path + "/*")
with open(output_fname, 'w', encoding='utf-8') as f:
for path in file_paths:
contents = json.load(open(path))
for content in contents:
sentence = content['review'].strip()
if len(sentence) > 0:
f.writelines(sentence + "\u241E" + content['movie_id'] + "\n")
else:
with open(corpus_path, 'r', encoding='utf-8') as f1, \
open(output_fname, 'w', encoding='utf-8') as f2:
next(f1) # skip head line
for line in f1:
_, sentence, label = line.strip().split('\t')
if not sentence: continue
if with_label:
f2.writelines(sentence + "\u241E" + label + "\n")
else:
f2.writelines(sentence + "\n")
def process_korQuAD(corpus_fname, output_fname):
with open(corpus_fname) as f1, open(output_fname, 'w', encoding='utf-8') as f2:
dataset_json = json.load(f1)
dataset = dataset_json['data']
for article in dataset:
w_lines = []
for paragraph in article['paragraphs']:
w_lines.append(paragraph['context'])
for qa in paragraph['qas']:
q_text = qa['question']
for a in qa['answers']:
a_text = a['text']
w_lines.append(q_text + " " + a_text)
for line in w_lines:
f2.writelines(line + "\n")
def process_documents(corpus_fname, output_fname):
with open(corpus_fname) as f1, open(output_fname, 'w', encoding='utf-8') as f2:
for line in f1:
sentences = re.split("(?<=[.!?])\s+", line.strip())
for sentence in sentences:
f2.writelines(sentence + "\n")
f2.writelines("\n")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--preprocess_mode', type=str, help='preprocess mode')
parser.add_argument('--input_path', type=str, help='Location of input files')
parser.add_argument('--output_path', type=str, help='Location of output files')
parser.add_argument('--with_label', help='with label', type=str, default="False")
args = parser.parse_args()
if args.preprocess_mode == "wiki":
make_corpus(args.input_path, args.output_path)
elif "nsmc" in args.preprocess_mode:
process_nsmc(args.input_path, args.output_path, "json" in args.preprocess_mode, args.with_label.lower() == "true")
elif args.preprocess_mode == "korquad":
process_korQuAD(args.input_path, args.output_path)
elif args.preprocess_mode == "process-documents":
process_documents(args.input_path, args.output_path)
|
the-stack_106_22435 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import os
from fvcore.common.timer import Timer
from detectron2.structures import BoxMode
from fvcore.common.file_io import PathManager
from detectron2.data import DatasetCatalog, MetadataCatalog
from .lvis_v0_5_categories import LVIS_CATEGORIES
"""
This file contains functions to parse LVIS-format annotations into dicts in the
"Detectron2 format".
"""
logger = logging.getLogger(__name__)
__all__ = ["load_lvis_json", "register_lvis_instances", "get_lvis_instances_meta"]
def register_lvis_instances(name, metadata, json_file, image_root):
"""
Register a dataset in LVIS's json annotation format for instance detection and segmentation.
Args:
name (str): a name that identifies the dataset, e.g. "lvis_v0.5_train".
metadata (dict): extra metadata associated with this dataset. It can be an empty dict.
json_file (str): path to the json instance annotation file.
image_root (str): directory which contains all the images.
"""
DatasetCatalog.register(name, lambda: load_lvis_json(json_file, image_root, name))
MetadataCatalog.get(name).set(
json_file=json_file, image_root=image_root, evaluator_type="lvis", **metadata
)
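# Typical usage sketch (dataset name and paths below are placeholders):
#
#   register_lvis_instances(
#       "lvis_v0.5_train",
#       get_lvis_instances_meta("lvis_v0.5"),
#       "path/to/lvis_v0.5_train.json",
#       "path/to/coco/images",
#   )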
def load_lvis_json(json_file, image_root, dataset_name=None):
"""
Load a json file in LVIS's annotation format.
Args:
json_file (str): full path to the LVIS json annotation file.
image_root (str): the directory where the images in this json file exists.
dataset_name (str): the name of the dataset (e.g., "lvis_v0.5_train").
If provided, this function will put "thing_classes" into the metadata
associated with this dataset.
Returns:
list[dict]: a list of dicts in Detectron2 standard format. (See
`Using Custom Datasets </tutorials/datasets.html>`_ )
Notes:
1. This function does not read the image files.
The results do not have the "image" field.
"""
from lvis import LVIS
json_file = PathManager.get_local_path(json_file)
timer = Timer()
lvis_api = LVIS(json_file)
if timer.seconds() > 1:
logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
if dataset_name is not None:
meta = get_lvis_instances_meta(dataset_name)
MetadataCatalog.get(dataset_name).set(**meta)
# sort indices for reproducible results
img_ids = sorted(lvis_api.imgs.keys())
# imgs is a list of dicts, each looks something like:
# {'license': 4,
# 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
# 'file_name': 'COCO_val2014_000000001268.jpg',
# 'height': 427,
# 'width': 640,
# 'date_captured': '2013-11-17 05:57:24',
# 'id': 1268}
imgs = lvis_api.load_imgs(img_ids)
# anns is a list[list[dict]], where each dict is an annotation
# record for an object. The inner list enumerates the objects in an image
# and the outer list enumerates over images. Example of anns[0]:
# [{'segmentation': [[192.81,
# 247.09,
# ...
# 219.03,
# 249.06]],
# 'area': 1035.749,
# 'image_id': 1268,
# 'bbox': [192.81, 224.8, 74.73, 33.43],
# 'category_id': 16,
# 'id': 42986},
# ...]
anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]
# Sanity check that each annotation has a unique id
ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique".format(
json_file
)
imgs_anns = list(zip(imgs, anns))
logger.info("Loaded {} images in the LVIS format from {}".format(len(imgs_anns), json_file))
dataset_dicts = []
for (img_dict, anno_dict_list) in imgs_anns:
record = {}
file_name = img_dict["file_name"]
if img_dict["file_name"].startswith("COCO"):
# Convert form the COCO 2014 file naming convention of
# COCO_[train/val/test]2014_000000000000.jpg to the 2017 naming convention of
# 000000000000.jpg (LVIS v1 will fix this naming issue)
file_name = file_name[-16:]
record["file_name"] = os.path.join(image_root, file_name)
record["height"] = img_dict["height"]
record["width"] = img_dict["width"]
record["not_exhaustive_category_ids"] = img_dict.get("not_exhaustive_category_ids", [])
record["neg_category_ids"] = img_dict.get("neg_category_ids", [])
image_id = record["image_id"] = img_dict["id"]
objs = []
for anno in anno_dict_list:
# Check that the image_id in this annotation is the same as
# the image_id we're looking at.
# This fails only when the data parsing logic or the annotation file is buggy.
assert anno["image_id"] == image_id
obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS}
obj["category_id"] = anno["category_id"] - 1 # Convert 1-indexed to 0-indexed
segm = anno["segmentation"] # list[list[float]]
# filter out invalid polygons (< 3 points)
valid_segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
assert len(segm) == len(
valid_segm
), "Annotation contains an invalid polygon with < 3 points"
assert len(segm) > 0
obj["segmentation"] = segm
objs.append(obj)
record["annotations"] = objs
dataset_dicts.append(record)
return dataset_dicts
def get_lvis_instances_meta(dataset_name):
"""
Load LVIS metadata.
Args:
dataset_name (str): LVIS dataset name without the split name (e.g., "lvis_v0.5").
Returns:
dict: LVIS metadata with keys: thing_classes
"""
if "v0.5" in dataset_name:
return _get_lvis_instances_meta_v0_5()
# There will be a v1 in the future
# elif dataset_name == "lvis_v1":
# return get_lvis_instances_meta_v1()
raise ValueError("No built-in metadata for dataset {}".format(dataset_name))
def _get_lvis_instances_meta_v0_5():
assert len(LVIS_CATEGORIES) == 1230
cat_ids = [k["id"] for k in LVIS_CATEGORIES]
assert min(cat_ids) == 1 and max(cat_ids) == len(
cat_ids
), "Category ids are not in [1, #categories], as expected"
# Ensure that the category list is sorted by id
lvis_categories = sorted(LVIS_CATEGORIES, key=lambda x: x["id"])
thing_classes = [k["synonyms"][0] for k in lvis_categories]
meta = {"thing_classes": thing_classes}
return meta
if __name__ == "__main__":
"""
Test the LVIS json dataset loader.
Usage:
python -m detectron2.data.datasets.lvis \
path/to/json path/to/image_root dataset_name vis_limit
"""
import sys
import numpy as np
from detectron2.utils.logger import setup_logger
from PIL import Image
import detectron2.data.datasets # noqa # add pre-defined metadata
from detectron2.utils.visualizer import Visualizer
logger = setup_logger(name=__name__)
meta = MetadataCatalog.get(sys.argv[3])
dicts = load_lvis_json(sys.argv[1], sys.argv[2], sys.argv[3])
logger.info("Done loading {} samples.".format(len(dicts)))
dirname = "lvis-data-vis"
os.makedirs(dirname, exist_ok=True)
for d in dicts[: int(sys.argv[4])]:
img = np.array(Image.open(d["file_name"]))
visualizer = Visualizer(img, metadata=meta)
vis = visualizer.draw_dataset_dict(d)
fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
vis.save(fpath)
|
the-stack_106_22438 | from typing import Any, Dict, List, Optional
import attr
import numpy as np
import habitat_sim.agent
import habitat_sim.bindings as hsim
from habitat_sim import errors, utils
@attr.s(auto_attribs=True)
class GreedyGeodesicFollower(object):
r"""Greedily fits actions to follow the geodesic shortest path
Args:
pathfinder (hsim.PathFinder): Instance of the pathfinder that has the correct navmesh already loaded
agent (habitat_sim.agent.Agent): Agent to fit actions for. This agent's current configuration is used
to specify the actions. The fitted actions will also correspond to keys in the agents action_space.
`None` is used to signify that the goal location has been reached
goal_radius (Optional[float]): Specifies how close the agent must get to the goal in order for it to be considered
reached. If `None`, 0.75 times the agents step size is used.
"""
pathfinder: hsim.PathFinder
agent: habitat_sim.agent.Agent
goal_radius: Optional[float] = attr.ib(default=None)
action_mapping: Dict[hsim.GreedyFollowerCodes, Any] = attr.ib(
init=False, factory=dict, repr=False
)
impl: hsim.GreedyGeodesicFollowerImpl = attr.ib(
init=False, default=None, repr=False
)
forward_spec: habitat_sim.agent.ActuationSpec = attr.ib(
init=False, default=None, repr=False
)
left_spec: habitat_sim.agent.ActuationSpec = attr.ib(
init=False, default=None, repr=False
)
right_spec: habitat_sim.agent.ActuationSpec = attr.ib(
init=False, default=None, repr=False
)
def __attrs_post_init__(self):
self.action_mapping[hsim.GreedyFollowerCodes.STOP] = None
key, spec = self._find_action("move_forward")
self.forward_spec = spec
self.action_mapping[hsim.GreedyFollowerCodes.FORWARD] = key
key, spec = self._find_action("turn_left")
self.left_spec = spec
self.action_mapping[hsim.GreedyFollowerCodes.LEFT] = key
key, spec = self._find_action("turn_right")
self.right_spec = spec
self.action_mapping[hsim.GreedyFollowerCodes.RIGHT] = key
if self.goal_radius is None:
self.goal_radius = 0.75 * self.forward_spec.amount
self.impl = hsim.GreedyGeodesicFollowerImpl(
self.pathfinder,
self._move_forward,
self._turn_left,
self._turn_right,
self.goal_radius,
self.forward_spec.amount,
np.deg2rad(self.left_spec.amount),
)
def _find_action(self, name):
candidates = list(
filter(
lambda v: v[1].name == name,
self.agent.agent_config.action_space.items(),
)
)
assert (
len(candidates) == 1
), f"Could not find an action spec corresponding to {name}"
return candidates[0][0], candidates[0][1].actuation
def _move_forward(self, obj: hsim.SceneNode):
self.agent.controls(obj, "move_forward", self.forward_spec, True)
def _turn_left(self, obj: hsim.SceneNode):
self.agent.controls(obj, "turn_left", self.left_spec, True)
def _turn_right(self, obj: hsim.SceneNode):
self.agent.controls(obj, "turn_right", self.right_spec, True)
def next_action_along(self, goal_pos: np.array) -> Any:
r"""Find the next action to greedily follow the geodesic shortest path from the agent's current position
to get to the goal
Args:
goal_pos (np.array): The position of the goal
Returns:
Any: the action to take
"""
state = self.agent.state
next_act = self.impl.next_action_along(
state.position, utils.quat_to_coeffs(state.rotation), goal_pos
)
if next_act == hsim.GreedyFollowerCodes.ERROR:
raise errors.GreedyFollowerError()
else:
return self.action_mapping[next_act]
def find_path(self, goal_pos: np.array) -> List[Any]:
r"""Finds the sequence actions that greedily follow the geodesic shortest path
from the agent's current position to get to the goal. This is roughly equivilent to just
calling `next_action_along` until it returns `None`, but is faster
Args:
goal_pos (np.array): The position of the goal
Returns:
List[Any]: The list of actions to take. Ends with `None`
"""
state = self.agent.state
path = self.impl.find_path(
state.position, utils.quat_to_coeffs(state.rotation), goal_pos
)
if len(path) == 0:
raise errors.GreedyFollowerError()
path = list(map(lambda v: self.action_mapping[v], path))
return path
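# Minimal usage sketch (assumes a habitat_sim Simulator whose agent is
# configured with "move_forward", "turn_left" and "turn_right" actions;
# the variable names below are illustrative):
#
#   follower = GreedyGeodesicFollower(sim.pathfinder, sim.get_agent(0))
#   action = follower.next_action_along(goal_position)
#   while action is not None:
#       sim.step(action)
#       action = follower.next_action_along(goal_position)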
|
the-stack_106_22439 | import random
from hashlib import md5
from getpass import getpass
import pickle
import subprocess as sp
def randomPass():
def randomString():
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
return ''.join(random.choice(letters) for i in range(4))
def randomNum():
return ''.join(str(random.randint(0,9)) for i in range(4))
return randomString()+'@'+randomNum()
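# randomPass() builds passwords of the form "ABCD@1234": four random uppercase
# letters, an '@', then four random digits (the example value is illustrative).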
def Inscription():
def Inscription_():
Lname=input("Saisir votre nom: ").upper()
Fname=input("Saisir votre prénom: ").upper()
All=Lname+':'+Fname+':'
F=open('personnes.txt','a')
F.close()
F=open('personnes.txt','r')
for line in F:
l=line.split(':')
if (l[0]==Lname) and (l[1]==Fname):
F.close()
return False
F.close()
F=open('personnes.txt','a')
F.write(All)
F.close()
return True
if(Inscription_()==False) :
print("vous etes deja inscrit.")
return False
F=open('personnes.txt','r+')
i=0
for line in F:
i+=1
if(i<10) :
code='E00'+str(i)
elif(i<100):
code='E0'+str(i)
else:
code='E'+str(i)
paswd=randomPass()
F.write(code+':'+ str(md5(paswd.encode()).hexdigest())+':'+'OK'+'\n')
F.close()
print("inscription réussie\nVoici vos informations:\nLogin:",code,"\tPassword:",paswd)
def Liste_Candidat():
F=open('candidats.txt','r')
print("Liste des candidats: ")
i=1
for line in F:
l=line.split(':')
l[-1]=l[-1][0:-1]
print(l[0],"-->",l[1],l[2])
i+=1
print('C'+str(i),"--> blanc\n")
F.close()
def test_in(choice):
c=[]
C=open('candidats.txt','r')
i=1
for line in C:
i+=1
b=line.split(':')
c.append(b[0])
c.append('C'+str(i))
C.close()
if(choice not in c) :
return False
return True
def Vote():
code=input("Saisir votre code(E...): ").upper()
paswd=str(md5(getpass().encode()).hexdigest())
#paswd=str(md5(input("Saisir votre Mot de Passe: ").encode()).hexdigest())
F=open('personnes.txt','a')
F.close()
F=open('personnes.txt','r+')
lines=""
u=0
for line in F:
l=line.split(':')
if(l[-3]==code) and (l[-2]==paswd) :
u=1
print("Bienvenue",l[1],l[0])
l[-1]=l[-1][0:-1]
if(l[-1]!='OK') :
return False
Liste_Candidat()
choice=input("quelle est votre choix? ").upper()
while(test_in(choice)!=True) :
choice=input("le choix que vous avez entré ne correspond a aucun code des candidats veuillez rentrez votre choix! ").upper()
sure=input("Confirmer votre choix (Y/N): ").upper()
while(sure!='Y') :
if(sure == 'N') :
choice=input("quelle est votre choix? ").upper()
while(test_in(choice)!=True) :
choice=input("le choix que vous avez entré ne correspond a aucun code des candidats veuillez rentrez votre choix! ").upper()
sure=input("Confirmer votre choix (Y/N): ").upper()
else:
sure=input("Confirmer votre choix (Y/N): ").upper()
l[-1]='NO'
line=':'.join(l)+'\n'
A=open('result.bin','rb')
mypick=pickle.Unpickler(A)
D=mypick.load()
A.close()
D[choice]+=1
A=open('result.bin','wb')
mypick2=pickle.Pickler(A)
mypick2.dump(D)
A.close()
lines=lines+line
if(u==0) :
print("code ou mot de passe incorrecte!")
F.close()
F=open('personnes.txt','w')
F.write(lines)
F.close()
def Statistiques():
print("Résultat de l'élection: ")
A=open('result.bin','rb')
mypick=pickle.Unpickler(A)
D=mypick.load()
A.close()
n=0
for k in D:
n+=D[k]
if(n==0) :
n=1
A=open('candidats.txt','r')
i=1
for lin in A:
a=lin.split(':')
print(a[1],a[2][0:-1],"-->",D['C'+str(i)],"votes",D['C'+str(i)]*100/n,"%")
i+=1
A.close()
print("blanc -->",D['C'+str(i)],"votes",D['C'+str(i)]*100/n,"\n")
def clear_():
D=Dic_candidat()
A=open('result.bin','wb')
mypick2=pickle.Pickler(A)
mypick2.dump(D)
A.close()
F=open('personnes.txt','r+')
lines=""
for line in F:
l=line.split(':')
l[-1]='OK'
line=':'.join(l)+'\n'
lines=lines+line
F.close()
F=open('personnes.txt','w')
F.write(lines)
F.close()
def Dic_candidat():
c=[]
C=open('candidats.txt','r')
i=1
for line in C:
i+=1
b=line.split(':')
c.append(b[0])
c.append('C'+str(i))
C.close()
D={}
for e in c:
D[e]=0
return D
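# Hedged illustration (added, not part of the original script).  The code above
# assumes a 'candidats.txt' file whose lines look like "C1:FIRSTNAME:LASTNAME";
# Dic_candidat() maps every candidate code, plus one extra code reserved for the
# blank vote, to a zero tally.
def _example_result_shape():
    # For a two-line candidats.txt the returned dict would resemble:
    return {'C1': 0, 'C2': 0, 'C3': 0}  # 'C3' is the blank-vote slot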
def prcp():
print("1)- Inscription\n2)- Liste codes --> candidat\n3)- Voter\n4)- Statistiques\n5)- Clear Result\n6)- Quitter")
n=input()
while(n!='6') :
if(n=='1') :
Inscription()
input("\nappuyer sur ENTRER !!")
elif(n=='2'):
Liste_Candidat()
input("\nappuyer sur ENTRER !!")
elif(n=='3'):
if(Vote()==False) :
print("vous avez deja voté!!! ")
input("\nappuyer sur ENTRER")
elif(n=='4'):
Statistiques()
input("\nappuyer sur ENTRER !!")
elif(n=='5'):
clear_()
else:
print("veuillez entrer votre choix correctement !!!")
for i in range(99999999):
pass
sp.call('cls',shell=True)
print("1)- Inscription\n2)- Liste codes --> candidat\n3)- Voter\n4)- Statistiques\n5)- Clear Result\n6)- Quitter")
n=input()
prcp()
|
the-stack_106_22440 | # Copyright 2012-2013 Eric Ptak - trouch.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import sleep
from myDevices.utils.types import toint, signInteger
from myDevices.devices.i2c import I2C
from myDevices.devices.analog import ADC
class ADS1X1X(ADC, I2C):
VALUE = 0x00
CONFIG = 0x01
LO_THRESH = 0x02
HI_THRESH = 0x03
CONFIG_STATUS_MASK = 0x80
CONFIG_CHANNEL_MASK = 0x70
CONFIG_GAIN_MASK = 0x0E
CONFIG_MODE_MASK = 0x01
def __init__(self, slave, channelCount, resolution, name):
I2C.__init__(self, toint(slave))
ADC.__init__(self, channelCount, resolution, 4.096)
self._analogMax = 2**(resolution-1)
self.name = name
config = self.readRegisters(self.CONFIG, 2)
mode = 0 # continuous
config[0] &= ~self.CONFIG_MODE_MASK
config[0] |= mode
gain = 0x1 # FS = +/- 4.096V
config[0] &= ~self.CONFIG_GAIN_MASK
config[0] |= gain << 1
self.writeRegisters(self.CONFIG, config)
def __str__(self):
return "%s(slave=0x%02X)" % (self.name, self.slave)
def __analogRead__(self, channel, diff=False):
config = self.readRegisters(self.CONFIG, 2)
config[0] &= ~self.CONFIG_CHANNEL_MASK
if diff:
config[0] |= channel << 4
else:
config[0] |= (channel + 4) << 4
self.writeRegisters(self.CONFIG, config)
sleep(0.001)
d = self.readRegisters(self.VALUE, 2)
value = (d[0] << 8 | d[1]) >> (16-self._analogResolution)
return signInteger(value, self._analogResolution)
class ADS1014(ADS1X1X):
def __init__(self, slave=0x48):
ADS1X1X.__init__(self, slave, 1, 12, "ADS1014")
class ADS1015(ADS1X1X):
def __init__(self, slave=0x48):
ADS1X1X.__init__(self, slave, 4, 12, "ADS1015")
class ADS1114(ADS1X1X):
def __init__(self, slave=0x48):
ADS1X1X.__init__(self, slave, 1, 16, "ADS1114")
class ADS1115(ADS1X1X):
def __init__(self, slave=0x48):
ADS1X1X.__init__(self, slave, 4, 16, "ADS1115")
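# Hedged usage sketch (added, not part of the original driver).  It assumes an
# ADS1115 wired at the default I2C address; the raw-to-volts conversion follows
# the +/-4.096 V full-scale range configured in ADS1X1X.__init__ above.
def _example_read_channel0():
    adc = ADS1115(slave=0x48)            # requires a real I2C bus to instantiate
    raw = adc.__analogRead__(0)          # signed reading, channel 0, single-ended
    return raw * 4.096 / adc._analogMax  # convert raw counts to volts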
|
the-stack_106_22442 | #!/usr/bin/env python
from ansible.module_utils.basic import *
import sys
import re
from collections import defaultdict
MODULE_FIELDS = {
'file': {'type': str, 'default': '/etc/hosts'},
'hosts': {'type': set, 'required': True},
'ips': {'type': set, 'required': True},
'append': {'type': bool, 'required': True}
}
def read_hosts(file):
hostsfile = defaultdict(set)
with open(file, 'rU') as f:
hostslines = [re.split(r'\s+', l.strip().split('#', 2)[0]) for l in f.readlines() if not l.startswith('#') and len(l.strip()) != 0]
for h in hostslines:
hostsfile[h[0]].update(h[1:])
return hostsfile
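# Hedged illustration (added, not part of the module): shows the shape that
# read_hosts() produces for a typical hosts file.
def _example_read_hosts_shape():
    # For a file containing:
    #   127.0.0.1   localhost
    #   10.0.0.5    web1 web1.internal
    # read_hosts() returns a defaultdict(set) equivalent to:
    return {'127.0.0.1': {'localhost'}, '10.0.0.5': {'web1', 'web1.internal'}}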
def main():
module = AnsibleModule(argument_spec=MODULE_FIELDS)
oldhostsfile = read_hosts(module.params['file'])
hostsfile = defaultdict(set)
if module.params['append']:
hostsfile.update(oldhostsfile)
hosts = module.params['hosts']
for ip in module.params['ips']:
hostsfile[ip].update(hosts)
changed = hostsfile != oldhostsfile
if changed:
with open(module.params['file'], 'w') as f:
for ip in hostsfile:
hostname = list(hostsfile[ip])[0]
if hostname == '-':
hostname = list(hostsfile[ip])[-1]
f.write( f'{str(ip)}\t\t{ hostname }\n' )
module.exit_json(changed=changed, meta=hostsfile)
if __name__ == '__main__':
main()
|
the-stack_106_22443 | # -*- coding: utf-8 -*-
__author__ = "Ngoc Huynh Bao"
__email__ = "[email protected]"
import os
import h5py
import numpy as np
import warnings
from deoxys.keras.callbacks import CSVLogger
from ..model.callbacks import DeoxysModelCheckpoint, PredictionCheckpoint, \
DBLogger
from ..model import model_from_full_config, model_from_config, load_model
from deoxys_vis import plot_log_performance_from_csv, mask_prediction, \
plot_images_w_predictions, read_csv
from ..database import Tables, ExperimentAttr, HDF5Attr, SessionAttr, \
SessionStatus
class Experiment:
MODEL_PATH = '/model'
MODEL_NAME = '/model.{epoch:03d}.h5'
BEST_MODEL_PATH = '/best'
PREDICTION_PATH = '/prediction'
PREDICTION_NAME = '/prediction.{epoch:03d}.h5'
LOG_FILE = '/logs.csv'
PERFORMANCE_PATH = '/performance'
PREDICTED_IMAGE_PATH = '/images'
TEST_OUTPUT_PATH = '/test'
PREDICT_TEST_NAME = '/prediction_test.h5'
_max_size = 1
def __init__(self,
log_base_path='logs',
best_model_monitors='val_loss',
best_model_modes='auto'):
self.model = None
self.architecture = None
self.input_params = None
self.model_params = None
self.train_params = None
self.data_reader = None
self.weights_file = None
self.log_base_path = log_base_path
self.best_model_monitors = best_model_monitors if type(
best_model_monitors) == list else [best_model_monitors]
self.best_model_modes = best_model_modes if type(
best_model_modes) == list else \
[best_model_modes] * len(best_model_monitors)
for i, (monitor, mode) in enumerate(zip(self.best_model_monitors,
self.best_model_modes)):
if mode not in ['auto', 'min', 'max']:
warnings.warn('ModelCheckpoint mode %s is unknown, '
'fallback to auto mode.' % (mode),
RuntimeWarning)
mode = 'auto'
if mode == 'auto':
if 'acc' in monitor or \
monitor.startswith('fmeasure') or \
'fbeta' in monitor:
self.best_model_modes[i] = 'max'
else:
self.best_model_modes[i] = 'min'
def from_full_config(self, file, weights_file=None, **kwargs):
self.model = model_from_full_config(file,
weights_file=weights_file,
**kwargs)
return self
def from_config(self, architecture, input_params,
model_params, train_params,
dataset_params, weights_file=None):
self.model = model_from_config(architecture, input_params,
model_params, train_params,
dataset_params,
weights_file=weights_file)
return self
def from_file(self, filename):
self.model = load_model(filename)
return self
def best_model(self):
res = {}
logger_path = self.log_base_path + self.LOG_FILE
if os.path.isfile(logger_path):
df = read_csv(logger_path, index_col='epoch',
usecols=['epoch'] + self.best_model_monitors)
min_df = df.min()
min_epoch = df.idxmin()
max_df = df.max()
max_epoch = df.idxmax()
for monitor, mode in zip(self.best_model_monitors,
self.best_model_modes):
if mode == 'min':
val = min_df[monitor]
epoch = min_epoch[monitor]
else:
val = max_df[monitor]
epoch = max_epoch[monitor]
res[monitor] = {
'best': {
'val': val,
'epoch': epoch + 1
}}
else:
warnings.warn('No log files to check for best model')
return res
def run_experiment(self, train_history_log=True,
model_checkpoint_period=0,
prediction_checkpoint_period=0,
save_origin_images=False,
verbose=1,
epochs=None, initial_epoch=None, **custom_kwargs
):
log_base_path = self.log_base_path
if self._check_run():
if not os.path.exists(log_base_path):
os.makedirs(log_base_path)
kwargs = custom_kwargs or {}
csv_logger_append = False
if epochs:
kwargs['epochs'] = epochs
if initial_epoch:
kwargs['initial_epoch'] = initial_epoch
if initial_epoch > 0:
csv_logger_append = True
callbacks = []
if train_history_log:
callback = self._create_logger(log_base_path,
append=csv_logger_append)
callbacks.append(callback)
if model_checkpoint_period > 0:
if not os.path.exists(log_base_path + self.MODEL_PATH):
os.makedirs(log_base_path + self.MODEL_PATH)
callback = self._create_model_checkpoint(
log_base_path,
period=model_checkpoint_period)
callbacks.append(callback)
if prediction_checkpoint_period > 0:
if not os.path.exists(log_base_path + self.PREDICTION_PATH):
os.makedirs(log_base_path + self.PREDICTION_PATH)
callback = self._create_prediction_checkpoint(
log_base_path,
prediction_checkpoint_period,
use_original=save_origin_images
)
callbacks.append(callback)
kwargs['callbacks'] = callbacks
self.model.fit_train(**kwargs)
return self
def _plot_performance(self, log_base_path):
if not os.path.exists(log_base_path + self.PERFORMANCE_PATH):
os.makedirs(log_base_path + self.PERFORMANCE_PATH)
if os.path.exists(log_base_path + self.LOG_FILE):
print('\nPlotting performance metrics...')
plot_log_performance_from_csv(
filepath=log_base_path + self.LOG_FILE,
output_path=log_base_path + self.PERFORMANCE_PATH)
else:
raise Warning('No log files for plotting performance')
def plot_performance(self):
log_base_path = self.log_base_path
self._plot_performance(log_base_path)
return self
def _plot_prediction(self, log_base_path,
masked_images,
contour=True,
base_image_name='x',
truth_image_name='y',
predicted_image_name='predicted',
predicted_image_title_name='Image {index:05d}',
img_name='{index:05d}.png'):
if os.path.exists(log_base_path + self.PREDICTION_PATH):
print('\nCreating prediction images...')
# mask images
prediced_image_path = log_base_path + self.PREDICTED_IMAGE_PATH
if not os.path.exists(prediced_image_path):
os.makedirs(prediced_image_path)
for filename in os.listdir(
log_base_path + self.PREDICTION_PATH):
if filename.endswith(".h5") or filename.endswith(".hdf5"):
# Create a folder for storing result in that period
images_path = prediced_image_path + '/' + filename
if not os.path.exists(images_path):
os.makedirs(images_path)
self._plot_predicted_images(
data_path=log_base_path + self.PREDICTION_PATH
+ '/' + filename,
out_path=images_path,
images=masked_images,
base_image_name=base_image_name,
truth_image_name=truth_image_name,
predicted_image_name=predicted_image_name,
title=predicted_image_title_name,
contour=contour,
name=img_name)
def plot_prediction(self, masked_images,
contour=True,
base_image_name='x',
truth_image_name='y',
predicted_image_name='predicted',
predicted_image_title_name='Image {index:05d}',
img_name='{index:05d}.png'):
log_base_path = self.log_base_path
self._plot_prediction(
log_base_path,
masked_images,
contour=contour,
base_image_name=base_image_name,
truth_image_name=truth_image_name,
predicted_image_name=predicted_image_name,
predicted_image_title_name=predicted_image_title_name,
img_name=img_name)
return self
def _predict_test(self, filepath, use_original_image=False):
data_info = self.model.data_reader.test_generator.description
total_size = np.product(
data_info[0]['shape']) * data_info[0]['total'] / 1e9
# predict directly for data of size < max_size (1GB)
if len(data_info) == 1 and total_size < self._max_size:
predicted = self.model.predict_test(verbose=1)
# Create the h5 file
hf = h5py.File(filepath, 'w')
hf.create_dataset('predicted', data=predicted)
hf.close()
if use_original_image:
original_data = self.model.data_reader.original_test
for key, val in original_data.items():
hf = h5py.File(filepath, 'a')
hf.create_dataset(key, data=val)
hf.close()
else:
# Create data from test_generator
x = None
y = None
test_gen = self.model.data_reader.test_generator
data_gen = test_gen.generate()
for _ in range(test_gen.total_batch):
next_x, next_y = next(data_gen)
if x is None:
x = next_x
y = next_y
else:
x = np.concatenate((x, next_x))
y = np.concatenate((y, next_y))
hf = h5py.File(filepath, 'a')
hf.create_dataset('x', data=x)
hf.create_dataset('y', data=y)
hf.close()
# for large data of same size, predict each chunk
elif len(data_info) == 1:
test_gen = self.model.data_reader.test_generator
data_gen = test_gen.generate()
next_x, next_y = next(data_gen)
predicted = self.model.predict(next_x, verbose=1)
input_shape = (data_info[0]['total'],) + data_info[0]['shape']
input_chunks = (1,) + data_info[0]['shape']
target_shape = (data_info[0]['total'],) + next_y.shape[1:]
target_chunks = (1,) + next_y.shape[1:]
with h5py.File(filepath, 'w') as hf:
hf.create_dataset('x',
shape=input_shape, chunks=input_chunks,
compression='gzip')
hf.create_dataset('y',
shape=target_shape, chunks=target_chunks,
compression='gzip')
hf.create_dataset('predicted',
shape=target_shape, chunks=target_chunks,
compression='gzip')
with h5py.File(filepath, 'a') as hf:
next_index = len(next_x)
hf['x'][:next_index] = next_x
hf['y'][:next_index] = next_y
hf['predicted'][:next_index] = predicted
for _ in range(test_gen.total_batch - 1):
next_x, next_y = next(data_gen)
predicted = self.model.predict(next_x, verbose=1)
curr_index = next_index
next_index = curr_index + len(next_x)
with h5py.File(filepath, 'a') as hf:
hf['x'][curr_index:next_index] = next_x
hf['y'][curr_index:next_index] = next_y
hf['predicted'][curr_index:next_index] = predicted
# data of different size
else:
test_gen = self.model.data_reader.test_generator
data_gen = test_gen.generate()
for curr_info_idx, info in enumerate(data_info):
next_x, next_y = next(data_gen)
predicted = self.model.predict(next_x, verbose=1)
input_shape = (info['total'],) + info['shape']
input_chunks = (1,) + info['shape']
target_shape = (info['total'],) + next_y.shape[1:]
target_chunks = (1,) + next_y.shape[1:]
if curr_info_idx == 0:
mode = 'w'
else:
mode = 'a'
with h5py.File(filepath, mode) as hf:
hf.create_dataset(f'{curr_info_idx:02d}/x',
shape=input_shape,
chunks=input_chunks,
compression='gzip')
hf.create_dataset(f'{curr_info_idx:02d}/y',
shape=target_shape,
chunks=target_chunks,
compression='gzip')
hf.create_dataset(f'{curr_info_idx:02d}/predicted',
shape=target_shape,
chunks=target_chunks,
compression='gzip')
with h5py.File(filepath, 'a') as hf:
next_index = len(next_x)
hf[f'{curr_info_idx:02d}/x'][:next_index] = next_x
hf[f'{curr_info_idx:02d}/y'][:next_index] = next_y
hf[f'{curr_info_idx:02d}/predicted'][
:next_index] = predicted
while next_index < info['total']:
next_x, next_y = next(data_gen)
predicted = self.model.predict(next_x, verbose=1)
curr_index = next_index
next_index = curr_index + len(next_x)
with h5py.File(filepath, 'a') as hf:
hf[f'{curr_info_idx:02d}/x'][
curr_index:next_index] = next_x
hf[f'{curr_info_idx:02d}/y'][
curr_index:next_index] = next_y
hf[f'{curr_info_idx:02d}/predicted'][
curr_index:next_index] = predicted
def run_test(self, use_best_model=False,
masked_images=None,
use_original_image=False,
contour=True,
base_image_name='x',
truth_image_name='y',
predicted_image_name='predicted',
image_name='{index:05d}.png',
image_title_name='Image {index:05d}'):
log_base_path = self.log_base_path
test_path = log_base_path + self.TEST_OUTPUT_PATH
if not os.path.exists(test_path):
os.makedirs(test_path)
if use_best_model:
raise NotImplementedError
else:
score = self.model.evaluate_test(verbose=1)
print(score)
filepath = test_path + self.PREDICT_TEST_NAME
self._predict_test(filepath, use_original_image=use_original_image)
if masked_images:
self._plot_predicted_images(
data_path=filepath,
out_path=test_path,
images=masked_images,
base_image_name=base_image_name,
truth_image_name=truth_image_name,
predicted_image_name=predicted_image_name,
title=image_title_name,
contour=contour,
name=image_name)
return self
def run_lambda(self, lambda_fn, **kwargs):
"""
Custom action between experiments
"""
lambda_fn(self, **kwargs)
return self
def _create_logger(self, base_path, append=False):
return CSVLogger(filename=base_path + self.LOG_FILE, append=append)
def _create_model_checkpoint(self, base_path, period):
return DeoxysModelCheckpoint(
period=period,
filepath=base_path + self.MODEL_PATH + self.MODEL_NAME)
def _create_prediction_checkpoint(self, base_path, period, use_original):
return PredictionCheckpoint(
filepath=base_path + self.PREDICTION_PATH + self.PREDICTION_NAME,
period=period, use_original=use_original)
def _plot_predicted_images(self, data_path, out_path, images,
contour=True,
base_image_name='x',
truth_image_name='y',
predicted_image_name='predicted',
title='Image {index:05d}',
name='{index:05d}.png'):
hf = h5py.File(data_path, 'r')
keys = list(hf.keys())
while 'create_group' in dir(hf[keys[0]]):
new_keys = []
for key in keys:
new_keys.extend([f'{key}/{k}' for k in hf[key].keys()])
keys = new_keys
for index in images:
kwargs = {key: hf[key][index] for key in keys}
if base_image_name not in kwargs:
for key in kwargs:
if key.endswith(base_image_name):
break
prefix = key[:-len(base_image_name)]
base_image_name = prefix + base_image_name
truth_image_name = prefix + truth_image_name
predicted_image_name = prefix + predicted_image_name
img_name = out_path + '/' + name.format(index=index, **kwargs)
try:
if contour:
mask_prediction(img_name,
image=kwargs[base_image_name],
true_mask=kwargs[truth_image_name],
pred_mask=kwargs[predicted_image_name],
title=title.format(index=index, **kwargs))
else:
plot_images_w_predictions(
img_name,
image=kwargs[base_image_name],
true_mask=kwargs[truth_image_name],
pred_mask=kwargs[predicted_image_name],
title=title.format(index=index, **kwargs))
except Exception as e:
print('An error occurred while plotting prediction', e)
def _check_run(self):
if self.model:
if self.model._data_reader:
if self.model.is_compiled:
return True
raise RuntimeError("Cannot run experiment with incomplete model")
return False
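# Hedged usage sketch (added, not part of deoxys).  The config path, checkpoint
# periods and image indices are placeholders; only methods defined above are used.
def _example_run_experiment():
    exp = Experiment(log_base_path='logs/demo')
    (exp.from_full_config('configs/experiment.json')   # hypothetical config file
        .run_experiment(model_checkpoint_period=5,
                        prediction_checkpoint_period=5,
                        epochs=50)
        .plot_performance()
        .plot_prediction(masked_images=[0, 1, 2]))
    return exp.best_model()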
class ExperimentDB(Experiment): # pragma: no cover
def __init__(self, dbclient, experiment_id=None, session_id=None,
log_base_path='logs',
best_model_monitors='val_loss',
best_model_modes='auto'):
"""
An experiment logging performance to a database
Parameters
----------
dbclient : deoxys.database.DBClient
The database client
        experiment_id : str, int, or ObjectID depending on the dbclient, optional
Experiment id, by default None
        session_id : str, int, or ObjectID depending on the dbclient, optional
Session id, by default None
log_base_path : str, optional
Base path to log files, by default 'logs'
best_model_monitors : str, optional
Attribute to monitor, by default 'val_loss'
best_model_modes : str, optional
One of 'max', 'min', 'auto', by default 'auto'
Raises
------
ValueError
When both `session_id` and `experiment_id` are not set
"""
super().__init__(log_base_path, best_model_monitors, best_model_modes)
self.dbclient = dbclient
if experiment_id is None and session_id is None:
raise ValueError('"session_id" or "experiment_id" must be set')
if session_id:
last_model = self.dbclient.find_max(Tables.MODELS, {
HDF5Attr.SESSION_ID: session_id}, HDF5Attr.EPOCH)
self.curr_epoch = last_model[HDF5Attr.EPOCH]
self.session = dbclient.find_by_id(Tables.SESSIONS, session_id)
self._update_epoch(last_model[HDF5Attr.EPOCH])
self.from_file(last_model[HDF5Attr.FILE_LOCATION])
else:
insert_res = dbclient.insert(Tables.SESSIONS, {
SessionAttr.EXPERIMENT_ID: experiment_id,
SessionAttr.CURRENT_EPOCH: 0,
SessionAttr.STATUS: 'created'
}, time_logs=True)
self.session = self.dbclient.find_by_id(
Tables.SESSIONS, insert_res.inserted_id)
self.curr_epoch = 0
experiment_obj = self.dbclient.find_by_id(
Tables.EXPERIMENTS, experiment_id)
if ExperimentAttr.CONFIG in experiment_obj:
self.from_full_config(experiment_obj[ExperimentAttr.CONFIG])
elif ExperimentAttr.SAVED_MODEL_LOC in experiment_obj:
self.from_file(experiment_obj[ExperimentAttr.SAVED_MODEL_LOC])
self.log_base_path = os.path.join(
log_base_path, str(self.dbclient.get_id(self.session)))
def run_experiment(self, train_history_log=True,
model_checkpoint_period=0,
prediction_checkpoint_period=0,
save_origin_images=False,
verbose=1,
epochs=None
):
log_base_path = self.log_base_path
if self._check_run():
if not os.path.exists(log_base_path):
os.makedirs(log_base_path)
kwargs = {}
if epochs:
kwargs['epochs'] = epochs + self.curr_epoch
kwargs['initial_epoch'] = self.curr_epoch
callbacks = []
if train_history_log:
callback = self._create_logger(log_base_path,
append=self.curr_epoch > 0)
callbacks.append(callback)
callback = self._create_db_logger()
callbacks.append(callback)
if model_checkpoint_period > 0:
if not os.path.exists(log_base_path + self.MODEL_PATH):
os.makedirs(log_base_path + self.MODEL_PATH)
callback = self._create_model_checkpoint(
log_base_path,
period=model_checkpoint_period)
callbacks.append(callback)
if prediction_checkpoint_period > 0:
if not os.path.exists(log_base_path + self.PREDICTION_PATH):
os.makedirs(log_base_path + self.PREDICTION_PATH)
callback = self._create_prediction_checkpoint(
log_base_path,
prediction_checkpoint_period,
use_original=save_origin_images
)
callbacks.append(callback)
kwargs['callbacks'] = callbacks
self._update_status(SessionStatus.TRAINING)
try:
self.model.fit_train(**kwargs)
self.curr_epoch += epochs
self._update_status(SessionStatus.FINISHED)
self._update_epoch(self.curr_epoch)
except Exception:
self._update_status(SessionStatus.FAILED)
return self
def _create_db_logger(self):
return DBLogger(self.dbclient, self.dbclient.get_id(self.session))
def _update_epoch(self, new_epoch):
self.dbclient.update_by_id(
Tables.SESSIONS, self.dbclient.get_id(self.session),
{SessionAttr.CURRENT_EPOCH: new_epoch}, time_logs=True)
def _update_status(self, new_status):
self.dbclient.update_by_id(
Tables.SESSIONS, self.dbclient.get_id(self.session),
{SessionAttr.STATUS: new_status}, time_logs=True)
def _create_model_checkpoint(self, base_path, period):
return DeoxysModelCheckpoint(
period=period,
filepath=base_path + self.MODEL_PATH + self.MODEL_NAME,
dbclient=self.dbclient,
session=self.dbclient.get_id(self.session))
def _create_prediction_checkpoint(self, base_path, period, use_original):
return PredictionCheckpoint(
filepath=base_path + self.PREDICTION_PATH + self.PREDICTION_NAME,
period=period, use_original=use_original,
dbclient=self.dbclient,
session=self.dbclient.get_id(self.session))
|
the-stack_106_22445 | import tensorflow as tf
import time, os, sys
from py.fm_model import LocalFmModel, DistFmModel
PREDICT_BATCH_SIZE = 10000
def _predict(sess, supervisor, is_master_worker, model, model_file, predict_files, score_path, need_to_init):
with sess as sess:
if is_master_worker:
if need_to_init:
sess.run(model.init_vars)
if not os.path.exists(score_path):
os.mkdir(score_path)
model.saver.restore(sess, model_file)
for fname in predict_files:
sess.run(model.file_enqueue_op,
feed_dict={model.epoch_id: 0, model.is_training: False, model.data_file: fname,
model.weight_file: ''})
sess.run(model.file_close_queue_op)
sess.run(model.set_model_loaded)
try:
while not sess.run(model.model_loaded):
print('Waiting for the model to be loaded.')
time.sleep(1)
fid = 0
while True:
_, _, fname, _ = sess.run(model.file_dequeue_op)
score_file = score_path + '/' + os.path.basename(fname) + '.score'
print('Start processing %s, scores written to %s ...' % (fname, score_file))
with open(score_file, 'w') as o:
while True:
pred_score, example_num = sess.run([model.pred_score, model.example_num],
feed_dict={model.file_id: fid, model.data_file: fname,
model.weight_file: ''})
if example_num == 0: break
for score in pred_score:
o.write(str(score) + '\n')
fid += 1
except tf.errors.OutOfRangeError:
pass
except Exception as ex:
if supervisor != None:
supervisor.request_stop(ex)
raise
def dist_predict(ps_hosts, worker_hosts, job_name, task_idx, predict_files, vocabulary_size, vocabulary_block_num,
hash_feature_id, factor_num, model_file, score_path):
cluster = tf.train.ClusterSpec({'ps': ps_hosts, 'worker': worker_hosts})
server = tf.train.Server(cluster, job_name=job_name, task_index=task_idx)
if job_name == 'ps':
server.join()
elif job_name == 'worker':
model = DistFmModel(len(predict_files), cluster, task_idx, 0, vocabulary_size, vocabulary_block_num,
hash_feature_id, factor_num, 0, None, None, PREDICT_BATCH_SIZE, 0, 0)
sv = tf.train.Supervisor(is_chief=(task_idx == 0), init_op=model.init_vars)
_predict(sv.managed_session(server.target, config=tf.ConfigProto(log_device_placement=False)), sv,
task_idx == 0, model, model_file, predict_files, score_path, False)
else:
sys.stderr.write('Invalid Job Name: %s' % job_name)
raise Exception
def local_predict(predict_files, vocabulary_size, vocabulary_block_num, hash_feature_id, factor_num, model_file,
score_path):
model = LocalFmModel(len(predict_files), 0, vocabulary_size, vocabulary_block_num, hash_feature_id, factor_num, 0,
None, None, PREDICT_BATCH_SIZE, 0, 0)
_predict(tf.Session(), None, True, model, model_file, predict_files, score_path, True)
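# Hedged usage sketch (added, not part of the original module).  File names,
# vocabulary sizes and the checkpoint path are made-up placeholders.
def _example_local_predict():
    local_predict(
        predict_files=['data/part-00000'],        # hypothetical input files
        vocabulary_size=2 ** 20,
        vocabulary_block_num=4,
        hash_feature_id=True,
        factor_num=16,
        model_file='model/fm_model.ckpt',         # hypothetical checkpoint
        score_path='scores',
    )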
|
the-stack_106_22446 | from __future__ import absolute_import, division, print_function
from stripe import error, util, six
from stripe.stripe_object import StripeObject
from stripe.six.moves.urllib.parse import quote_plus
class APIResource(StripeObject):
@classmethod
def retrieve(cls, id, api_key=None, **params):
instance = cls(id, api_key, **params)
instance.refresh()
return instance
def refresh(self):
self.refresh_from(self.request('get', self.instance_url()))
return self
@classmethod
def class_name(cls):
if cls == APIResource:
raise NotImplementedError(
'APIResource is an abstract class. You should perform '
'actions on its subclasses (e.g. Charge, Customer)')
return str(quote_plus(cls.__name__.lower()))
@classmethod
def class_url(cls):
cls_name = cls.class_name()
return "/v1/%ss" % (cls_name,)
def instance_url(self):
id = self.get('id')
if not isinstance(id, six.string_types):
raise error.InvalidRequestError(
'Could not determine which URL to request: %s instance '
'has invalid ID: %r, %s. ID should be of type `str` (or'
' `unicode`)' % (type(self).__name__, id, type(id)), 'id')
id = util.utf8(id)
base = self.class_url()
extn = quote_plus(id)
return "%s/%s" % (base, extn)
|
the-stack_106_22447 | import math
import warnings
import tlz as toolz
from fsspec.core import get_fs_token_paths
from fsspec.implementations.local import LocalFileSystem
from fsspec.utils import stringify_path
from packaging.version import parse as parse_version
from ....base import compute_as_if_collection, tokenize
from ....delayed import Delayed
from ....highlevelgraph import HighLevelGraph
from ....layers import DataFrameIOLayer
from ....utils import apply, import_required, natural_sort_key, parse_bytes
from ...core import DataFrame, Scalar, new_dd_object
from ...methods import concat
from .utils import _sort_and_analyze_paths
try:
import snappy
snappy.compress
except (ImportError, AttributeError):
snappy = None
__all__ = ("read_parquet", "to_parquet")
NONE_LABEL = "__null_dask_index__"
# ----------------------------------------------------------------------
# User API
class ParquetFunctionWrapper:
"""
Parquet Function-Wrapper Class
Reads parquet data from disk to produce a partition
(given a `part` argument).
"""
def __init__(
self,
engine,
fs,
meta,
columns,
index,
kwargs,
common_kwargs,
):
self.engine = engine
self.fs = fs
self.meta = meta
self.columns = columns
self.index = index
# `kwargs` = user-defined kwargs to be passed
# identically for all partitions.
#
# `common_kwargs` = kwargs set by engine to be
# passed identically for all
# partitions.
self.common_kwargs = toolz.merge(common_kwargs, kwargs or {})
def project_columns(self, columns):
"""Return a new ParquetFunctionWrapper object with
a sub-column projection.
"""
if columns == self.columns:
return self
return ParquetFunctionWrapper(
self.engine,
self.fs,
self.meta,
columns,
self.index,
None, # Already merged into common_kwargs
self.common_kwargs,
)
def __call__(self, part):
if not isinstance(part, list):
part = [part]
return read_parquet_part(
self.fs,
self.engine,
self.meta,
[(p["piece"], p.get("kwargs", {})) for p in part],
self.columns,
self.index,
self.common_kwargs,
)
def read_parquet(
path,
columns=None,
filters=None,
categories=None,
index=None,
storage_options=None,
engine="auto",
gather_statistics=None,
ignore_metadata_file=False,
metadata_task_size=None,
split_row_groups=None,
chunksize=None,
aggregate_files=None,
**kwargs,
):
"""
Read a Parquet file into a Dask DataFrame
This reads a directory of Parquet data into a Dask.dataframe, one file per
partition. It selects the index among the sorted columns if any exist.
Parameters
----------
path : str or list
Source directory for data, or path(s) to individual parquet files.
Prefix with a protocol like ``s3://`` to read from alternative
filesystems. To read from multiple files you can pass a globstring or a
list of paths, with the caveat that they must all have the same
protocol.
columns : str or list, default None
Field name(s) to read in as columns in the output. By default all
non-index fields will be read (as determined by the pandas parquet
metadata, if present). Provide a single field name instead of a list to
read in the data as a Series.
filters : Union[List[Tuple[str, str, Any]], List[List[Tuple[str, str, Any]]]], default None
List of filters to apply, like ``[[('col1', '==', 0), ...], ...]``.
Using this argument will NOT result in row-wise filtering of the final
partitions unless ``engine="pyarrow-dataset"`` is also specified. For
other engines, filtering is only performed at the partition level, i.e.,
to prevent the loading of some row-groups and/or files.
For the "pyarrow" engines, predicates can be expressed in disjunctive
normal form (DNF). This means that the innermost tuple describes a single
column predicate. These inner predicates are combined with an AND
conjunction into a larger predicate. The outer-most list then combines all
of the combined filters with an OR disjunction.
Predicates can also be expressed as a List[Tuple]. These are evaluated
        as an AND conjunction. To express OR in predicates, one must use the
(preferred for "pyarrow") List[List[Tuple]] notation.
Note that the "fastparquet" engine does not currently support DNF for
the filtering of partitioned columns (List[Tuple] is required).
index : str, list or False, default None
Field name(s) to use as the output frame index. By default will be
inferred from the pandas parquet file metadata (if present). Use False
to read all fields as columns.
categories : list or dict, default None
For any fields listed here, if the parquet encoding is Dictionary,
the column will be created with dtype category. Use only if it is
guaranteed that the column is encoded as dictionary in all row-groups.
If a list, assumes up to 2**16-1 labels; if a dict, specify the number
of labels expected; if None, will load categories automatically for
data written by dask/fastparquet, not otherwise.
storage_options : dict, default None
Key/value pairs to be passed on to the file-system backend, if any.
engine : str, default 'auto'
Parquet reader library to use. Options include: 'auto', 'fastparquet',
'pyarrow', 'pyarrow-dataset', and 'pyarrow-legacy'. Defaults to 'auto',
which selects the FastParquetEngine if fastparquet is installed (and
ArrowDatasetEngine otherwise). If 'pyarrow' or 'pyarrow-dataset' is
specified, the ArrowDatasetEngine (which leverages the pyarrow.dataset
API) will be used. If 'pyarrow-legacy' is specified, ArrowLegacyEngine
will be used (which leverages the pyarrow.parquet.ParquetDataset API).
NOTE: The 'pyarrow-legacy' option (ArrowLegacyEngine) is deprecated
for pyarrow>=5.
gather_statistics : bool, default None
Gather the statistics for each dataset partition. By default,
this will only be done if the _metadata file is available. Otherwise,
statistics will only be gathered if True, because the footer of
every file will be parsed (which is very slow on some systems).
ignore_metadata_file : bool, default False
Whether to ignore the global ``_metadata`` file (when one is present).
If ``True``, or if the global ``_metadata`` file is missing, the parquet
metadata may be gathered and processed in parallel. Parallel metadata
processing is currently supported for ``ArrowDatasetEngine`` only.
metadata_task_size : int, default configurable
If parquet metadata is processed in parallel (see ``ignore_metadata_file``
description above), this argument can be used to specify the number of
dataset files to be processed by each task in the Dask graph. If this
argument is set to ``0``, parallel metadata processing will be disabled.
The default values for local and remote filesystems can be specified
with the "metadata-task-size-local" and "metadata-task-size-remote"
config fields, respectively (see "dataframe.parquet").
split_row_groups : bool or int, default None
Default is True if a _metadata file is available or if
        the dataset is composed of a single file (otherwise the default is False).
If True, then each output dataframe partition will correspond to a single
parquet-file row-group. If False, each partition will correspond to a
complete file. If a positive integer value is given, each dataframe
partition will correspond to that number of parquet row-groups (or fewer).
Only the "pyarrow" engine supports this argument.
chunksize : int or str, default None
The desired size of each output ``DataFrame`` partition in terms of total
(uncompressed) parquet storage space. If specified, adjacent row-groups
and/or files will be aggregated into the same output partition until the
cumulative ``total_byte_size`` parquet-metadata statistic reaches this
value. Use `aggregate_files` to enable/disable inter-file aggregation.
aggregate_files : bool or str, default None
Whether distinct file paths may be aggregated into the same output
partition. This parameter requires `gather_statistics=True`, and is
only used when `chunksize` is specified or when `split_row_groups` is
an integer >1. A setting of True means that any two file paths may be
aggregated into the same output partition, while False means that
inter-file aggregation is prohibited.
For "hive-partitioned" datasets, a "partition"-column name can also be
specified. In this case, we allow the aggregation of any two files
sharing a file path up to, and including, the corresponding directory name.
For example, if ``aggregate_files`` is set to ``"section"`` for the
directory structure below, ``03.parquet`` and ``04.parquet`` may be
aggregated together, but ``01.parquet`` and ``02.parquet`` cannot be.
If, however, ``aggregate_files`` is set to ``"region"``, ``01.parquet``
may be aggregated with ``02.parquet``, and ``03.parquet`` may be aggregated
with ``04.parquet``::
dataset-path/
├── region=1/
│ ├── section=a/
│ │ └── 01.parquet
│ ├── section=b/
│ └── └── 02.parquet
└── region=2/
├── section=a/
│ ├── 03.parquet
└── └── 04.parquet
Note that the default behavior of ``aggregate_files`` is False.
**kwargs: dict (of dicts)
Passthrough key-word arguments for read backend.
The top-level keys correspond to the appropriate operation type, and
the second level corresponds to the kwargs that will be passed on to
the underlying ``pyarrow`` or ``fastparquet`` function.
Supported top-level keys: 'dataset' (for opening a ``pyarrow`` dataset),
'file' (for opening a ``fastparquet`` ``ParquetFile``), 'read' (for the
        backend read function), 'arrow_to_pandas' (for controlling the arguments
        passed to ``pyarrow.Table.to_pandas()``)
Examples
--------
>>> df = dd.read_parquet('s3://bucket/my-parquet-data') # doctest: +SKIP
See Also
--------
to_parquet
pyarrow.parquet.ParquetDataset
"""
if "read_from_paths" in kwargs:
warnings.warn(
"`read_from_paths` is no longer supported and will be ignored.",
FutureWarning,
)
if isinstance(columns, str):
df = read_parquet(
path,
columns=[columns],
filters=filters,
categories=categories,
index=index,
storage_options=storage_options,
engine=engine,
gather_statistics=gather_statistics,
ignore_metadata_file=ignore_metadata_file,
split_row_groups=split_row_groups,
chunksize=chunksize,
aggregate_files=aggregate_files,
metadata_task_size=metadata_task_size,
)
return df[columns]
if columns is not None:
columns = list(columns)
label = "read-parquet-"
output_name = label + tokenize(
path,
columns,
filters,
categories,
index,
storage_options,
engine,
gather_statistics,
ignore_metadata_file,
metadata_task_size,
split_row_groups,
chunksize,
aggregate_files,
)
if isinstance(engine, str):
engine = get_engine(engine)
if hasattr(path, "name"):
path = stringify_path(path)
fs, _, paths = get_fs_token_paths(path, mode="rb", storage_options=storage_options)
paths = sorted(paths, key=natural_sort_key) # numeric rather than glob ordering
auto_index_allowed = False
if index is None:
# User is allowing auto-detected index
auto_index_allowed = True
if index and isinstance(index, str):
index = [index]
if chunksize or (
split_row_groups and int(split_row_groups) > 1 and aggregate_files
):
# Require `gather_statistics=True` if `chunksize` is used,
# or if `split_row_groups>1` and we are aggregating files.
if gather_statistics is False:
raise ValueError("read_parquet options require gather_statistics=True")
gather_statistics = True
read_metadata_result = engine.read_metadata(
fs,
paths,
categories=categories,
index=index,
gather_statistics=gather_statistics,
filters=filters,
split_row_groups=split_row_groups,
chunksize=chunksize,
aggregate_files=aggregate_files,
ignore_metadata_file=ignore_metadata_file,
metadata_task_size=metadata_task_size,
**kwargs,
)
# In the future, we may want to give the engine the
# option to return a dedicated element for `common_kwargs`.
# However, to avoid breaking the API, we just embed this
# data in the first element of `parts` for now.
    # The logic below is intended to handle backward and forward
# compatibility with a user-defined engine.
meta, statistics, parts, index = read_metadata_result[:4]
common_kwargs = {}
aggregation_depth = False
if len(parts):
# For now, `common_kwargs` and `aggregation_depth`
# may be stored in the first element of `parts`
common_kwargs = parts[0].pop("common_kwargs", {})
aggregation_depth = parts[0].pop("aggregation_depth", aggregation_depth)
# Parse dataset statistics from metadata (if available)
parts, divisions, index, index_in_columns = process_statistics(
parts,
statistics,
filters,
index,
chunksize,
split_row_groups,
fs,
aggregation_depth,
)
# Account for index and columns arguments.
# Modify `meta` dataframe accordingly
meta, index, columns = set_index_columns(
meta, index, columns, index_in_columns, auto_index_allowed
)
if meta.index.name == NONE_LABEL:
meta.index.name = None
# Set the index that was previously treated as a column
if index_in_columns:
meta = meta.set_index(index)
if meta.index.name == NONE_LABEL:
meta.index.name = None
if len(divisions) < 2:
# empty dataframe - just use meta
graph = {(output_name, 0): meta}
divisions = (None, None)
else:
# Create Blockwise layer
layer = DataFrameIOLayer(
output_name,
columns,
parts,
ParquetFunctionWrapper(
engine,
fs,
meta,
columns,
index,
kwargs,
common_kwargs,
),
label=label,
)
graph = HighLevelGraph({output_name: layer}, {output_name: set()})
return new_dd_object(graph, output_name, meta, divisions)
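# Hedged usage sketch (added; not part of dask).  The path and column names are
# placeholders; the filter shows the documented DNF (List[List[Tuple]]) form.
def _example_read_parquet():
    # Keep row-groups where (year == 2020 AND value > 0) OR (year == 2021).
    return read_parquet(
        "s3://bucket/my-parquet-data",
        columns=["year", "value"],
        filters=[[("year", "==", 2020), ("value", ">", 0)], [("year", "==", 2021)]],
        engine="pyarrow",
    )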
def check_multi_support(engine):
# Helper function to check that the engine
# supports a multi-partition read
return hasattr(engine, "multi_support") and engine.multi_support()
def read_parquet_part(fs, engine, meta, part, columns, index, kwargs):
"""Read a part of a parquet dataset
This function is used by `read_parquet`."""
if isinstance(part, list):
if len(part) == 1 or part[0][1] or not check_multi_support(engine):
# Part kwargs expected
func = engine.read_partition
dfs = [
func(fs, rg, columns.copy(), index, **toolz.merge(kwargs, kw))
for (rg, kw) in part
]
df = concat(dfs, axis=0) if len(dfs) > 1 else dfs[0]
else:
# No part specific kwargs, let engine read
# list of parts at once
df = engine.read_partition(
fs, [p[0] for p in part], columns.copy(), index, **kwargs
)
else:
# NOTE: `kwargs` are the same for all parts, while `part_kwargs` may
# be different for each part.
rg, part_kwargs = part
df = engine.read_partition(
fs, rg, columns, index, **toolz.merge(kwargs, part_kwargs)
)
if meta.columns.name:
df.columns.name = meta.columns.name
columns = columns or []
index = index or []
df = df[[c for c in columns if c not in index]]
if index == [NONE_LABEL]:
df.index.name = None
return df
def to_parquet(
df,
path,
engine="auto",
compression="default",
write_index=True,
append=False,
overwrite=False,
ignore_divisions=False,
partition_on=None,
storage_options=None,
custom_metadata=None,
write_metadata_file=True,
compute=True,
compute_kwargs=None,
schema=None,
**kwargs,
):
"""Store Dask.dataframe to Parquet files
Notes
-----
Each partition will be written to a separate file.
Parameters
----------
df : dask.dataframe.DataFrame
path : string or pathlib.Path
Destination directory for data. Prepend with protocol like ``s3://``
or ``hdfs://`` for remote data.
engine : {'auto', 'fastparquet', 'pyarrow'}, default 'auto'
Parquet library to use. If only one library is installed, it will use
that one; if both, it will use 'fastparquet'.
compression : string or dict, default 'default'
Either a string like ``"snappy"`` or a dictionary mapping column names
to compressors like ``{"name": "gzip", "values": "snappy"}``. The
default is ``"default"``, which uses the default compression for
whichever engine is selected.
write_index : boolean, default True
Whether or not to write the index. Defaults to True.
append : bool, default False
If False (default), construct data-set from scratch. If True, add new
row-group(s) to an existing data-set. In the latter case, the data-set
must exist, and the schema must match the input data.
overwrite : bool, default False
Whether or not to remove the contents of `path` before writing the dataset.
The default is False. If True, the specified path must correspond to
a directory (but not the current working directory). This option cannot
be set to True if `append=True`.
NOTE: `overwrite=True` will remove the original data even if the current
write operation fails. Use at your own risk.
ignore_divisions : bool, default False
If False (default) raises error when previous divisions overlap with
the new appended divisions. Ignored if append=False.
partition_on : list, default None
Construct directory-based partitioning by splitting on these fields'
values. Each dask partition will result in one or more datafiles,
there will be no global groupby.
storage_options : dict, default None
Key/value pairs to be passed on to the file-system backend, if any.
custom_metadata : dict, default None
Custom key/value metadata to include in all footer metadata (and
in the global "_metadata" file, if applicable). Note that the custom
metadata may not contain the reserved b"pandas" key.
write_metadata_file : bool, default True
Whether to write the special "_metadata" file.
compute : bool, default True
If :obj:`True` (default) then the result is computed immediately. If :obj:`False`
then a ``dask.dataframe.Scalar`` object is returned for future computation.
    compute_kwargs : dict, optional
Options to be passed in to the compute method
schema : Schema object, dict, or {"infer", None}, default None
Global schema to use for the output dataset. Alternatively, a `dict`
of pyarrow types can be specified (e.g. `schema={"id": pa.string()}`).
For this case, fields excluded from the dictionary will be inferred
from `_meta_nonempty`. If "infer", the first non-empty and non-null
partition will be used to infer the type for "object" columns. If
None (default), we let the backend infer the schema for each distinct
output partition. If the partitions produce inconsistent schemas,
pyarrow will throw an error when writing the shared _metadata file.
Note that this argument is ignored by the "fastparquet" engine.
**kwargs :
Extra options to be passed on to the specific backend.
Examples
--------
>>> df = dd.read_csv(...) # doctest: +SKIP
>>> df.to_parquet('/path/to/output/', ...) # doctest: +SKIP
See Also
--------
read_parquet: Read parquet data to dask.dataframe
"""
compute_kwargs = compute_kwargs or {}
if compression == "default":
if snappy is not None:
compression = "snappy"
else:
compression = None
partition_on = partition_on or []
if isinstance(partition_on, str):
partition_on = [partition_on]
if set(partition_on) - set(df.columns):
raise ValueError(
"Partitioning on non-existent column. "
"partition_on=%s ."
"columns=%s" % (str(partition_on), str(list(df.columns)))
)
if isinstance(engine, str):
engine = get_engine(engine)
if hasattr(path, "name"):
path = stringify_path(path)
fs, _, _ = get_fs_token_paths(path, mode="wb", storage_options=storage_options)
# Trim any protocol information from the path before forwarding
path = fs._strip_protocol(path)
if overwrite:
if isinstance(fs, LocalFileSystem):
working_dir = fs.expand_path(".")[0]
if path.rstrip("/") == working_dir.rstrip("/"):
raise ValueError(
"Cannot clear the contents of the current working directory!"
)
if append:
raise ValueError("Cannot use both `overwrite=True` and `append=True`!")
if fs.exists(path) and fs.isdir(path):
# Only remove path contents if
# (1) The path exists
# (2) The path is a directory
# (3) The path is not the current working directory
fs.rm(path, recursive=True)
# Save divisions and corresponding index name. This is necessary,
# because we may be resetting the index to write the file
division_info = {"divisions": df.divisions, "name": df.index.name}
if division_info["name"] is None:
# As of 0.24.2, pandas will rename an index with name=None
# when df.reset_index() is called. The default name is "index",
# but dask will always change the name to the NONE_LABEL constant
if NONE_LABEL not in df.columns:
division_info["name"] = NONE_LABEL
elif write_index:
raise ValueError(
"Index must have a name if __null_dask_index__ is a column."
)
else:
warnings.warn(
"If read back by Dask, column named __null_dask_index__ "
"will be set to the index (and renamed to None)."
)
# There are some "resrved" names that may be used as the default column
# name after resetting the index. However, we don't want to treat it as
# a "special" name if the string is already used as a "real" column name.
reserved_names = []
for name in ["index", "level_0"]:
if name not in df.columns:
reserved_names.append(name)
# If write_index==True (default), reset the index and record the
# name of the original index in `index_cols` (we will set the name
# to the NONE_LABEL constant if it is originally `None`).
# `fastparquet` will use `index_cols` to specify the index column(s)
# in the metadata. `pyarrow` will revert the `reset_index` call
# below if `index_cols` is populated (because pyarrow will want to handle
# index preservation itself). For both engines, the column index
# will be written to "pandas metadata" if write_index=True
index_cols = []
if write_index:
real_cols = set(df.columns)
none_index = list(df._meta.index.names) == [None]
df = df.reset_index()
if none_index:
df.columns = [
c if c not in reserved_names else NONE_LABEL for c in df.columns
]
index_cols = [c for c in set(df.columns) - real_cols]
else:
# Not writing index - might as well drop it
df = df.reset_index(drop=True)
_to_parquet_kwargs = {
"engine",
"compression",
"write_index",
"append",
"ignore_divisions",
"partition_on",
"storage_options",
"write_metadata_file",
"compute",
}
kwargs_pass = {k: v for k, v in kwargs.items() if k not in _to_parquet_kwargs}
# Engine-specific initialization steps to write the dataset.
# Possibly create parquet metadata, and load existing stuff if appending
meta, schema, i_offset = engine.initialize_write(
df,
fs,
path,
append=append,
ignore_divisions=ignore_divisions,
partition_on=partition_on,
division_info=division_info,
index_cols=index_cols,
schema=schema,
**kwargs_pass,
)
# Use i_offset and df.npartitions to define file-name list
filenames = ["part.%i.parquet" % (i + i_offset) for i in range(df.npartitions)]
# Construct IO graph
dsk = {}
name = "to-parquet-" + tokenize(
df,
fs,
path,
append,
ignore_divisions,
partition_on,
division_info,
index_cols,
schema,
)
part_tasks = []
kwargs_pass["fmd"] = meta
kwargs_pass["compression"] = compression
kwargs_pass["index_cols"] = index_cols
kwargs_pass["schema"] = schema
if custom_metadata:
if b"pandas" in custom_metadata.keys():
raise ValueError(
"User-defined key/value metadata (custom_metadata) can not "
"contain a b'pandas' key. This key is reserved by Pandas, "
"and overwriting the corresponding value can render the "
"entire dataset unreadable."
)
kwargs_pass["custom_metadata"] = custom_metadata
for d, filename in enumerate(filenames):
dsk[(name, d)] = (
apply,
engine.write_partition,
[
(df._name, d),
path,
fs,
filename,
partition_on,
write_metadata_file,
],
toolz.merge(kwargs_pass, {"head": True}) if d == 0 else kwargs_pass,
)
part_tasks.append((name, d))
final_name = "metadata-" + name
# Collect metadata and write _metadata
if write_metadata_file:
dsk[(final_name, 0)] = (
apply,
engine.write_metadata,
[
part_tasks,
meta,
fs,
path,
],
{"append": append, "compression": compression},
)
else:
dsk[(final_name, 0)] = (lambda x: None, part_tasks)
graph = HighLevelGraph.from_collections(final_name, dsk, dependencies=[df])
if compute:
return compute_as_if_collection(
Scalar, graph, [(final_name, 0)], **compute_kwargs
)
else:
return Scalar(graph, final_name, "")
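# Hedged usage sketch (added; not part of dask).  The output path and column
# names are placeholders for an existing dask DataFrame `df`.
def _example_to_parquet(df):
    import pyarrow as pa
    # Write one directory level per distinct `country`, forcing `id` to string
    # in the unified schema and skipping the global _metadata file.
    to_parquet(
        df,
        "s3://bucket/output/",
        engine="pyarrow",
        compression="snappy",
        partition_on=["country"],
        schema={"id": pa.string()},
        write_metadata_file=False,
    )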
def create_metadata_file(
paths,
root_dir=None,
out_dir=None,
engine="pyarrow",
storage_options=None,
split_every=32,
compute=True,
compute_kwargs=None,
fs=None,
):
"""Construct a global _metadata file from a list of parquet files.
Dask's read_parquet function is designed to leverage a global
_metadata file whenever one is available. The to_parquet
function will generate this file automatically by default, but it
may not exist if the dataset was generated outside of Dask. This
utility provides a mechanism to generate a _metadata file from a
list of existing parquet files.
NOTE: This utility is not yet supported for the "fastparquet" engine.
Parameters
----------
paths : list(string)
List of files to collect footer metadata from.
root_dir : string, optional
Root directory of dataset. The `file_path` fields in the new
_metadata file will relative to this directory. If None, a common
root directory will be inferred.
out_dir : string or False, optional
Directory location to write the final _metadata file. By default,
this will be set to `root_dir`. If False is specified, the global
metadata will be returned as an in-memory object (and will not be
written to disk).
engine : str or Engine, default 'pyarrow'
Parquet Engine to use. Only 'pyarrow' is supported if a string
is passed.
storage_options : dict, optional
Key/value pairs to be passed on to the file-system backend, if any.
split_every : int, optional
The final metadata object that is written to _metadata can be much
smaller than the list of footer metadata. In order to avoid the
aggregation of all metadata within a single task, a tree reduction
is used. This argument specifies the maximum number of metadata
inputs to be handled by any one task in the tree. Defaults to 32.
compute : bool, optional
If True (default) then the result is computed immediately. If False
then a ``dask.delayed`` object is returned for future computation.
compute_kwargs : dict, optional
Options to be passed in to the compute method
fs : fsspec object, optional
File-system instance to use for file handling. If prefixes have
been removed from the elements of ``paths`` before calling this
function, an ``fs`` argument must be provided to ensure correct
behavior on remote file systems ("naked" paths cannot be used
to infer file-system information).
"""
# Get engine.
# Note that "fastparquet" is not yet supported
if isinstance(engine, str):
if engine not in ("pyarrow", "arrow"):
raise ValueError(
f"{engine} is not a supported engine for create_metadata_file "
"Try engine='pyarrow'."
)
engine = get_engine(engine)
# Process input path list
if fs is None:
# Only do this if an fsspec file-system object is not
# already defined. The prefixes may already be stripped.
fs, _, paths = get_fs_token_paths(
paths, mode="rb", storage_options=storage_options
)
ap_kwargs = {"root": root_dir} if root_dir else {}
paths, root_dir, fns = _sort_and_analyze_paths(paths, fs, **ap_kwargs)
out_dir = root_dir if out_dir is None else out_dir
# Start constructing a raw graph
dsk = {}
name = "gen-metadata-" + tokenize(paths, fs)
collect_name = "collect-" + name
agg_name = "agg-" + name
# Define a "collect" task for each file in the input list.
# Each tasks will:
# 1. Extract the footer metadata from a distinct file
# 2. Populate the `file_path` field in the metadata
# 3. Return the extracted/modified metadata
for p, (fn, path) in enumerate(zip(fns, paths)):
key = (collect_name, p, 0)
dsk[key] = (engine.collect_file_metadata, path, fs, fn)
# Build a reduction tree to aggregate all footer metadata
# into a single metadata object. Each task in the tree
# will take in a list of metadata objects as input, and will
# usually output a single (aggregated) metadata object.
# The final task in the tree will write the result to disk
# instead of returning it (this behavior is triggered by
# passing a file path to `engine.aggregate_metadata`).
parts = len(paths)
widths = [parts]
while parts > 1:
parts = math.ceil(parts / split_every)
widths.append(parts)
height = len(widths)
for depth in range(1, height):
for group in range(widths[depth]):
p_max = widths[depth - 1]
lstart = split_every * group
lstop = min(lstart + split_every, p_max)
dep_task_name = collect_name if depth == 1 else agg_name
node_list = [(dep_task_name, p, depth - 1) for p in range(lstart, lstop)]
if depth == height - 1:
assert group == 0
dsk[name] = (engine.aggregate_metadata, node_list, fs, out_dir)
else:
dsk[(agg_name, group, depth)] = (
engine.aggregate_metadata,
node_list,
None,
None,
)
# There will be no aggregation tasks if there is only one file
if len(paths) == 1:
dsk[name] = (engine.aggregate_metadata, [(collect_name, 0, 0)], fs, out_dir)
# Convert the raw graph to a `Delayed` object
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[])
out = Delayed(name, graph)
# Optionally compute the result
if compute:
if compute_kwargs is None:
compute_kwargs = dict()
out = out.compute(**compute_kwargs)
return out
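# Hedged usage sketch (added; not part of dask).  The file list is a placeholder
# and assumes an existing pyarrow-written dataset that lacks a _metadata file.
def _example_create_metadata_file():
    files = [
        "s3://bucket/dataset/part.0.parquet",
        "s3://bucket/dataset/part.1.parquet",
    ]
    # Writes s3://bucket/dataset/_metadata so later read_parquet calls can
    # gather statistics without touching every file footer.
    create_metadata_file(files, engine="pyarrow")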
_ENGINES = {}
def get_engine(engine):
"""Get the parquet engine backend implementation.
Parameters
----------
engine : str, default 'auto'
Backend parquet library to use. Options include: 'auto', 'fastparquet',
'pyarrow', 'pyarrow-dataset', and 'pyarrow-legacy'. Defaults to 'auto',
which selects the FastParquetEngine if fastparquet is installed (and
ArrowLegacyEngine otherwise). If 'pyarrow-dataset' is specified, the
ArrowDatasetEngine (which leverages the pyarrow.dataset API) will be used
for newer PyArrow versions (>=1.0.0). If 'pyarrow' or 'pyarrow-legacy' are
specified, the ArrowLegacyEngine will be used (which leverages the
pyarrow.parquet.ParquetDataset API).
NOTE: 'pyarrow-dataset' enables row-wise filtering, but requires
pyarrow>=1.0. The behavior of 'pyarrow' will most likely change to
ArrowDatasetEngine in a future release, and the 'pyarrow-legacy'
option will be deprecated once the ParquetDataset API is deprecated.
    Returns
    -------
    The parquet engine class implementing the requested backend.
"""
if engine in _ENGINES:
return _ENGINES[engine]
if engine == "auto":
for eng in ["fastparquet", "pyarrow"]:
try:
return get_engine(eng)
except RuntimeError:
pass
else:
raise RuntimeError("Please install either fastparquet or pyarrow")
elif engine == "fastparquet":
import_required("fastparquet", "`fastparquet` not installed")
from .fastparquet import FastParquetEngine
_ENGINES["fastparquet"] = eng = FastParquetEngine
return eng
elif engine in ("pyarrow", "arrow", "pyarrow-legacy", "pyarrow-dataset"):
pa = import_required("pyarrow", "`pyarrow` not installed")
pa_version = parse_version(pa.__version__)
if engine in ("pyarrow", "arrow"):
engine = "pyarrow-dataset"
elif pa_version.major >= 5 and engine == "pyarrow-legacy":
warnings.warn(
"`ArrowLegacyEngine` ('pyarrow-legacy') is deprecated for "
"pyarrow>=5 and will be removed in a future release. Please "
"use `engine='pyarrow'` or `engine='pyarrow-dataset'`.",
FutureWarning,
)
if engine == "pyarrow-dataset":
from .arrow import ArrowDatasetEngine
_ENGINES[engine] = eng = ArrowDatasetEngine
else:
from .arrow import ArrowLegacyEngine
_ENGINES[engine] = eng = ArrowLegacyEngine
return eng
else:
raise ValueError(
f'Unsupported engine: "{engine}".'
' Valid choices include "pyarrow" and "fastparquet".'
)
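# Minimal usage sketch (the helper name below is illustrative; assumes at least one
# of fastparquet/pyarrow is installed): the selected backend class is cached in
# ``_ENGINES``, so repeated lookups with the same string return the identical class.
def _example_get_engine():
    eng = get_engine("auto")
    assert eng is get_engine("auto")
    return eng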
#####################
# Utility Functions #
#####################
def sorted_columns(statistics):
"""Find sorted columns given row-group statistics
This finds all columns that are sorted, along with appropriate divisions
values for those columns
Returns
-------
out: List of {'name': str, 'divisions': List[str]} dictionaries
"""
if not statistics:
return []
out = []
for i, c in enumerate(statistics[0]["columns"]):
if not all(
"min" in s["columns"][i] and "max" in s["columns"][i] for s in statistics
):
continue
divisions = [c["min"]]
max = c["max"]
success = c["min"] is not None
for stats in statistics[1:]:
c = stats["columns"][i]
if c["min"] is None:
success = False
break
if c["min"] >= max:
divisions.append(c["min"])
max = c["max"]
else:
success = False
break
if success:
divisions.append(max)
assert divisions == sorted(divisions)
out.append({"name": c["name"], "divisions": divisions})
return out
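# Minimal sketch of the behaviour above (hypothetical statistics; the helper name is
# illustrative): non-overlapping per-part [min, max] ranges yield a sorted column
# together with its divisions.
def _example_sorted_columns():
    statistics = [
        {"columns": [{"name": "x", "min": 0, "max": 4}]},
        {"columns": [{"name": "x", "min": 5, "max": 9}]},
    ]
    return sorted_columns(statistics)  # [{"name": "x", "divisions": [0, 5, 9]}]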
def apply_filters(parts, statistics, filters):
"""Apply filters onto parts/statistics pairs
Parameters
----------
parts: list
Tokens corresponding to row groups to read in the future
statistics: List[dict]
List of statistics for each part, including min and max values
filters: Union[List[Tuple[str, str, Any]], List[List[Tuple[str, str, Any]]]]
List of filters to apply, like ``[[('x', '=', 0), ...], ...]``. This
implements partition-level (hive) filtering only, i.e., to prevent the
loading of some row-groups and/or files.
Predicates can be expressed in disjunctive normal form (DNF). This means
that the innermost tuple describes a single column predicate. These
inner predicates are combined with an AND conjunction into a larger
predicate. The outer-most list then combines all of the combined
filters with an OR disjunction.
Predicates can also be expressed as a List[Tuple]. These are evaluated
        as an AND conjunction. To express OR in predicates, one must use the
(preferred) List[List[Tuple]] notation.
Note that the "fastparquet" engine does not currently support DNF for
the filtering of partitioned columns (List[Tuple] is required).
Returns
-------
parts, statistics: the same as the input, but possibly a subset
"""
def apply_conjunction(parts, statistics, conjunction):
for column, operator, value in conjunction:
out_parts = []
out_statistics = []
for part, stats in zip(parts, statistics):
if "filter" in stats and stats["filter"]:
continue # Filtered by engine
try:
c = toolz.groupby("name", stats["columns"])[column][0]
min = c["min"]
max = c["max"]
except KeyError:
out_parts.append(part)
out_statistics.append(stats)
else:
if (
operator in ("==", "=")
and min <= value <= max
or operator == "<"
and min < value
or operator == "<="
and min <= value
or operator == ">"
and max > value
or operator == ">="
and max >= value
or operator == "in"
and any(min <= item <= max for item in value)
):
out_parts.append(part)
out_statistics.append(stats)
parts, statistics = out_parts, out_statistics
return parts, statistics
conjunction, *disjunction = filters if isinstance(filters[0], list) else [filters]
out_parts, out_statistics = apply_conjunction(parts, statistics, conjunction)
for conjunction in disjunction:
for part, stats in zip(*apply_conjunction(parts, statistics, conjunction)):
if part not in out_parts:
out_parts.append(part)
out_statistics.append(stats)
return out_parts, out_statistics
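# Minimal sketch (hypothetical parts/statistics; the helper name is illustrative):
# a single ("x", ">", 6) predicate keeps only the part whose max statistic can
# still satisfy it.
def _example_apply_filters():
    parts = ["part-0", "part-1"]
    statistics = [
        {"columns": [{"name": "x", "min": 0, "max": 4}]},
        {"columns": [{"name": "x", "min": 5, "max": 9}]},
    ]
    return apply_filters(parts, statistics, [("x", ">", 6)])  # (["part-1"], [...])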
def process_statistics(
parts,
statistics,
filters,
index,
chunksize,
split_row_groups,
fs,
aggregation_depth,
):
"""Process row-group column statistics in metadata
Used in read_parquet.
"""
index_in_columns = False
if statistics:
result = list(
zip(
*[
(part, stats)
for part, stats in zip(parts, statistics)
if stats["num-rows"] > 0
]
)
)
parts, statistics = result or [[], []]
if filters:
parts, statistics = apply_filters(parts, statistics, filters)
# Aggregate parts/statistics if we are splitting by row-group
if chunksize or (split_row_groups and int(split_row_groups) > 1):
parts, statistics = aggregate_row_groups(
parts, statistics, chunksize, split_row_groups, fs, aggregation_depth
)
out = sorted_columns(statistics)
if index and isinstance(index, str):
index = [index]
if index and out:
            # Keep only the sorted column(s) that match the requested index
out = [o for o in out if o["name"] in index]
if index is not False and len(out) == 1:
# Use only sorted column with statistics as the index
divisions = out[0]["divisions"]
if index is None:
index_in_columns = True
index = [out[0]["name"]]
elif index != [out[0]["name"]]:
raise ValueError(f"Specified index is invalid.\nindex: {index}")
elif index is not False and len(out) > 1:
if any(o["name"] == NONE_LABEL for o in out):
# Use sorted column matching NONE_LABEL as the index
[o] = [o for o in out if o["name"] == NONE_LABEL]
divisions = o["divisions"]
if index is None:
index = [o["name"]]
index_in_columns = True
elif index != [o["name"]]:
raise ValueError(f"Specified index is invalid.\nindex: {index}")
else:
# Multiple sorted columns found, cannot autodetect the index
warnings.warn(
"Multiple sorted columns found %s, cannot\n "
"autodetect index. Will continue without an index.\n"
"To pick an index column, use the index= keyword; to \n"
"silence this warning use index=False."
"" % [o["name"] for o in out],
RuntimeWarning,
)
index = False
divisions = [None] * (len(parts) + 1)
else:
divisions = [None] * (len(parts) + 1)
else:
divisions = [None] * (len(parts) + 1)
return parts, divisions, index, index_in_columns
def set_index_columns(meta, index, columns, index_in_columns, auto_index_allowed):
"""Handle index/column arguments, and modify `meta`
Used in read_parquet.
"""
ignore_index_column_intersection = False
if columns is None:
# User didn't specify columns, so ignore any intersection
# of auto-detected values with the index (if necessary)
ignore_index_column_intersection = True
# Do not allow "un-named" fields to be read in as columns.
# These were intended to be un-named indices at write time.
_index = index or []
columns = [
c for c in meta.columns if c not in (None, NONE_LABEL) or c in _index
]
if not set(columns).issubset(set(meta.columns)):
raise ValueError(
"The following columns were not found in the dataset %s\n"
"The following columns were found %s"
% (set(columns) - set(meta.columns), meta.columns)
)
if index:
if isinstance(index, str):
index = [index]
if isinstance(columns, str):
columns = [columns]
if ignore_index_column_intersection:
columns = [col for col in columns if col not in index]
if set(index).intersection(columns):
if auto_index_allowed:
raise ValueError(
"Specified index and column arguments must not intersect"
" (set index=False or remove the detected index from columns).\n"
"index: {} | column: {}".format(index, columns)
)
else:
raise ValueError(
"Specified index and column arguments must not intersect.\n"
"index: {} | column: {}".format(index, columns)
)
# Leaving index as a column in `meta`, because the index
# will be reset below (in case the index was detected after
# meta was created)
if index_in_columns:
meta = meta[columns + index]
else:
meta = meta[columns]
else:
meta = meta[list(columns)]
return meta, index, columns
def aggregate_row_groups(
parts, stats, chunksize, split_row_groups, fs, aggregation_depth
):
if not stats[0].get("file_path_0", None):
return parts, stats
parts_agg = []
stats_agg = []
use_row_group_criteria = split_row_groups and int(split_row_groups) > 1
use_chunksize_criteria = bool(chunksize)
if use_chunksize_criteria:
chunksize = parse_bytes(chunksize)
next_part, next_stat = [parts[0].copy()], stats[0].copy()
for i in range(1, len(parts)):
stat, part = stats[i], parts[i]
# Criteria #1 for aggregating parts: parts are within the same file
same_path = stat["file_path_0"] == next_stat["file_path_0"]
multi_path_allowed = False
if aggregation_depth:
# Criteria #2 for aggregating parts: The part does not include
# row-group information, or both parts include the same kind
# of row_group aggregation (all None, or all indices)
multi_path_allowed = len(part["piece"]) == 1
if not (same_path or multi_path_allowed):
rgs = set(list(part["piece"][1]) + list(next_part[-1]["piece"][1]))
multi_path_allowed = (rgs == {None}) or (None not in rgs)
# Criteria #3 for aggregating parts: The parts share a
# directory at the "depth" allowed by `aggregation_depth`
if not same_path and multi_path_allowed:
if aggregation_depth is True:
multi_path_allowed = True
elif isinstance(aggregation_depth, int):
# Make sure files share the same directory
root = stat["file_path_0"].split(fs.sep)[:-aggregation_depth]
next_root = next_stat["file_path_0"].split(fs.sep)[
:-aggregation_depth
]
multi_path_allowed = root == next_root
else:
raise ValueError(
f"{aggregation_depth} not supported for `aggregation_depth`"
)
def _check_row_group_criteria(stat, next_stat):
if use_row_group_criteria:
return (next_stat["num-row-groups"] + stat["num-row-groups"]) <= int(
split_row_groups
)
else:
return False
def _check_chunksize_criteria(stat, next_stat):
if use_chunksize_criteria:
return (
next_stat["total_byte_size"] + stat["total_byte_size"]
) <= chunksize
else:
return False
stat["num-row-groups"] = stat.get("num-row-groups", 1)
next_stat["num-row-groups"] = next_stat.get("num-row-groups", 1)
if (same_path or multi_path_allowed) and (
_check_row_group_criteria(stat, next_stat)
or _check_chunksize_criteria(stat, next_stat)
):
# Update part list
next_part.append(part)
# Update Statistics
next_stat["total_byte_size"] += stat["total_byte_size"]
next_stat["num-rows"] += stat["num-rows"]
next_stat["num-row-groups"] += stat["num-row-groups"]
for col, col_add in zip(next_stat["columns"], stat["columns"]):
if col["name"] != col_add["name"]:
raise ValueError("Columns are different!!")
if "min" in col:
col["min"] = min(col["min"], col_add["min"])
if "max" in col:
col["max"] = max(col["max"], col_add["max"])
else:
parts_agg.append(next_part)
stats_agg.append(next_stat)
next_part, next_stat = [part.copy()], stat.copy()
parts_agg.append(next_part)
stats_agg.append(next_stat)
return parts_agg, stats_agg
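# Minimal sketch of the aggregation above (hypothetical statistics): two row-group
# parts from the same file are merged when their combined byte size fits under the
# requested chunksize.
#
#     stats = [
#         {"file_path_0": "a.parquet", "total_byte_size": 1000, "num-rows": 10, "columns": []},
#         {"file_path_0": "a.parquet", "total_byte_size": 1000, "num-rows": 10, "columns": []},
#     ]
#     parts = [{"piece": ("a.parquet", None)}, {"piece": ("a.parquet", None)}]
#     aggregate_row_groups(parts, stats, "1 MiB", False, None, False)
#     # -> a single aggregated part with num-rows == 20 and total_byte_size == 2000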
DataFrame.to_parquet.__doc__ = to_parquet.__doc__
|
the-stack_106_22448 | import eel
import os
from FaceRecogniser import FaceRecogniser
sr = None
@eel.expose
def set_known_folder(folder):
if folder:
try:
global sr
sr = FaceRecogniser(folder)
if sr:
return "Success initializing"
except Exception as ex:
return f"Error initializing -- {ex}"
else:
return "Error initializing -- No folder"
@eel.expose
def find_by_picture(picture_location):
if picture_location:
try:
if os.path.exists("interface/temp.png"):
os.remove("interface/temp.png")
global sr
if sr:
res, picture = sr.find_by_picture(picture_location)
return [res, picture]
else:
return ["Error finding -- Initialize known photos first", None]
except Exception as ex:
return [f"Error finding -- {ex}", None]
else:
return ["Error finding -- No picture", None]
@eel.expose
def find_by_screenshot(delay):
try:
if os.path.exists("interface/temp.png"):
os.remove("interface/temp.png")
global sr
if sr:
if delay or delay == 0:
res, picture = sr.find_by_screenshot(int(delay))
else:
res, picture = sr.find_by_screenshot()
return [res, picture]
else:
return ["Error finding -- Initialize known photos first", None]
except Exception as ex:
return [f"Error finding -- {ex}", None]
eel.init("interface")
eel.start("main.html", size=(640, 768))
# test_pictures/Ronnie_Radke_June_2015_outtake.jpg |
the-stack_106_22452 | import tensorflow as tf
from .support import initializer, visualize_filters
import numpy as np
def softmax_layer (input, name = 'softmax'):
"""
Creates the softmax normalization
Args:
input: Where is the input of the layer coming from
name: Name scope of the layer
Returns:
tuple: ``(softmax, prediction)``, A softmax output node and prediction output node
"""
with tf.variable_scope(name) as scope:
inference = tf.nn.softmax(input, name = 'inference')
predictions = tf.argmax(inference, 1, name = 'predictions')
return (inference, predictions)
def dot_product_layer(input, params = None, neurons = 1200, name = 'fc', activation = 'relu'):
"""
Creates a fully connected layer
Args:
input: Where is the input of the layer coming from
neurons: Number of neurons in the layer.
params: List of tensors, if supplied will use those params.
name: name scope of the layer
activation: What kind of activation to use.
Returns:
tuple: The output node and A list of parameters that are learnable
"""
with tf.variable_scope(name) as scope:
if params is None:
weights = tf.Variable(initializer([input.shape[1].value,neurons], name = 'xavier_weights'),\
name = 'weights')
bias = tf.Variable(initializer([neurons], name = 'xavier_bias'), name = 'bias')
else:
weights = params[0]
bias = params[1]
dot = tf.nn.bias_add(tf.matmul(input, weights, name = 'dot'), bias, name = 'pre-activation')
if activation == 'relu':
activity = tf.nn.relu(dot, name = 'activity' )
elif activation == 'sigmoid':
activity = tf.nn.sigmoid(dot, name = 'activity' )
elif activation == 'identity':
activity = dot
params = [weights, bias]
tf.summary.histogram('weights', weights)
tf.summary.histogram('bias', bias)
tf.summary.histogram('activity', activity)
return (activity, params)
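# Minimal sketch (TF1.x graph mode; shapes and scope names below are hypothetical):
# stacking two dot-product layers and a softmax readout.
def _example_dense_stack():
    x = tf.placeholder(tf.float32, shape=[None, 784], name='x')
    fc1, _ = dot_product_layer(x, neurons=800, name='fc1')
    logits, _ = dot_product_layer(fc1, neurons=10, name='fc2', activation='identity')
    return softmax_layer(logits, name='softmax')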
def conv_2d_layer (input,
neurons = 20,
filter_size = (5,5),
stride = (1,1,1,1),
padding = 'VALID',
name = 'conv',
activation = 'relu',
visualize = False):
"""
Creates a convolution layer
Args:
input: (NHWC) Where is the input of the layer coming from
neurons: Number of neurons in the layer.
name: name scope of the layer
filter_size: A tuple of filter size ``(5,5)`` is default.
stride: A tuple of x and y axis strides. ``(1,1,1,1)`` is default.
name: A name for the scope of tensorflow
visualize: If True, will add to summary. Only for first layer at the moment.
activation: Activation for the outputs.
padding: Padding to be used in convolution. "VALID" is default.
Returns:
tuple: The output node and A list of parameters that are learnable
"""
f_shp = [filter_size[0], filter_size[1], input.shape[3].value, neurons]
with tf.variable_scope(name) as scope:
weights = tf.Variable(initializer( f_shp,
name = 'xavier_weights'),\
name = 'weights')
bias = tf.Variable(initializer([neurons], name = 'xavier_bias'), name = 'bias')
c_out = tf.nn.conv2d( input = input,
filter = weights,
strides = stride,
padding = padding,
name = scope.name )
c_out_bias = tf.nn.bias_add(c_out, bias, name = 'pre-activation')
if activation == 'relu':
activity = tf.nn.relu(c_out_bias, name = 'activity' )
elif activation == 'sigmoid':
activity = tf.nn.sigmoid(c_out_bias, name = 'activity' )
elif activation == 'identity':
activity = c_out_bias
params = [weights, bias]
tf.summary.histogram('weights', weights)
tf.summary.histogram('bias', bias)
tf.summary.histogram('activity', activity)
if visualize is True:
visualize_filters(weights, name = 'filters_' + name)
return (activity, params)
def flatten_layer (input, name = 'flatten'):
"""
This layer returns the flattened output
Args:
input: a 4D node.
name: name scope of the layer.
Returns:
tensorflow tensor: a 2D node.
"""
with tf.variable_scope(name) as scope:
in_shp = input.get_shape().as_list()
output = tf.reshape(input, [-1, in_shp[1]*in_shp[2]*in_shp[3]])
return output
def max_pool_2d_layer ( input,
pool_size = (1,2,2,1),
stride = (1,2,2,1),
padding = 'VALID',
name = 'pool' ):
"""
Creates a max pooling layer
Args:
input: (NHWC) Where is the input of the layer coming from
name: name scope of the layer
pool_size: A tuple of filter size ``(5,5)`` is default.
stride: A tuple of x and y axis strides. ``(1,1,1,1)`` is default.
name: A name for the scope of tensorflow
padding: Padding to be used in convolution. "VALID" is default.
Returns:
tensorflow tensor: The output node
"""
with tf.variable_scope(name) as scope:
output = tf.nn.max_pool ( value = input,
ksize = pool_size,
strides = stride,
padding = padding,
name = name )
return output
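# Minimal sketch (TF1.x graph mode; shapes and scope names below are hypothetical):
# a convolution followed by max-pooling and flattening, ready for a dense layer.
def _example_conv_stack():
    images = tf.placeholder(tf.float32, shape=[None, 28, 28, 1], name='images')
    conv1, _ = conv_2d_layer(images, neurons=20, filter_size=(5, 5), name='conv1')
    pool1 = max_pool_2d_layer(conv1, name='pool1')
    return flatten_layer(pool1, name='flatten1')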
def local_response_normalization_layer (input, name = 'lrn'):
"""
    This layer applies local response normalization to its input
    Args:
        input: a 4D node.
        name: name scope of the layer.
    Returns:
        tensorflow tensor: a 4D node of the same shape as the input.
"""
with tf.variable_scope(name) as scope:
output = tf.nn.lrn(input)
return output
def unflatten_layer (input, channels = 1, name = 'unflatten'):
"""
This layer returns the unflattened output
Args:
input: a 2D node.
        channels: How many channels are there in the image. (Default = ``1``)
name: name scope of the layer.
Returns:
tensorflow tensor: a 4D node in (NHWC) format that is square in shape.
"""
with tf.variable_scope(name) as scope:
dim = int( np.sqrt( input.shape[1].value / channels ) )
output = tf.reshape(input, [-1, dim, dim, channels])
return output
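# Minimal sketch (hypothetical 784-dim flat input): recovers a square NHWC node.
def _example_unflatten():
    flat = tf.placeholder(tf.float32, shape=[None, 784], name='flat')
    return unflatten_layer(flat, channels=1, name='unflatten')  # shape [None, 28, 28, 1]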
def dropout_layer (input, prob, name ='dropout'):
"""
This layer drops out nodes with the probability of 0.5
During training time, run a probability of 0.5.
During test time run a probability of 1.0.
To do this, ensure that the ``prob`` is a ``tf.placeholder``.
You can supply this probability with ``feed_dict`` in trainer.
Args:
input: a 2D node.
prob: Probability feeder.
name: name scope of the layer.
Returns:
tensorflow tensor: An output node
"""
with tf.variable_scope (name) as scope:
output = tf.nn.dropout (input, prob)
return output
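# Minimal sketch (TF1.x graph mode; names below are hypothetical): keep ``prob`` a
# placeholder so that 0.5 can be fed during training and 1.0 at test time.
def _example_dropout():
    x = tf.placeholder(tf.float32, shape=[None, 1200], name='x')
    keep_prob = tf.placeholder_with_default(1.0, shape=[], name='keep_prob')
    return dropout_layer(x, prob=keep_prob, name='dropout'), keep_prob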
if __name__ == '__main__':
pass |
the-stack_106_22453 | # -*- coding: utf-8 -*-
"""Extract *all* xrefs from OBO documents available."""
import gzip
import os
from collections import Counter
import click
import pandas as pd
from .xrefs_pipeline import Canonicalizer, _iter_ooh_na_na, _iter_synonyms, get_xref_df, summarize_xref_df
from ..cli_utils import verbose_option
from ..constants import PYOBO_HOME
from ..identifier_utils import UNHANDLED_NAMESPACES
directory_option = click.option(
'-d', '--directory',
type=click.Path(dir_okay=True, file_okay=False, exists=True),
default=PYOBO_HOME,
)
@click.group()
def output():
"""Output all OBO documents available."""
@output.command()
@directory_option
@verbose_option
def javerts_xrefs(directory: str): # noqa: D202
"""Make the xref dump."""
def _write_tsv(df: pd.DataFrame, name: str) -> None:
df.to_csv(os.path.join(directory, name), sep='\t', index=False)
xrefs_df = get_xref_df()
# Export all xrefs
_write_tsv(xrefs_df, 'inspector_javerts_xrefs.tsv.gz')
# Export a sample of xrefs
_write_tsv(xrefs_df.head(), 'inspector_javerts_xrefs_sample.tsv')
# Export a summary dataframe
summary_df = summarize_xref_df(xrefs_df)
_write_tsv(summary_df, 'inspector_javerts_xrefs_summary.tsv')
# Export the namespaces that haven't been handled yet
unmapped_path = os.path.join(directory, 'inspector_javerts_unmapped_xrefs.tsv')
with open(unmapped_path, 'w') as file:
for namespace, items in sorted(UNHANDLED_NAMESPACES.items()):
for curie, xref in items:
print(curie, namespace, xref, file=file, sep='\t')
@output.command()
@directory_option
@verbose_option
def ooh_na_na(directory: str):
"""Make the prefix-identifier-name dump."""
c = Counter()
db_path = os.path.join(directory, 'ooh_na_na.tsv.gz')
click.echo(f'Writing Ooh-Na-Na to {db_path}')
with gzip.open(db_path, mode='wt') as gzipped_file:
print('prefix', 'identifier', 'name', sep='\t', file=gzipped_file)
for prefix, identifier, name in _iter_ooh_na_na():
c[prefix] += 1
print(prefix, identifier, name, sep='\t', file=gzipped_file)
summary_path = os.path.join(directory, 'ooh_na_na_summary.tsv')
click.echo(f'Writing Ooh-Na-Na summary to {summary_path}')
with open(summary_path, 'w') as file:
for k, v in c.most_common():
print(k, v, sep='\t', file=file)
@output.command()
@directory_option
@verbose_option
def synonymsdb(directory: str):
"""Make the prefix-identifier-synonym dump."""
c = Counter()
db_path = os.path.join(directory, 'synonymdb.tsv.gz')
click.echo(f'Writing SynonymDB to {db_path}')
with gzip.open(db_path, mode='wt') as gzipped_file:
print('prefix', 'identifier', 'name', sep='\t', file=gzipped_file)
for prefix, identifier, name in _iter_synonyms():
c[prefix] += 1
print(prefix, identifier, name, sep='\t', file=gzipped_file)
summary_path = os.path.join(directory, 'synonym_summary.tsv')
click.echo(f'Writing SynonymDB summary to {summary_path}')
with open(summary_path, 'w') as file:
for k, v in c.most_common():
print(k, v, sep='\t', file=file)
@output.command()
@verbose_option
@click.option('-f', '--file', type=click.File('w'))
def javerts_remapping(file):
"""Make a canonical remapping."""
canonicalizer = Canonicalizer.get_default()
print('input', 'canonical', sep='\t', file=file)
for source, target in canonicalizer.iterate_flat_mapping():
print(source, target, sep='\t', file=file)
if __name__ == '__main__':
output()
|
the-stack_106_22454 | import hashlib
import os
import urlparse
from abc import ABCMeta, abstractmethod
from Queue import Empty
from collections import defaultdict, deque
from multiprocessing import Queue
import manifestinclude
import manifestexpected
import wpttest
from mozlog import structured
manifest = None
manifest_update = None
download_from_github = None
def do_delayed_imports():
# This relies on an already loaded module having set the sys.path correctly :(
global manifest, manifest_update, download_from_github
from manifest import manifest
from manifest import update as manifest_update
from manifest.download import download_from_github
class TestChunker(object):
def __init__(self, total_chunks, chunk_number):
self.total_chunks = total_chunks
self.chunk_number = chunk_number
assert self.chunk_number <= self.total_chunks
self.logger = structured.get_default_logger()
assert self.logger
def __call__(self, manifest):
raise NotImplementedError
class Unchunked(TestChunker):
def __init__(self, *args, **kwargs):
TestChunker.__init__(self, *args, **kwargs)
assert self.total_chunks == 1
def __call__(self, manifest):
for item in manifest:
yield item
class HashChunker(TestChunker):
def __call__(self, manifest):
chunk_index = self.chunk_number - 1
for test_type, test_path, tests in manifest:
h = int(hashlib.md5(test_path).hexdigest(), 16)
if h % self.total_chunks == chunk_index:
yield test_type, test_path, tests
class DirectoryHashChunker(TestChunker):
"""Like HashChunker except the directory is hashed.
This ensures that all tests in the same directory end up in the same
chunk.
"""
def __call__(self, manifest):
chunk_index = self.chunk_number - 1
for test_type, test_path, tests in manifest:
h = int(hashlib.md5(os.path.dirname(test_path)).hexdigest(), 16)
if h % self.total_chunks == chunk_index:
yield test_type, test_path, tests
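# Minimal sketch of the assignment rule above (hypothetical test path): every test
# whose directory hashes to the same value modulo total_chunks lands in the same chunk.
#
#     h = int(hashlib.md5(os.path.dirname("dom/nodes/Node-cloneNode.html")).hexdigest(), 16)
#     chunk_index = h % total_chunks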
class TestFilter(object):
def __init__(self, test_manifests, include=None, exclude=None, manifest_path=None, explicit=False):
if manifest_path is None or include or explicit:
self.manifest = manifestinclude.IncludeManifest.create()
self.manifest.set_defaults()
else:
self.manifest = manifestinclude.get_manifest(manifest_path)
if include or explicit:
self.manifest.set("skip", "true")
if include:
for item in include:
self.manifest.add_include(test_manifests, item)
if exclude:
for item in exclude:
self.manifest.add_exclude(test_manifests, item)
def __call__(self, manifest_iter):
for test_type, test_path, tests in manifest_iter:
include_tests = set()
for test in tests:
if self.manifest.include(test):
include_tests.add(test)
if include_tests:
yield test_type, test_path, include_tests
class TagFilter(object):
def __init__(self, tags):
self.tags = set(tags)
def __call__(self, test_iter):
for test in test_iter:
if test.tags & self.tags:
yield test
class ManifestLoader(object):
def __init__(self, test_paths, force_manifest_update=False, manifest_download=False,
types=None, meta_filters=None):
do_delayed_imports()
self.test_paths = test_paths
self.force_manifest_update = force_manifest_update
self.manifest_download = manifest_download
self.types = types
self.logger = structured.get_default_logger()
self.meta_filters = meta_filters
if self.logger is None:
self.logger = structured.structuredlog.StructuredLogger("ManifestLoader")
def load(self):
rv = {}
for url_base, paths in self.test_paths.iteritems():
manifest_file = self.load_manifest(url_base=url_base,
**paths)
path_data = {"url_base": url_base}
path_data.update(paths)
rv[manifest_file] = path_data
return rv
def load_manifest(self, tests_path, manifest_path, metadata_path, url_base="/", **kwargs):
cache_root = os.path.join(metadata_path, ".cache")
if self.manifest_download:
download_from_github(manifest_path, tests_path)
return manifest.load_and_update(tests_path, manifest_path, url_base,
cache_root=cache_root, update=self.force_manifest_update,
meta_filters=self.meta_filters, types=self.types)
def iterfilter(filters, iter):
for f in filters:
iter = f(iter)
for item in iter:
yield item
class TestLoader(object):
def __init__(self,
test_manifests,
test_types,
run_info,
manifest_filters=None,
chunk_type="none",
total_chunks=1,
chunk_number=1,
include_https=True,
skip_timeout=False):
self.test_types = test_types
self.run_info = run_info
self.manifest_filters = manifest_filters if manifest_filters is not None else []
self.manifests = test_manifests
self.tests = None
self.disabled_tests = None
self.include_https = include_https
self.skip_timeout = skip_timeout
self.chunk_type = chunk_type
self.total_chunks = total_chunks
self.chunk_number = chunk_number
self.chunker = {"none": Unchunked,
"hash": HashChunker,
"dir_hash": DirectoryHashChunker}[chunk_type](total_chunks,
chunk_number)
self._test_ids = None
self.directory_manifests = {}
self._load_tests()
@property
def test_ids(self):
if self._test_ids is None:
self._test_ids = []
for test_dict in [self.disabled_tests, self.tests]:
for test_type in self.test_types:
self._test_ids += [item.id for item in test_dict[test_type]]
return self._test_ids
def get_test(self, manifest_file, manifest_test, inherit_metadata, test_metadata):
if test_metadata is not None:
inherit_metadata.append(test_metadata)
test_metadata = test_metadata.get_test(manifest_test.id)
return wpttest.from_manifest(manifest_file, manifest_test, inherit_metadata, test_metadata)
def load_dir_metadata(self, test_manifest, metadata_path, test_path):
rv = []
path_parts = os.path.dirname(test_path).split(os.path.sep)
for i in xrange(len(path_parts) + 1):
path = os.path.join(metadata_path, os.path.sep.join(path_parts[:i]), "__dir__.ini")
if path not in self.directory_manifests:
self.directory_manifests[path] = manifestexpected.get_dir_manifest(path,
self.run_info)
manifest = self.directory_manifests[path]
if manifest is not None:
rv.append(manifest)
return rv
def load_metadata(self, test_manifest, metadata_path, test_path):
inherit_metadata = self.load_dir_metadata(test_manifest, metadata_path, test_path)
test_metadata = manifestexpected.get_manifest(
metadata_path, test_path, test_manifest.url_base, self.run_info)
return inherit_metadata, test_metadata
def iter_tests(self):
manifest_items = []
manifests_by_url_base = {}
for manifest in sorted(self.manifests.keys(), key=lambda x:x.url_base):
manifest_iter = iterfilter(self.manifest_filters,
manifest.itertypes(*self.test_types))
manifest_items.extend(manifest_iter)
manifests_by_url_base[manifest.url_base] = manifest
if self.chunker is not None:
manifest_items = self.chunker(manifest_items)
for test_type, test_path, tests in manifest_items:
manifest_file = manifests_by_url_base[iter(tests).next().url_base]
metadata_path = self.manifests[manifest_file]["metadata_path"]
inherit_metadata, test_metadata = self.load_metadata(manifest_file, metadata_path, test_path)
for test in tests:
yield test_path, test_type, self.get_test(manifest_file, test, inherit_metadata, test_metadata)
def _load_tests(self):
"""Read in the tests from the manifest file and add them to a queue"""
tests = {"enabled":defaultdict(list),
"disabled":defaultdict(list)}
for test_path, test_type, test in self.iter_tests():
enabled = not test.disabled()
if not self.include_https and test.environment["protocol"] == "https":
enabled = False
if self.skip_timeout and test.expected() == "TIMEOUT":
enabled = False
key = "enabled" if enabled else "disabled"
tests[key][test_type].append(test)
self.tests = tests["enabled"]
self.disabled_tests = tests["disabled"]
def groups(self, test_types, chunk_type="none", total_chunks=1, chunk_number=1):
groups = set()
for test_type in test_types:
for test in self.tests[test_type]:
group = test.url.split("/")[1]
groups.add(group)
return groups
class TestSource(object):
__metaclass__ = ABCMeta
def __init__(self, test_queue):
self.test_queue = test_queue
self.current_group = None
self.current_metadata = None
@abstractmethod
    #@classmethod (doesn't compose with @abstractmethod)
    def make_queue(cls, tests, **kwargs):  # noqa: N805
        pass
@classmethod
def group_metadata(cls, state):
return {"scope": "/"}
def group(self):
if not self.current_group or len(self.current_group) == 0:
try:
self.current_group, self.current_metadata = self.test_queue.get(block=False)
except Empty:
return None, None
return self.current_group, self.current_metadata
class GroupedSource(TestSource):
@classmethod
def new_group(cls, state, test, **kwargs):
raise NotImplementedError
@classmethod
def make_queue(cls, tests, **kwargs):
test_queue = Queue()
groups = []
state = {}
for test in tests:
if cls.new_group(state, test, **kwargs):
group_metadata = cls.group_metadata(state)
groups.append((deque(), group_metadata))
group, metadata = groups[-1]
group.append(test)
test.update_metadata(metadata)
for item in groups:
test_queue.put(item)
return test_queue
class SingleTestSource(TestSource):
@classmethod
def make_queue(cls, tests, **kwargs):
test_queue = Queue()
processes = kwargs["processes"]
queues = [deque([]) for _ in xrange(processes)]
metadatas = [cls.group_metadata(None) for _ in xrange(processes)]
for test in tests:
idx = hash(test.id) % processes
group = queues[idx]
metadata = metadatas[idx]
group.append(test)
test.update_metadata(metadata)
for item in zip(queues, metadatas):
test_queue.put(item)
return test_queue
class PathGroupedSource(GroupedSource):
@classmethod
def new_group(cls, state, test, **kwargs):
depth = kwargs.get("depth")
if depth is True or depth == 0:
depth = None
path = urlparse.urlsplit(test.url).path.split("/")[1:-1][:depth]
rv = path != state.get("prev_path")
state["prev_path"] = path
return rv
@classmethod
def group_metadata(cls, state):
return {"scope": "/%s" % "/".join(state["prev_path"])}
|
the-stack_106_22456 | """
Modder classes used for domain randomization. Largely based off of the mujoco-py
implementation below.
https://github.com/openai/mujoco-py/blob/1fe312b09ae7365f0dd9d4d0e453f8da59fae0bf/mujoco_py/modder.py
"""
import os
import numpy as np
from collections import defaultdict
from PIL import Image
from mujoco_py import cymj
import robosuite
import robosuite.utils.transform_utils as trans
class BaseModder():
"""
Base class meant to modify simulation attributes mid-sim.
Using @random_state ensures that sampling here won't be affected
by sampling that happens outside of the modders.
Args:
sim (MjSim): simulation object
random_state (RandomState): instance of np.random.RandomState, specific
seed used to randomize these modifications without impacting other
numpy seeds / randomizations
"""
def __init__(self, sim, random_state=None):
self.sim = sim
if random_state is None:
# default to global RandomState instance
self.random_state = np.random.mtrand._rand
else:
self.random_state = random_state
def update_sim(self, sim):
"""
Setter function to update internal sim variable
Args:
sim (MjSim): MjSim object
"""
self.sim = sim
@property
def model(self):
"""
Returns:
MjModel: Mujoco sim model
"""
# Available for quick convenience access
return self.sim.model
class LightingModder(BaseModder):
"""
Modder to modify lighting within a Mujoco simulation.
Args:
sim (MjSim): MjSim object
random_state (RandomState): instance of np.random.RandomState
light_names (None or list of str): list of lights to use for randomization. If not provided, all
lights in the model are randomized.
randomize_position (bool): If True, randomizes position of lighting
randomize_direction (bool): If True, randomizes direction of lighting
randomize_specular (bool): If True, randomizes specular attribute of lighting
randomize_ambient (bool): If True, randomizes ambient attribute of lighting
randomize_diffuse (bool): If True, randomizes diffuse attribute of lighting
randomize_active (bool): If True, randomizes active nature of lighting
position_perturbation_size (float): Magnitude of position randomization
direction_perturbation_size (float): Magnitude of direction randomization
specular_perturbation_size (float): Magnitude of specular attribute randomization
ambient_perturbation_size (float): Magnitude of ambient attribute randomization
diffuse_perturbation_size (float): Magnitude of diffuse attribute randomization
"""
def __init__(
self,
sim,
random_state=None,
light_names=None,
randomize_position=True,
randomize_direction=True,
randomize_specular=True,
randomize_ambient=True,
randomize_diffuse=True,
randomize_active=True,
position_perturbation_size=0.1,
direction_perturbation_size=0.35, # 20 degrees
specular_perturbation_size=0.1,
ambient_perturbation_size=0.1,
diffuse_perturbation_size=0.1,
):
super().__init__(sim, random_state=random_state)
if light_names is None:
light_names = self.sim.model.light_names
self.light_names = light_names
self.randomize_position = randomize_position
self.randomize_direction = randomize_direction
self.randomize_specular = randomize_specular
self.randomize_ambient = randomize_ambient
self.randomize_diffuse = randomize_diffuse
self.randomize_active = randomize_active
self.position_perturbation_size = position_perturbation_size
self.direction_perturbation_size = direction_perturbation_size
self.specular_perturbation_size = specular_perturbation_size
self.ambient_perturbation_size = ambient_perturbation_size
self.diffuse_perturbation_size = diffuse_perturbation_size
self.save_defaults()
def save_defaults(self):
"""
Uses the current MjSim state and model to save default parameter values.
"""
self._defaults = { k : {} for k in self.light_names }
for name in self.light_names:
self._defaults[name]['pos'] = np.array(self.get_pos(name))
self._defaults[name]['dir'] = np.array(self.get_dir(name))
self._defaults[name]['specular'] = np.array(self.get_specular(name))
self._defaults[name]['ambient'] = np.array(self.get_ambient(name))
self._defaults[name]['diffuse'] = np.array(self.get_diffuse(name))
self._defaults[name]['active'] = self.get_active(name)
def restore_defaults(self):
"""
Reloads the saved parameter values.
"""
for name in self.light_names:
self.set_pos(name, self._defaults[name]['pos'])
self.set_dir(name, self._defaults[name]['dir'])
self.set_specular(name, self._defaults[name]['specular'])
self.set_ambient(name, self._defaults[name]['ambient'])
self.set_diffuse(name, self._defaults[name]['diffuse'])
self.set_active(name, self._defaults[name]['active'])
def randomize(self):
"""
Randomizes all requested lighting values within the sim
"""
for name in self.light_names:
if self.randomize_position:
self._randomize_position(name)
if self.randomize_direction:
self._randomize_direction(name)
if self.randomize_specular:
self._randomize_specular(name)
if self.randomize_ambient:
self._randomize_ambient(name)
if self.randomize_diffuse:
self._randomize_diffuse(name)
if self.randomize_active:
self._randomize_active(name)
def _randomize_position(self, name):
"""
Helper function to randomize position of a specific light source
Args:
name (str): Name of the lighting source to randomize for
"""
delta_pos = self.random_state.uniform(
low=-self.position_perturbation_size,
high=self.position_perturbation_size,
size=3,
)
self.set_pos(
name,
self._defaults[name]['pos'] + delta_pos,
)
def _randomize_direction(self, name):
"""
Helper function to randomize direction of a specific light source
Args:
name (str): Name of the lighting source to randomize for
"""
# sample a small, random axis-angle delta rotation
random_axis, random_angle = trans.random_axis_angle(angle_limit=self.direction_perturbation_size, random_state=self.random_state)
random_delta_rot = trans.quat2mat(trans.axisangle2quat(random_axis * random_angle))
# rotate direction by this delta rotation and set the new direction
new_dir = random_delta_rot.dot(self._defaults[name]['dir'])
self.set_dir(
name,
new_dir,
)
def _randomize_specular(self, name):
"""
Helper function to randomize specular attribute of a specific light source
Args:
name (str): Name of the lighting source to randomize for
"""
delta = self.random_state.uniform(
low=-self.specular_perturbation_size,
high=self.specular_perturbation_size,
size=3,
)
self.set_specular(
name,
self._defaults[name]['specular'] + delta,
)
def _randomize_ambient(self, name):
"""
Helper function to randomize ambient attribute of a specific light source
Args:
name (str): Name of the lighting source to randomize for
"""
delta = self.random_state.uniform(
low=-self.ambient_perturbation_size,
high=self.ambient_perturbation_size,
size=3,
)
self.set_ambient(
name,
self._defaults[name]['ambient'] + delta,
)
def _randomize_diffuse(self, name):
"""
Helper function to randomize diffuse attribute of a specific light source
Args:
name (str): Name of the lighting source to randomize for
"""
delta = self.random_state.uniform(
low=-self.diffuse_perturbation_size,
high=self.diffuse_perturbation_size,
size=3,
)
self.set_diffuse(
name,
self._defaults[name]['diffuse'] + delta,
)
def _randomize_active(self, name):
"""
Helper function to randomize active nature of a specific light source
Args:
name (str): Name of the lighting source to randomize for
"""
active = int(self.random_state.uniform() > 0.5)
self.set_active(
name,
active
)
def get_pos(self, name):
"""
Grabs position of a specific light source
Args:
name (str): Name of the lighting source
Returns:
np.array: (x,y,z) position of lighting source
Raises:
AssertionError: Invalid light name
"""
lightid = self.get_lightid(name)
        assert lightid > -1, "Unknown light %s" % name
return self.model.light_pos[lightid]
def set_pos(self, name, value):
"""
Sets position of a specific light source
Args:
name (str): Name of the lighting source
value (np.array): (x,y,z) position to set lighting source to
Raises:
AssertionError: Invalid light name
AssertionError: Invalid @value
"""
lightid = self.get_lightid(name)
        assert lightid > -1, "Unknown light %s" % name
value = list(value)
assert len(value) == 3, "Expected 3-dim value, got %s" % value
self.model.light_pos[lightid] = value
def get_dir(self, name):
"""
Grabs direction of a specific light source
Args:
name (str): Name of the lighting source
Returns:
np.array: (x,y,z) direction of lighting source
Raises:
AssertionError: Invalid light name
"""
lightid = self.get_lightid(name)
        assert lightid > -1, "Unknown light %s" % name
return self.model.light_dir[lightid]
def set_dir(self, name, value):
"""
Sets direction of a specific light source
Args:
name (str): Name of the lighting source
value (np.array): (ax,ay,az) direction to set lighting source to
Raises:
AssertionError: Invalid light name
AssertionError: Invalid @value
"""
lightid = self.get_lightid(name)
        assert lightid > -1, "Unknown light %s" % name
value = list(value)
assert len(value) == 3, "Expected 3-dim value, got %s" % value
self.model.light_dir[lightid] = value
def get_active(self, name):
"""
Grabs active nature of a specific light source
Args:
name (str): Name of the lighting source
Returns:
int: Whether light source is active (1) or not (0)
Raises:
AssertionError: Invalid light name
"""
lightid = self.get_lightid(name)
        assert lightid > -1, "Unknown light %s" % name
return self.model.light_active[lightid]
def set_active(self, name, value):
"""
Sets active nature of a specific light source
Args:
name (str): Name of the lighting source
value (int): Whether light source is active (1) or not (0)
Raises:
AssertionError: Invalid light name
"""
lightid = self.get_lightid(name)
        assert lightid > -1, "Unknown light %s" % name
self.model.light_active[lightid] = value
def get_specular(self, name):
"""
Grabs specular attribute of a specific light source
Args:
name (str): Name of the lighting source
Returns:
np.array: (r,g,b) specular color of lighting source
Raises:
AssertionError: Invalid light name
"""
lightid = self.get_lightid(name)
        assert lightid > -1, "Unknown light %s" % name
return self.model.light_specular[lightid]
def set_specular(self, name, value):
"""
Sets specular attribute of a specific light source
Args:
name (str): Name of the lighting source
value (np.array): (r,g,b) specular color to set lighting source to
Raises:
AssertionError: Invalid light name
AssertionError: Invalid @value
"""
lightid = self.get_lightid(name)
        assert lightid > -1, "Unknown light %s" % name
value = list(value)
assert len(value) == 3, "Expected 3-dim value, got %s" % value
self.model.light_specular[lightid] = value
def get_ambient(self, name):
"""
Grabs ambient attribute of a specific light source
Args:
name (str): Name of the lighting source
Returns:
np.array: (r,g,b) ambient color of lighting source
Raises:
AssertionError: Invalid light name
"""
lightid = self.get_lightid(name)
        assert lightid > -1, "Unknown light %s" % name
return self.model.light_ambient[lightid]
def set_ambient(self, name, value):
"""
Sets ambient attribute of a specific light source
Args:
name (str): Name of the lighting source
value (np.array): (r,g,b) ambient color to set lighting source to
Raises:
AssertionError: Invalid light name
AssertionError: Invalid @value
"""
lightid = self.get_lightid(name)
        assert lightid > -1, "Unknown light %s" % name
value = list(value)
assert len(value) == 3, "Expected 3-dim value, got %s" % value
self.model.light_ambient[lightid] = value
def get_diffuse(self, name):
"""
Grabs diffuse attribute of a specific light source
Args:
name (str): Name of the lighting source
Returns:
np.array: (r,g,b) diffuse color of lighting source
Raises:
AssertionError: Invalid light name
"""
lightid = self.get_lightid(name)
        assert lightid > -1, "Unknown light %s" % name
return self.model.light_diffuse[lightid]
def set_diffuse(self, name, value):
"""
Sets diffuse attribute of a specific light source
Args:
name (str): Name of the lighting source
value (np.array): (r,g,b) diffuse color to set lighting source to
Raises:
AssertionError: Invalid light name
AssertionError: Invalid @value
"""
lightid = self.get_lightid(name)
        assert lightid > -1, "Unknown light %s" % name
value = list(value)
assert len(value) == 3, "Expected 3-dim value, got %s" % value
self.model.light_diffuse[lightid] = value
def get_lightid(self, name):
"""
Grabs unique id number of a specific light source
Args:
name (str): Name of the lighting source
Returns:
int: id of lighting source. -1 if not found
"""
return self.model.light_name2id(name)
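# Minimal usage sketch (assumes an existing MjSim; the light name below is hypothetical):
#
#     modder = LightingModder(sim, light_names=["light0"], randomize_active=False)
#     modder.randomize()          # perturb position/direction/colors around the saved defaults
#     modder.restore_defaults()   # restore the values captured at construction time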
class CameraModder(BaseModder):
"""
Modder for modifying camera attributes in mujoco sim
Args:
sim (MjSim): MjSim object
random_state (None or RandomState): instance of np.random.RandomState
camera_names (None or list of str): list of camera names to use for randomization. If not provided,
all cameras are used for randomization.
randomize_position (bool): if True, randomize camera position
randomize_rotation (bool): if True, randomize camera rotation
randomize_fovy (bool): if True, randomize camera fovy
position_perturbation_size (float): size of camera position perturbations to each dimension
rotation_perturbation_size (float): magnitude of camera rotation perturbations in axis-angle.
Default corresponds to around 5 degrees.
fovy_perturbation_size (float): magnitude of camera fovy perturbations (corresponds to focusing)
Raises:
AssertionError: [No randomization selected]
"""
def __init__(
self,
sim,
random_state=None,
camera_names=None,
randomize_position=True,
randomize_rotation=True,
randomize_fovy=True,
position_perturbation_size=0.01,
rotation_perturbation_size=0.087,
fovy_perturbation_size=5.,
):
super().__init__(sim, random_state=random_state)
assert randomize_position or randomize_rotation or randomize_fovy
if camera_names is None:
camera_names = self.sim.model.camera_names
self.camera_names = camera_names
self.randomize_position = randomize_position
self.randomize_rotation = randomize_rotation
self.randomize_fovy = randomize_fovy
self.position_perturbation_size = position_perturbation_size
self.rotation_perturbation_size = rotation_perturbation_size
self.fovy_perturbation_size = fovy_perturbation_size
self.save_defaults()
def save_defaults(self):
"""
Uses the current MjSim state and model to save default parameter values.
"""
self._defaults = { k : {} for k in self.camera_names }
for camera_name in self.camera_names:
self._defaults[camera_name]['pos'] = np.array(self.get_pos(camera_name))
self._defaults[camera_name]['quat'] = np.array(self.get_quat(camera_name))
self._defaults[camera_name]['fovy'] = self.get_fovy(camera_name)
def restore_defaults(self):
"""
Reloads the saved parameter values.
"""
for camera_name in self.camera_names:
self.set_pos(camera_name, self._defaults[camera_name]['pos'])
self.set_quat(camera_name, self._defaults[camera_name]['quat'])
self.set_fovy(camera_name, self._defaults[camera_name]['fovy'])
def randomize(self):
"""
Randomizes all requested camera values within the sim
"""
for camera_name in self.camera_names:
if self.randomize_position:
self._randomize_position(camera_name)
if self.randomize_rotation:
self._randomize_rotation(camera_name)
if self.randomize_fovy:
self._randomize_fovy(camera_name)
def _randomize_position(self, name):
"""
Helper function to randomize position of a specific camera
Args:
name (str): Name of the camera to randomize for
"""
delta_pos = self.random_state.uniform(
low=-self.position_perturbation_size,
high=self.position_perturbation_size,
size=3,
)
self.set_pos(
name,
self._defaults[name]['pos'] + delta_pos,
)
def _randomize_rotation(self, name):
"""
Helper function to randomize orientation of a specific camera
Args:
name (str): Name of the camera to randomize for
"""
# sample a small, random axis-angle delta rotation
random_axis, random_angle = trans.random_axis_angle(angle_limit=self.rotation_perturbation_size, random_state=self.random_state)
random_delta_rot = trans.quat2mat(trans.axisangle2quat(random_axis * random_angle))
# compute new rotation and set it
base_rot = trans.quat2mat(trans.convert_quat(self._defaults[name]['quat'], to='xyzw'))
new_rot = random_delta_rot.T.dot(base_rot)
new_quat = trans.convert_quat(trans.mat2quat(new_rot), to='wxyz')
self.set_quat(
name,
new_quat,
)
def _randomize_fovy(self, name):
"""
Helper function to randomize fovy of a specific camera
Args:
name (str): Name of the camera to randomize for
"""
delta_fovy = self.random_state.uniform(
low=-self.fovy_perturbation_size,
high=self.fovy_perturbation_size,
)
self.set_fovy(
name,
self._defaults[name]['fovy'] + delta_fovy,
)
def get_fovy(self, name):
"""
Grabs fovy of a specific camera
Args:
name (str): Name of the camera
Returns:
float: vertical field of view of the camera, expressed in degrees
Raises:
AssertionError: Invalid camera name
"""
camid = self.get_camid(name)
assert camid > -1, "Unknown camera %s" % name
return self.model.cam_fovy[camid]
def set_fovy(self, name, value):
"""
Sets fovy of a specific camera
Args:
name (str): Name of the camera
value (float): vertical field of view of the camera, expressed in degrees
Raises:
AssertionError: Invalid camera name
AssertionError: Invalid value
"""
camid = self.get_camid(name)
assert 0 < value < 180
assert camid > -1, "Unknown camera %s" % name
self.model.cam_fovy[camid] = value
def get_quat(self, name):
"""
Grabs orientation of a specific camera
Args:
name (str): Name of the camera
Returns:
np.array: (w,x,y,z) orientation of the camera, expressed in quaternions
Raises:
AssertionError: Invalid camera name
"""
camid = self.get_camid(name)
assert camid > -1, "Unknown camera %s" % name
return self.model.cam_quat[camid]
def set_quat(self, name, value):
"""
Sets orientation of a specific camera
Args:
name (str): Name of the camera
value (np.array): (w,x,y,z) orientation of the camera, expressed in quaternions
Raises:
AssertionError: Invalid camera name
AssertionError: Invalid value
"""
value = list(value)
assert len(value) == 4, (
"Expectd value of length 4, instead got %s" % value)
camid = self.get_camid(name)
assert camid > -1, "Unknown camera %s" % name
self.model.cam_quat[camid] = value
def get_pos(self, name):
"""
Grabs position of a specific camera
Args:
name (str): Name of the camera
Returns:
np.array: (x,y,z) position of the camera
Raises:
AssertionError: Invalid camera name
"""
camid = self.get_camid(name)
assert camid > -1, "Unknown camera %s" % name
return self.model.cam_pos[camid]
def set_pos(self, name, value):
"""
Sets position of a specific camera
Args:
name (str): Name of the camera
value (np.array): (x,y,z) position of the camera
Raises:
AssertionError: Invalid camera name
AssertionError: Invalid value
"""
value = list(value)
assert len(value) == 3, (
"Expected value of length 3, instead got %s" % value)
camid = self.get_camid(name)
assert camid > -1
self.model.cam_pos[camid] = value
def get_camid(self, name):
"""
Grabs unique id number of a specific camera
Args:
name (str): Name of the camera
Returns:
int: id of camera. -1 if not found
"""
return self.model.camera_name2id(name)
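# Minimal usage sketch (assumes an existing MjSim; the camera name below is hypothetical):
#
#     modder = CameraModder(sim, camera_names=["agentview"], position_perturbation_size=0.02)
#     modder.randomize()          # jitter position, rotation, and fovy around the saved defaults
#     modder.restore_defaults()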
class TextureModder(BaseModder):
"""
Modify textures in model. Example use:
sim = MjSim(...)
modder = TextureModder(sim)
modder.whiten_materials() # ensures materials won't impact colors
modder.set_checker('some_geom', (255, 0, 0), (0, 0, 0))
modder.rand_all('another_geom')
Note: in order for the textures to take full effect, you'll need to set
the rgba values for all materials to [1, 1, 1, 1], otherwise the texture
colors will be modulated by the material colors. Call the
`whiten_materials` helper method to set all material colors to white.
Args:
sim (MjSim): MjSim object
random_state (RandomState): instance of np.random.RandomState
geom_names ([string]): list of geom names to use for randomization. If not provided,
all geoms are used for randomization.
randomize_local (bool): if True, constrain RGB color variations to be close to the
original RGB colors per geom and texture. Otherwise, RGB color values will
be sampled uniformly at random.
randomize_material (bool): if True, randomizes material properties associated with a
given texture (reflectance, shininess, specular)
local_rgb_interpolation (float): determines the size of color variations from
the base geom colors when @randomize_local is True.
local_material_interpolation (float): determines the size of material variations from
the base material when @randomize_local and @randomize_material are both True.
texture_variations (list of str): a list of texture variation strings. Each string
must be either 'rgb', 'checker', 'noise', or 'gradient' and corresponds to
a specific kind of texture randomization. For each geom that has a material
and texture, a random variation from this list is sampled and applied.
randomize_skybox (bool): if True, apply texture variations to the skybox as well.
"""
def __init__(
self,
sim,
random_state=None,
geom_names=None,
randomize_local=False,
randomize_material=False,
local_rgb_interpolation=0.1,
local_material_interpolation=0.2,
texture_variations=('rgb', 'checker', 'noise', 'gradient'),
randomize_skybox=True,
):
super().__init__(sim, random_state=random_state)
if geom_names is None:
geom_names = self.sim.model.geom_names
self.geom_names = geom_names
self.randomize_local = randomize_local
self.randomize_material = randomize_material
self.local_rgb_interpolation = local_rgb_interpolation
self.local_material_interpolation = local_material_interpolation
self.texture_variations = list(texture_variations)
self.randomize_skybox = randomize_skybox
self._all_texture_variation_callbacks = {
'rgb' : self.rand_rgb,
'checker' : self.rand_checker,
'noise' : self.rand_noise,
'gradient' : self.rand_gradient,
}
self._texture_variation_callbacks = {
k : self._all_texture_variation_callbacks[k]
for k in self.texture_variations
}
self.save_defaults()
def save_defaults(self):
"""
Uses the current MjSim state and model to save default parameter values.
"""
self.textures = [Texture(self.model, i)
for i in range(self.model.ntex)]
# self._build_tex_geom_map()
# save copy of original texture bitmaps
self._default_texture_bitmaps = [np.array(text.bitmap) for text in self.textures]
# These matrices will be used to rapidly synthesize
# checker pattern bitmaps
self._cache_checker_matrices()
self._defaults = { k : {} for k in self.geom_names }
if self.randomize_skybox:
self._defaults['skybox'] = {}
for name in self.geom_names:
if self._check_geom_for_texture(name):
# store the texture bitmap for this geom
tex_id = self._name_to_tex_id(name)
self._defaults[name]['texture'] = self._default_texture_bitmaps[tex_id]
# store material properties as well (in tuple (reflectance, shininess, specular) form)
self._defaults[name]['material'] = self.get_material(name)
else:
# store geom color
self._defaults[name]['rgb'] = np.array(self.get_geom_rgb(name))
if self.randomize_skybox:
tex_id = self._name_to_tex_id('skybox')
self._defaults['skybox']['texture'] = self._default_texture_bitmaps[tex_id]
def restore_defaults(self):
"""
Reloads the saved parameter values.
"""
for name in self.geom_names:
if self._check_geom_for_texture(name):
self.set_texture(name, self._defaults[name]['texture'], perturb=False)
self.set_material(name, self._defaults[name]['material'], perturb=False)
else:
self.set_geom_rgb(name, self._defaults[name]['rgb'])
if self.randomize_skybox:
self.set_texture('skybox', self._defaults['skybox']['texture'], perturb=False)
def randomize(self):
"""
Overrides mujoco-py implementation to also randomize color
for geoms that have no material.
"""
self.whiten_materials()
for name in self.geom_names:
if self._check_geom_for_texture(name):
# geom has valid texture that can be randomized
self._randomize_texture(name)
# randomize material if requested
if self.randomize_material:
self._randomize_material(name)
else:
# randomize geom color
self._randomize_geom_color(name)
if self.randomize_skybox:
self._randomize_texture("skybox")
def _randomize_geom_color(self, name):
"""
Helper function to randomize color of a specific geom
Args:
name (str): Name of the geom to randomize for
"""
if self.randomize_local:
random_color = self.random_state.uniform(0, 1, size=3)
rgb = (1. - self.local_rgb_interpolation) * self._defaults[name]['rgb'] + self.local_rgb_interpolation * random_color
else:
rgb = self.random_state.uniform(0, 1, size=3)
self.set_geom_rgb(name, rgb)
def _randomize_texture(self, name):
"""
Helper function to randomize texture of a specific geom
Args:
name (str): Name of the geom to randomize for
"""
keys = list(self._texture_variation_callbacks.keys())
choice = keys[self.random_state.randint(len(keys))]
self._texture_variation_callbacks[choice](name)
def _randomize_material(self, name):
"""
Helper function to randomize material of a specific geom
Args:
name (str): Name of the geom to randomize for
"""
# Return immediately if this is the skybox
if name == 'skybox':
return
# Grab material id
mat_id = self._name_to_mat_id(name)
# Randomize reflectance, shininess, and specular
material = self.random_state.uniform(0, 1, size=3) # (reflectance, shininess, specular)
self.set_material(name, material, perturb=self.randomize_local)
def rand_checker(self, name):
"""
Generates a random checker pattern for a specific geom
Args:
name (str): Name of the geom to randomize for
"""
rgb1, rgb2 = self.get_rand_rgb(2)
self.set_checker(name, rgb1, rgb2, perturb=self.randomize_local)
def rand_gradient(self, name):
"""
Generates a random gradient pattern for a specific geom
Args:
name (str): Name of the geom to randomize for
"""
rgb1, rgb2 = self.get_rand_rgb(2)
vertical = bool(self.random_state.uniform() > 0.5)
self.set_gradient(name, rgb1, rgb2, vertical=vertical, perturb=self.randomize_local)
def rand_rgb(self, name):
"""
Generates a random RGB color for a specific geom
Args:
name (str): Name of the geom to randomize for
"""
rgb = self.get_rand_rgb()
self.set_rgb(name, rgb, perturb=self.randomize_local)
def rand_noise(self, name):
"""
Generates a random RGB noise pattern for a specific geom
Args:
name (str): Name of the geom to randomize for
"""
fraction = 0.1 + self.random_state.uniform() * 0.8
rgb1, rgb2 = self.get_rand_rgb(2)
self.set_noise(name, rgb1, rgb2, fraction, perturb=self.randomize_local)
def whiten_materials(self):
"""
Extends modder.TextureModder to also whiten geom_rgba
Helper method for setting all material colors to white, otherwise
the texture modifications won't take full effect.
"""
for name in self.geom_names:
# whiten geom
geom_id = self.model.geom_name2id(name)
self.model.geom_rgba[geom_id, :] = 1.0
if self._check_geom_for_texture(name):
# whiten material
mat_id = self.model.geom_matid[geom_id]
self.model.mat_rgba[mat_id, :] = 1.0
def get_geom_rgb(self, name):
"""
Grabs rgb color of a specific geom
Args:
name (str): Name of the geom
Returns:
np.array: (r,g,b) geom colors
"""
geom_id = self.model.geom_name2id(name)
return self.model.geom_rgba[geom_id, :3]
def set_geom_rgb(self, name, rgb):
"""
Sets rgb color of a specific geom
Args:
name (str): Name of the geom
rgb (np.array): (r,g,b) geom colors
"""
geom_id = self.model.geom_name2id(name)
self.model.geom_rgba[geom_id, :3] = rgb
def get_rand_rgb(self, n=1):
"""
Grabs a batch of random rgb tuple combos
Args:
n (int): How many sets of rgb tuples to randomly generate
Returns:
            np.array or n-tuple: if n > 1, each tuple entry is an rgb tuple; otherwise, a single (r,g,b) array
"""
def _rand_rgb():
return np.array(self.random_state.uniform(size=3) * 255,
dtype=np.uint8)
if n == 1:
return _rand_rgb()
else:
return tuple(_rand_rgb() for _ in range(n))
def get_texture(self, name):
"""
Grabs texture of a specific geom
Args:
name (str): Name of the geom
Returns:
Texture: texture associated with the geom
"""
tex_id = self._name_to_tex_id(name)
texture = self.textures[tex_id]
return texture
def set_texture(self, name, bitmap, perturb=False):
"""
Sets the bitmap for the texture that corresponds
to geom @name.
If @perturb is True, then use the computed bitmap
to perturb the default bitmap slightly, instead
of replacing it.
Args:
name (str): Name of the geom
bitmap (np.array): 3d-array representing rgb pixel-wise values
perturb (bool): Whether to perturb the inputted bitmap or not
"""
bitmap_to_set = self.get_texture(name).bitmap
if perturb:
bitmap = (1. - self.local_rgb_interpolation) * self._defaults[name]['texture'] + self.local_rgb_interpolation * bitmap
bitmap_to_set[:] = bitmap
self.upload_texture(name)
def get_material(self, name):
"""
Grabs material of a specific geom
Args:
name (str): Name of the geom
Returns:
np.array: (reflectance, shininess, specular) material properties associated with the geom
"""
mat_id = self._name_to_mat_id(name)
# Material is in tuple form (reflectance, shininess, specular)
material = np.array((self.model.mat_reflectance[mat_id],
self.model.mat_shininess[mat_id],
self.model.mat_specular[mat_id]))
return material
def set_material(self, name, material, perturb=False):
"""
Sets the material that corresponds to geom @name.
If @perturb is True, then use the computed material
to perturb the default material slightly, instead
of replacing it.
Args:
name (str): Name of the geom
material (np.array): (reflectance, shininess, specular) material properties associated with the geom
perturb (bool): Whether to perturb the inputted material properties or not
"""
mat_id = self._name_to_mat_id(name)
if perturb:
material = (1. - self.local_material_interpolation) * self._defaults[name]['material'] + \
self.local_material_interpolation * material
self.model.mat_reflectance[mat_id] = material[0]
self.model.mat_shininess[mat_id] = material[1]
self.model.mat_specular[mat_id] = material[2]
def get_checker_matrices(self, name):
"""
Grabs checker pattern matrix associated with @name.
Args:
name (str): Name of geom
Returns:
np.array: 3d-array representing rgb checker pattern
"""
tex_id = self._name_to_tex_id(name)
return self._texture_checker_mats[tex_id]
def set_checker(self, name, rgb1, rgb2, perturb=False):
"""
Use the two checker matrices to create a checker
pattern from the two colors, and set it as
the texture for geom @name.
Args:
name (str): Name of geom
rgb1 (3-array): (r,g,b) value for one half of checker pattern
rgb2 (3-array): (r,g,b) value for other half of checker pattern
perturb (bool): Whether to perturb the resulting checker pattern or not
"""
cbd1, cbd2 = self.get_checker_matrices(name)
rgb1 = np.asarray(rgb1).reshape([1, 1, -1])
rgb2 = np.asarray(rgb2).reshape([1, 1, -1])
bitmap = rgb1 * cbd1 + rgb2 * cbd2
self.set_texture(name, bitmap, perturb=perturb)
def set_gradient(self, name, rgb1, rgb2, vertical=True, perturb=False):
"""
Creates a linear gradient from rgb1 to rgb2.
Args:
name (str): Name of geom
rgb1 (3-array): start color
            rgb2 (3-array): end color
            vertical (bool): if True, the gradient is in the positive
                y-direction; if False, it's in the positive x-direction.
perturb (bool): Whether to perturb the resulting gradient pattern or not
"""
# NOTE: MuJoCo's gradient uses a sigmoid. Here we simplify
# and just use a linear gradient... We could change this
# to just use a tanh-sigmoid if needed.
bitmap = self.get_texture(name).bitmap
h, w = bitmap.shape[:2]
if vertical:
p = np.tile(np.linspace(0, 1, h)[:, None], (1, w))
else:
p = np.tile(np.linspace(0, 1, w), (h, 1))
new_bitmap = np.zeros_like(bitmap)
for i in range(3):
new_bitmap[..., i] = rgb2[i] * p + rgb1[i] * (1.0 - p)
self.set_texture(name, new_bitmap, perturb=perturb)
def set_rgb(self, name, rgb, perturb=False):
"""
Just set the texture bitmap for geom @name
to a constant rgb value.
Args:
name (str): Name of geom
rgb (3-array): desired (r,g,b) color
perturb (bool): Whether to perturb the resulting color pattern or not
"""
bitmap = self.get_texture(name).bitmap
new_bitmap = np.zeros_like(bitmap)
new_bitmap[..., :] = np.asarray(rgb)
self.set_texture(name, new_bitmap, perturb=perturb)
def set_noise(self, name, rgb1, rgb2, fraction=0.9, perturb=False):
"""
Sets the texture bitmap for geom @name to a noise pattern
Args:
name (str): name of geom
rgb1 (3-array): background color
rgb2 (3-array): color of random noise foreground color
fraction (float): fraction of pixels with foreground color
perturb (bool): Whether to perturb the resulting color pattern or not
"""
bitmap = self.get_texture(name).bitmap
h, w = bitmap.shape[:2]
mask = self.random_state.uniform(size=(h, w)) < fraction
new_bitmap = np.zeros_like(bitmap)
new_bitmap[..., :] = np.asarray(rgb1)
new_bitmap[mask, :] = np.asarray(rgb2)
self.set_texture(name, new_bitmap, perturb=perturb)
def upload_texture(self, name):
"""
Uploads the texture to the GPU so it's available in the rendering.
Args:
name (str): name of geom
"""
texture = self.get_texture(name)
if not self.sim.render_contexts:
cymj.MjRenderContextOffscreen(self.sim)
for render_context in self.sim.render_contexts:
render_context.upload_texture(texture.id)
def _check_geom_for_texture(self, name):
"""
        Helper function to determine if the geom @name has
an assigned material and that the material has
an assigned texture.
Args:
name (str): name of geom
Returns:
bool: True if specific geom has both material and texture associated, else False
"""
geom_id = self.model.geom_name2id(name)
mat_id = self.model.geom_matid[geom_id]
if mat_id < 0:
return False
tex_id = self.model.mat_texid[mat_id]
if tex_id < 0:
return False
return True
def _name_to_tex_id(self, name):
"""
Helper function to get texture id from geom name.
Args:
name (str): name of geom
Returns:
int: id of texture associated with geom
Raises:
AssertionError: [No texture associated with geom]
"""
# handle skybox separately
if name == 'skybox':
skybox_tex_id = -1
for tex_id in range(self.model.ntex):
skybox_textype = 2
if self.model.tex_type[tex_id] == skybox_textype:
skybox_tex_id = tex_id
assert skybox_tex_id >= 0
return skybox_tex_id
assert self._check_geom_for_texture(name)
geom_id = self.model.geom_name2id(name)
mat_id = self.model.geom_matid[geom_id]
tex_id = self.model.mat_texid[mat_id]
return tex_id
def _name_to_mat_id(self, name):
"""
Helper function to get material id from geom name.
Args:
name (str): name of geom
Returns:
int: id of material associated with geom
Raises:
ValueError: [No material associated with skybox]
AssertionError: [No material associated with geom]
"""
# handle skybox separately
if name == 'skybox':
raise ValueError("Error: skybox has no material!")
assert self._check_geom_for_texture(name)
geom_id = self.model.geom_name2id(name)
mat_id = self.model.geom_matid[geom_id]
return mat_id
def _cache_checker_matrices(self):
"""
Cache two matrices of the form [[1, 0, 1, ...],
[0, 1, 0, ...],
...]
and [[0, 1, 0, ...],
[1, 0, 1, ...],
...]
for each texture. To use for fast creation of checkerboard patterns
"""
self._texture_checker_mats = []
for tex_id in range(self.model.ntex):
texture = self.textures[tex_id]
h, w = texture.bitmap.shape[:2]
self._texture_checker_mats.append(self._make_checker_matrices(h, w))
def _make_checker_matrices(self, h, w):
"""
Helper function to quickly generate binary matrices used to create checker patterns
Args:
h (int): Desired height of matrices
w (int): Desired width of matrices
Returns:
2-tuple:
- (np.array): 2d-array representing first half of checker matrix
- (np.array): 2d-array representing second half of checker matrix
"""
re = np.r_[((w + 1) // 2) * [0, 1]]
ro = np.r_[((w + 1) // 2) * [1, 0]]
cbd1 = np.expand_dims(np.row_stack(((h + 1) // 2) * [re, ro]), -1)[:h, :w]
cbd2 = np.expand_dims(np.row_stack(((h + 1) // 2) * [ro, re]), -1)[:h, :w]
return cbd1, cbd2
# From mjtTexture
MJT_TEXTURE_ENUM = ['2d', 'cube', 'skybox']
class Texture:
"""
Helper class for operating on the MuJoCo textures.
Args:
model (MjModel): Mujoco sim model
tex_id (int): id of specific texture in mujoco sim
"""
__slots__ = ['id', 'type', 'height', 'width', 'tex_adr', 'tex_rgb']
def __init__(self, model, tex_id):
self.id = tex_id
self.type = MJT_TEXTURE_ENUM[model.tex_type[tex_id]]
self.height = model.tex_height[tex_id]
self.width = model.tex_width[tex_id]
self.tex_adr = model.tex_adr[tex_id]
self.tex_rgb = model.tex_rgb
@property
def bitmap(self):
"""
Grabs color bitmap associated with this texture from the mujoco sim.
Returns:
np.array: 3d-array representing the rgb texture bitmap
"""
size = self.height * self.width * 3
data = self.tex_rgb[self.tex_adr:self.tex_adr + size]
return data.reshape((self.height, self.width, 3))
class PhysicalParameterModder(BaseModder):
"""
    Modder for various physical parameters of the mujoco model.
    Can be used to modify parameters stored in MjModel (i.e. friction, damping, etc.) as
    well as optimizer parameters like global friction multipliers (e.g. solimp, solref, etc.).
    To modify a parameter, use the parameter to be changed as a keyword argument to
    self.mod and the new value as the value for that argument. Supports arbitrarily many
    modifications in a single step.
    :NOTE: It is necessary to perform sim.forward after performing the modification.
    :NOTE: Some parameters might not be able to be changed. Users should verify,
    after the call to forward, that the parameter is indeed changed.
Args:
sim (MjSim): Mujoco sim instance
random_state (RandomState): instance of np.random.RandomState, specific
seed used to randomize these modifications without impacting other
numpy seeds / randomizations
"""
def __init__(self, sim, random_state=None):
super().__init__(sim=sim, random_state=random_state)
@property
def opt(self):
"""
Returns:
?: MjModel sim options
"""
return self.sim.model.opt
def __getattr__(self, name):
try:
opt_attr = getattr(self.opt, name)
except AttributeError:
opt_attr = None
try:
model_attr = getattr(self.model, name)
except AttributeError:
model_attr = None
ret = opt_attr if opt_attr is not None else model_attr
if callable(ret):
def r(*args):
return ret(*args)
return r
return ret
def mod(self, **kwargs):
"""
        Method to actually mod. Assumes keyword arguments are passed in with the key being
        the parameter to modify and the value being the value to set.
Feel free to add more as we see fit.
Args:
**kwargs (dict): Physical parameters to actually modify mid-sim
"""
for to_mod in kwargs:
val = kwargs[to_mod]
param = to_mod
ind = None
if 'geom_friction' in param:
joint = param.replace('_geom_friction', '')
ind = self.geom_name2id(joint)
param = 'geom_friction'
elif 'dof_damping' in param:
joint = param.replace('_dof_damping', '')
param = 'dof_damping'
joint = self.joint_name2id(joint)
                ind = np.zeros(self.nv, dtype=bool)  # boolean mask over the dofs of this joint
for i in range(self.model.nv):
if self.dof_jntid[i] == joint:
                        ind[i] = True
if ind is None:
setattr(self, param, val)
else:
self.__getattr__(param)[ind] = val
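    # Usage sketch (hypothetical names, for illustration only): keyword keys are
    # either "<geom>_geom_friction" / "<joint>_dof_damping" or any plain
    # MjModel / option attribute; sim.forward() must be called afterwards.
    #
    #   modder = PhysicalParameterModder(sim)
    #   modder.mod(table_geom_friction=[1.0, 0.005, 0.0001],
    #              robot0_joint1_dof_damping=0.5)
    #   sim.forward()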
|
the-stack_106_22457 | # Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from orquesta import statuses
from orquesta.tests.unit.conducting.native import base
class CyclicWorkflowConductorTest(base.OrchestraWorkflowConductorTest):
def test_cycle(self):
wf_name = "cycle"
expected_task_seq = [
"prep",
"task1",
"task2",
"task3",
"task1",
"task2",
"task3",
"task1",
"task2",
"task3",
]
self.assert_spec_inspection(wf_name)
self.assert_conducting_sequences(wf_name, expected_task_seq)
def test_cycles(self):
wf_name = "cycles"
expected_task_seq = [
"prep",
"task1",
"task2",
"task3",
"task4",
"task2",
"task5",
"task1",
"task2",
"task3",
"task4",
"task2",
"task5",
"task1",
"task2",
"task3",
"task4",
"task2",
"task5",
]
self.assert_spec_inspection(wf_name)
self.assert_conducting_sequences(wf_name, expected_task_seq)
def test_rollback_retry(self):
wf_name = "rollback-retry"
expected_task_seq = ["init", "check", "create", "rollback", "check", "delete"]
mock_statuses = [
statuses.SUCCEEDED, # init
statuses.FAILED, # check
statuses.SUCCEEDED, # create
statuses.SUCCEEDED, # rollback
statuses.SUCCEEDED, # check
statuses.SUCCEEDED, # delete
]
self.assert_spec_inspection(wf_name)
self.assert_conducting_sequences(wf_name, expected_task_seq, mock_statuses=mock_statuses)
def test_cycle_and_fork(self):
wf_name = "cycle-fork"
expected_task_seq = [
"init",
"query",
"decide_cheer",
"decide_work",
"cheer",
"notify_work",
"toil",
"query",
"decide_cheer",
"decide_work",
]
mock_results = [
None, # init
True, # query
None, # decide_cheer
None, # decide_work
None, # cheer
None, # notify_work
None, # toil
False, # query
None, # decide_cheer
None, # decide_work
]
self.assert_spec_inspection(wf_name)
self.assert_conducting_sequences(wf_name, expected_task_seq, mock_results=mock_results)
|
the-stack_106_22458 | import warnings
class OnceTrigger(object):
"""Trigger based on the starting point of the iteration.
    This trigger fires only once, at the starting point of the iteration. There
    are two ways to specify the starting point: only at the starting point of the
    whole iteration, or again each time training is resumed.
Args:
call_on_resume (bool): Whether the extension is called again or not
when restored from a snapshot. It is set to ``False`` by default.
"""
def __init__(self, call_on_resume=False):
self._flag_first = True
self._flag_resumed = call_on_resume
def __call__(self, trainer):
flag = self._flag_first or self._flag_resumed
self._flag_resumed = False
self._flag_first = False
return flag
def serialize(self, serializer):
try:
self._flag_first = serializer('_flag_first', self._flag_first)
except KeyError:
warnings.warn(
                'The flag is not saved. '
                'OnceTrigger guesses it is not the first call when resumed. '
                'If this trigger is resumed before it is first called, '
                'it may not work correctly.')
self._flag_first = False
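# Usage sketch (assumes a standard chainer Trainer; `my_extension` is an
# illustrative placeholder, not part of this module):
#
#   trainer.extend(my_extension, trigger=OnceTrigger())
#   # or, to fire again after every resume from a snapshot:
#   trainer.extend(my_extension, trigger=OnceTrigger(call_on_resume=True))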
|
the-stack_106_22459 | # -*- coding: UTF8 -*-
from pupylib.PupyModule import *
__class_name__="GetPrivsModule"
@config(compat=["windows"], cat="manage")
class GetPrivsModule(PupyModule):
""" try to get SeDebugPrivilege for the current process """
dependencies=["psutil", "pupwinutils.security"]
def init_argparse(self):
self.arg_parser = PupyArgumentParser(prog="getprivs", description=self.__doc__)
def run(self, args):
self.client.conn.modules["pupwinutils.security"].EnablePrivilege("SeDebugPrivilege")
self.success("SeDebugPrivilege enabled !")
|
the-stack_106_22461 | import serial
import time
class BaseCtrl:
COMMAND_DELAY = 0.05
def __init__(self, app, device='/dev/ttyUSB0', baudrate=9600, channels=8):
self.verify_wait = -1
self.channels = channels
self.log = app.log
self.log.info("Init %s(%s, baudrate: %d)", self.__class__.__name__, device, baudrate)
self.port = serial.Serial(device,
baudrate=baudrate,
bytesize=8,
parity='N',
stopbits=1,
timeout=1)
self.port.reset_input_buffer()
self._init()
def _init(self):
pass
def set_verify(self, wait):
        self.verify_wait = wait
if wait >= 0:
self.port.reset_input_buffer()
def write(self, cmd):
self.log.debug("Write: %s", cmd)
data = cmd.encode()
self.port.write(data)
def read(self, size=0):
if size > 0:
data = self.port.read(size)
else:
data = self.port.readline()
self.log.debug("Read: %s", data)
return data
def set(self, channel, bright):
self.log.error("set({}, {}): to be implemented".format(channel, bright))
def on(self, channel=0):
self.log.error("on({}): to be implemented".format(channel))
def off(self, channel=0):
self.log.error("off({}): to be implemented".format(channel))
|
the-stack_106_22463 | import logging
from time import sleep
from typing import List
from django.utils import timezone
from river.adapters.progression_counter import ProgressionCounter
from river.adapters.topics import TopicsManager
from river.models import Batch
logger = logging.getLogger(__name__)
def teardown_after_batch(batch: Batch, topics: TopicsManager):
for base_topic in ["batch", "extract", "transform", "load"]:
topics.delete(f"{base_topic}.{batch.id}")
def clean(counter: ProgressionCounter, topics: TopicsManager):
current_batches = Batch.objects.filter(completed_at__isnull=True, canceled_at__isnull=True).prefetch_related(
"resources"
)
batches_to_delete: List[Batch] = []
for batch in current_batches:
resources_progressions = [counter.get(f"{batch.id}:{resource.id}") for resource in batch.resources.all()]
if all(
[
progression is not None
and progression.extracted is not None
and ((progression.loaded or 0) + (progression.failed or 0)) >= progression.extracted
for progression in resources_progressions
]
):
batches_to_delete.append(batch)
if batches_to_delete:
logger.info(f"Deleting batches: {batches_to_delete}.")
for batch in batches_to_delete:
teardown_after_batch(batch, topics)
batch.completed_at = timezone.now()
batch.save()
logger.info(f"Batch {batch} deleted.")
def run(counter: ProgressionCounter, topics: TopicsManager):
while True:
clean(counter=counter, topics=topics)
sleep(10)
|
the-stack_106_22464 | # 2021 - Borworntat Dendumrongkul
class Notify:
_version = "1.0.0"
_token = ""
def __init__(self, _token=""):
        self._token = _token  # store in the attribute that send() checks
def version(self):
return self._version
def setKey(self, token):
try:
self._token = token
return True
except:
return False
def send_sticker(self, pack, idx):
payload = {
'message': " ",
'stickerPackageId': pack,
'stickerId': idx
}
return self.send(payload)
def send_message(self, msg):
payload = { 'message': msg }
return self.send(payload)
def send_picture(self, url):
payload = {
'message': " ",
'imageThumbnail': url,
'imageFullsize': url
}
return self.send(payload)
def send(self, payload, file=None):
if self._token != "":
import requests
url = "https://notify-api.line.me/api/notify"
tok = self._token
headers = {
"Authorization": "Bearer " + tok
}
r = requests.post(url, headers=headers, data=payload, files=file)
return True
else:
return False
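# Usage sketch (requires a valid LINE Notify access token; the token and the
# sticker IDs below are placeholders):
#
#   notify = Notify("YOUR_LINE_NOTIFY_TOKEN")
#   notify.send_message("build finished")
#   notify.send_picture("https://example.com/chart.png")
#   notify.send_sticker(446, 1988)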
|
the-stack_106_22465 | # -*- coding: utf-8 -*-
import os
from unittest import TestCase
from unittest.mock import patch, MagicMock
from pathlib import Path
from collections import namedtuple
from sfzlint.cli import sfzlint, sfzlist
from sfzlint import settings
fixture_dir = Path(__file__).parent / 'fixtures'
is_fs_case_insensitive = (
os.path.exists(__file__.upper()) and os.path.exists(__file__.lower()))
class ErrMsg(namedtuple('errmsg', (
'file', 'row', 'column', 'level', 'message'))):
def __new__(cls, file, row, column, l_m):
level, message = l_m.split(' ', 1)
return super().__new__(cls, file, row, column, level, message)
def patchargs(path, *args):
newargv = ['sfzlint', '--no-pickle', str(fixture_dir / path)] + list(args)
def wrapper(fn):
return patch('sys.argv', new=newargv)(fn)
return wrapper
class TestSFZLint(TestCase):
def tearDown(self):
# Ensure this does not get accidentally set
self.assertFalse(settings.pickle)
def assert_has_message(self, message, err_list):
msglen = len(message)
msgs = {e.message[:msglen] for e in err_list}
self.assertIn(message, msgs, f'{message} not in {err_list}')
@patchargs('basic/valid.sfz')
@patch('builtins.print')
def test_valid_file(self, print_mock):
sfzlint()
self.assertFalse(print_mock.called, print_mock.call_args_list)
@patchargs('basic/bad.sfz')
@patch('builtins.print')
def test_invalid_file(self, print_mock):
sfzlint()
self.assertTrue(print_mock.called)
calls = [ErrMsg(*a[0][0].split(':'))
for a in print_mock.call_args_list]
self.assert_has_message('unknown opcode', calls)
@patchargs('basic')
def test_lint_dir(self):
with patch('builtins.print') as print_mock:
sfzlint()
self.assertTrue(print_mock.called)
calls = [ErrMsg(*a[0][0].split(':'))
for a in print_mock.call_args_list]
self.assert_has_message('unknown opcode', calls)
@patchargs('include/inbadfile.sfz')
def test_include_parse_error(self):
with patch('builtins.print') as print_mock:
sfzlint()
self.assertTrue(print_mock.called)
calls = [ErrMsg(*a[0][0].split(':', 3))
for a in print_mock.call_args_list]
self.assert_has_message('error loading include', calls)
@patchargs('include/hasinc.sfz')
@patch('builtins.print')
def test_include_define(self, print_mock):
sfzlint()
self.assertFalse(print_mock.called, print_mock.call_args_list)
@patchargs('basic/valid.sfz', '--spec-version', 'v1')
@patch('builtins.print')
def test_spec_version(self, print_mock):
sfzlint()
self.assertTrue(print_mock.called)
calls = [ErrMsg(*a[0][0].split(':'))
for a in print_mock.call_args_list]
self.assert_has_message('header spec v2 not in', calls)
self.assert_has_message('opcode spec v2 is not', calls)
@patchargs('basic/nosample.sfz')
@patch('builtins.print')
def test_missing_sample(self, print_mock):
sfzlint()
self.assertTrue(print_mock.called)
calls = [ErrMsg(*a[0][0].split(':'))
for a in print_mock.call_args_list]
self.assert_has_message('file not found', calls)
@patchargs('basic/relsample.sfz')
def test_relative_path(self):
with patch('builtins.print') as print_mock:
sfzlint()
self.assertFalse(print_mock.called, print_mock.call_args_list)
@patchargs('basic/def_path.sfz')
def test_default_path(self):
with patch('builtins.print') as print_mock:
sfzlint()
self.assertFalse(print_mock.called, print_mock.call_args_list)
@patchargs('basic/badcase.sfz')
def test_bad_case(self):
with patch('builtins.print') as print_mock:
sfzlint()
self.assertTrue(print_mock.called)
calls = [ErrMsg(*a[0][0].split(':'))
for a in print_mock.call_args_list]
if (is_fs_case_insensitive):
self.assert_has_message('case does not match', calls)
else:
self.assert_has_message('file not found', calls)
@patchargs('include/sub/relpath.sfz',
'--rel-path', str(fixture_dir / 'include'))
def test_rel_path(self):
with patch('builtins.print') as print_mock:
sfzlint()
self.assertFalse(print_mock.called, print_mock.call_args_list)
@patchargs('aria_program.xml')
def test_xml_with_defines(self):
with patch('builtins.print') as print_mock:
sfzlint()
self.assertTrue(print_mock.called)
calls = [ErrMsg(*a[0][0].split(':'))
for a in print_mock.call_args_list]
self.assertEqual(1, len(calls), calls)
self.assertIn('foo', calls[0].message)
class TestSFZList(TestCase):
@patch('sys.argv', new=['sfzlist', '--no-pickle'])
def test_valid_file(self):
print_mock = MagicMock()
sfzlist(print_mock)
self.assertTrue(print_mock.called)
opcodes = {line[0][0].split(' ', 1)[0]
for line in print_mock.call_args_list}
for test_opcode in ('cutoff2_onccN', 'sample', '*_mod', 'curve_index'):
self.assertIn(test_opcode, opcodes)
@patch('sys.argv', new=[
'sfzlist', '--no-pickle', '--path', str(fixture_dir / 'basic')])
def test_path_dir(self):
print_mock = MagicMock()
sfzlist(print_mock)
self.assertTrue(print_mock.called)
opcodes = {line[0][0].split(' ', 1)[0]
for line in print_mock.call_args_list}
for test_opcode in ('foo', 'sw_default', 'amp_velcurve_N'):
self.assertIn(test_opcode, opcodes)
for test_opcode in ('cutoff2_onccN', 'curve_index', '*_mod'):
self.assertNotIn(test_opcode, opcodes)
|
the-stack_106_22468 | #-*-coding:utf-8-*-
"""
@FileName:observation.py
@Description:
@Author:qiwenhao
@Time:2021/5/12 20:11
@Department: AIStudio R&D Department
@Copyright:©2011-2021 北京华如科技股份有限公司
@Project:601
"""
_OBSINIT = None
class ObservationProcessor(object):
    # Parse the data packet
@staticmethod
def get_obs(Data):
"""
        Parse the situation information and assemble the observation.
        :param Data: situation information returned by bvrsim
        :return: parsed bvrsim situation information, formatted as a dict
"""
        # Initialize the obs data structure
if Data is None:
print("从引擎中获取的数据为空!")
return None
obs = {side: dict(platforminfos=[], trackinfos=[], missileinfos=[]) for side in ['blue', 'red']}
obs["sim_time"] = Data.CurTime # 仿真时间
obs["xsim_tag"] = Data.XSimTag # 引擎标识
data = Data.IdenInfos
try:
for info in data:
                # Parse own combat platform info
for platforminfo in info.PlatformInfos:
if platforminfo.Identification == "红方":
pl_side = "red"
elif platforminfo.Identification == "蓝方":
pl_side = "blue"
else:
continue
obs[pl_side]['platforminfos'].append(
dict(
                            Name=platforminfo.Name,  # aircraft name
                            Identification=platforminfo.Identification,  # identification (red side or blue side)
                            ID=platforminfo.ID,  # unique ID of the aircraft
                            Type=platforminfo.Type,  # aircraft type (manned aircraft = 1, UAV = 2)
                            Availability=platforminfo.Availability,  # availability, 0 to 1 (1 = alive, 0 = destroyed)
                            X=platforminfo.X,  # current X coordinate
                            Y=platforminfo.Y,  # current Y coordinate
                            Lon=platforminfo.Lon,  # current longitude
                            Lat=platforminfo.Lat,  # current latitude
                            Alt=platforminfo.Alt,  # current altitude
                            Heading=platforminfo.Heading,  # current heading, -180° to 180° (0° = due north, counterclockwise positive, clockwise negative)
                            Pitch=platforminfo.Pitch,  # current pitch, -90° to 90° (nose up positive, nose down negative)
                            Roll=platforminfo.Roll,  # current roll, -180° to 180°
                            Speed=platforminfo.Speed,  # current speed
                            CurTime=platforminfo.CurTime,  # current time
                            AccMag=platforminfo.AccMag,  # commanded acceleration
                            NormalG=platforminfo.NormalG,  # commanded g-load
                            IsLocked=platforminfo.IsLocked,  # whether the aircraft is locked by an enemy missile
                            Status=platforminfo.Status,  # current status
                            LeftWeapon=platforminfo.LeftWeapon  # number of missiles remaining
)
)
                # Parse target (enemy aircraft) track info
for trackinfo in info.TargetInfos:
if trackinfo.Identification == "蓝方":
ti_side = "red"
elif trackinfo.Identification == "红方":
ti_side = "blue"
else:
continue
obs[ti_side]['trackinfos'].append(
dict(
                            Name=trackinfo.Name,  # enemy aircraft name
                            Identification=trackinfo.Identification,  # identification (red side or blue side)
                            ID=trackinfo.ID,  # unique ID of the aircraft
                            Type=trackinfo.Type,  # aircraft type (manned aircraft = 1, UAV = 2)
                            Availability=trackinfo.Availability,  # availability, 0 to 1 (1 = alive, 0 = destroyed)
                            X=trackinfo.X,  # current X coordinate
                            Y=trackinfo.Y,  # current Y coordinate
                            Lon=trackinfo.Lon,  # current longitude
                            Lat=trackinfo.Lat,  # current latitude
                            Alt=trackinfo.Alt,  # current altitude
                            Heading=trackinfo.Heading,  # current heading, -180° to 180° (0° = due north, counterclockwise positive, clockwise negative)
                            Pitch=trackinfo.Pitch,  # current pitch, -90° to 90° (nose up positive, nose down negative)
                            Roll=trackinfo.Roll,  # current roll, -180° to 180°
                            Speed=trackinfo.Speed,  # current speed
                            CurTime=trackinfo.CurTime,  # current time
                            IsLocked=trackinfo.IsLocked  # whether the enemy aircraft is locked by our missile
)
)
                # Parse incoming (enemy) missile info
for missileinfo in info.MissileInfos:
if missileinfo.Identification == "红方":
mi_side = "blue"
elif missileinfo.Identification == "蓝方":
mi_side = "red"
else:
continue
obs[mi_side]['missileinfos'].append(
dict(
                            Name=missileinfo.Name,  # enemy missile name
                            Identification=missileinfo.Identification,  # identification (red side or blue side)
                            ID=missileinfo.ID,  # unique ID of the missile
                            Type=missileinfo.Type,  # missile type (missile = 3)
                            Availability=missileinfo.Availability,  # availability, 0 to 1 (1 = in flight, 0 = exploded)
                            X=missileinfo.X,  # current X coordinate
                            Y=missileinfo.Y,  # current Y coordinate
                            Lon=missileinfo.Lon,  # current longitude
                            Lat=missileinfo.Lat,  # current latitude
                            Alt=missileinfo.Alt,  # current altitude
                            Heading=missileinfo.Heading,  # current heading, -180° to 180° (0° = due north, counterclockwise positive, clockwise negative)
                            Pitch=missileinfo.Pitch,  # current pitch, -90° to 90° (nose up positive, nose down negative)
                            Roll=missileinfo.Roll,  # current roll, -180° to 180°
                            Speed=missileinfo.Speed,  # current speed
                            CurTime=missileinfo.CurTime,  # current time
                            LauncherID=missileinfo.LauncherID,  # ID of the platform that launched the missile
                            EngageTargetID=missileinfo.EngageTargetID  # ID of the target the missile is engaging
)
)
                # Parse friendly (own) missile info
for missileinfo in info.MissileInfos:
if missileinfo.Identification == "红方":
mi_side = "red"
elif missileinfo.Identification == "蓝方":
mi_side = "blue"
else:
continue
obs[mi_side]['missileinfos'].append(
dict(
                            Name=missileinfo.Name,  # name of our launched missile
                            Identification=missileinfo.Identification,  # identification (red side or blue side)
                            ID=missileinfo.ID,  # unique ID of the missile
                            Type=missileinfo.Type,  # missile type (missile = 3)
                            Availability=missileinfo.Availability,  # availability, 0 to 1 (1 = in flight, 0 = exploded)
                            X=missileinfo.X,  # current X coordinate
                            Y=missileinfo.Y,  # current Y coordinate
                            Lon=missileinfo.Lon,  # current longitude
                            Lat=missileinfo.Lat,  # current latitude
                            Alt=missileinfo.Alt,  # current altitude
                            Heading=missileinfo.Heading,  # current heading, -180° to 180° (0° = due north, counterclockwise positive, clockwise negative)
                            Pitch=missileinfo.Pitch,  # current pitch, -90° to 90° (nose up positive, nose down negative)
                            Roll=missileinfo.Roll,  # current roll, -180° to 180°
                            Speed=missileinfo.Speed,  # current speed
                            CurTime=missileinfo.CurTime,  # current time
                            LauncherID=missileinfo.LauncherID,  # ID of the platform that launched the missile
                            EngageTargetID=missileinfo.EngageTargetID  # ID of the target the missile is engaging
)
)
global _OBSINIT
if _OBSINIT is None:
_OBSINIT = obs
except Exception as e:
print("解析数据异常~")
return obs
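    # Usage sketch (illustrative; `raw_data` stands for the object handed over
    # by the bvrsim engine callback):
    #
    #   obs = ObservationProcessor.get_obs(raw_data)
    #   red_planes = obs['red']['platforminfos']
    #   incoming = [m for m in obs['red']['missileinfos']
    #               if m['Identification'] == '蓝方']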
|
the-stack_106_22469 | """Utility functions for DBus use within Bluezero."""
# Standard libraries
import re
import subprocess
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
# D-Bus import
import dbus
import dbus.mainloop.glib
# python-bluezero constants import
from bluezero import constants
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
logger.addHandler(NullHandler())
def bluez_version():
"""
get the version of the BlueZ daemon being used on the system
:return: String of BlueZ version
"""
p = subprocess.Popen(['bluetoothctl', '-v'], stdout=subprocess.PIPE)
ver = p.communicate()
return str(ver[0].decode().rstrip())
def bluez_experimental_mode():
"""
Return True if the BlueZ daemon service is in experimental mode
:return: True if experimental enabled
"""
status = subprocess.check_output('service bluetooth status', shell=True)
if re.search('--experimental', status.decode('utf-8')) is None:
return False
else:
return True
def interfaces_added(path, interfaces):
"""
Callback for when an interface is added
:param path:
:param interfaces:
:return:
"""
if constants.DEVICE_INTERFACE in interfaces:
logger.debug('Device added at {}'.format(path))
def properties_changed(interface, changed, invalidated, path):
"""
Callback for when properties are changed
:param interface:
:param changed:
:param invalidated:
:param path:
:return:
"""
if constants.DEVICE_INTERFACE in interface:
for prop in changed:
logger.debug(
'{}:{} Property {} new value {}'.format(interface,
path,
prop,
changed[prop]))
def get_dbus_obj(dbus_path):
"""
    Get the DBus object for the given path
:param dbus_path:
:return:
"""
bus = dbus.SystemBus()
return bus.get_object(constants.BLUEZ_SERVICE_NAME, dbus_path)
def get_dbus_iface(iface, dbus_obj):
"""
Return the DBus interface object for given interface and DBus object
:param iface:
:param dbus_obj:
:return:
"""
return dbus.Interface(dbus_obj, iface)
def get_managed_objects():
"""Return the objects currently managed by the DBus Object Manager."""
bus = dbus.SystemBus()
manager = dbus.Interface(bus.get_object(
constants.BLUEZ_SERVICE_NAME, '/'),
constants.DBUS_OM_IFACE)
return manager.GetManagedObjects()
def get_mac_addr_from_dbus_path(path):
"""Return the mac addres from a dev_XX_XX_XX_XX_XX_XX dbus path"""
return path.split("/")[-1].replace("dev_", '').replace("_", ":")
def _get_dbus_path2(objects, parent_path, iface_in, prop, value):
"""
Find DBus path for given DBus interface with property of a given value.
:param objects: Dictionary of objects to search
:param parent_path: Parent path to include in search
:param iface_in: The interface of interest
:param prop: The property to search for
:param value: The value of the property being searched for
:return: Path of object searched for
"""
if parent_path is None:
return None
for path, iface in objects.items():
props = iface.get(iface_in)
if props is None:
continue
if props[prop].lower() == value.lower() and \
path.startswith(parent_path):
return path
return None
def get_dbus_path(adapter=None,
device=None,
service=None,
characteristic=None,
descriptor=None):
"""
Return a DBus path for the given properties
:param adapter: Adapter address
:param device: Device address
:param service: GATT Service UUID
:param characteristic: GATT Characteristic UUID
:param descriptor: GATT Descriptor UUID
:return: DBus path
"""
bus = dbus.SystemBus()
manager = dbus.Interface(
bus.get_object(constants.BLUEZ_SERVICE_NAME, '/'),
constants.DBUS_OM_IFACE)
mngd_objs = manager.GetManagedObjects()
_dbus_obj_path = None
if adapter is not None:
_dbus_obj_path = _get_dbus_path2(mngd_objs,
'/org/bluez',
constants.ADAPTER_INTERFACE,
'Address',
adapter)
if device is not None:
_dbus_obj_path = _get_dbus_path2(mngd_objs,
_dbus_obj_path,
constants.DEVICE_INTERFACE,
'Address',
device)
if service is not None:
_dbus_obj_path = _get_dbus_path2(mngd_objs,
_dbus_obj_path,
constants.GATT_SERVICE_IFACE,
'UUID',
service)
if characteristic is not None:
_dbus_obj_path = _get_dbus_path2(mngd_objs,
_dbus_obj_path,
constants.GATT_CHRC_IFACE,
'UUID',
characteristic)
if descriptor is not None:
_dbus_obj_path = _get_dbus_path2(mngd_objs,
_dbus_obj_path,
constants.GATT_DESC_IFACE,
'UUID',
descriptor)
return _dbus_obj_path
def get_profile_path(adapter,
device,
profile):
"""
Return a DBus path for the given properties
:param adapter: Adapter address
:param device: Device address
:param profile:
:return:
"""
bus = dbus.SystemBus()
manager = dbus.Interface(
bus.get_object(constants.BLUEZ_SERVICE_NAME, '/'),
constants.DBUS_OM_IFACE)
mngd_objs = manager.GetManagedObjects()
_dbus_obj_path = None
if adapter is not None:
_dbus_obj_path = _get_dbus_path2(mngd_objs,
'/org/bluez',
constants.ADAPTER_INTERFACE,
'Address',
adapter)
if device is not None:
_dbus_obj_path = _get_dbus_path2(mngd_objs,
_dbus_obj_path,
constants.DEVICE_INTERFACE,
'Address',
device)
if profile is not None:
_dbus_obj_path = _get_dbus_path2(mngd_objs,
_dbus_obj_path,
constants.GATT_PROFILE_IFACE,
'UUID',
profile)
return _dbus_obj_path
def get_iface(adapter=None,
device=None,
service=None,
characteristic=None,
descriptor=None):
"""
For the given list of properties return the deepest interface
:param adapter: Adapter address
:param device: Device address
:param service: GATT Service UUID
:param characteristic: GATT Characteristic UUID
:param descriptor: GATT Descriptor UUID
:return: DBus Interface
"""
if adapter is not None:
_iface = constants.ADAPTER_INTERFACE
if device is not None:
_iface = constants.DEVICE_INTERFACE
if service is not None:
_iface = constants.GATT_SERVICE_IFACE
if characteristic is not None:
_iface = constants.GATT_CHRC_IFACE
if descriptor is not None:
_iface = constants.GATT_DESC_IFACE
return _iface
def get_methods(adapter=None,
device=None,
service=None,
characteristic=None,
descriptor=None):
"""
Get methods available for the specified
:param adapter: Adapter Address
:param device: Device Address
:param service: GATT Service UUID
:param characteristic: GATT Characteristic UUID
:param descriptor: GATT Descriptor UUID
:return: Object of the DBus methods available
"""
path_obj = get_dbus_path(adapter,
device,
service,
characteristic,
descriptor)
iface = get_iface(adapter,
device,
service,
characteristic,
descriptor)
return get_dbus_iface(iface, get_dbus_obj(path_obj))
def get_props(adapter=None,
device=None,
service=None,
characteristic=None,
descriptor=None):
"""
Get properties for the specified object
:param adapter: Adapter Address
:param device: Device Address
:param service: GATT Service UUID
:param characteristic: GATT Characteristic UUID
:param descriptor: GATT Descriptor UUID
:return: Object of the DBus properties available
"""
path_obj = get_dbus_path(adapter,
device,
service,
characteristic,
descriptor)
return get_dbus_iface(dbus.PROPERTIES_IFACE, get_dbus_obj(path_obj))
def str_to_dbusarray(word):
return dbus.Array([dbus.Byte(ord(letter)) for letter in word], 'y')
def bytes_to_dbusarray(bytesarray):
return dbus.Array([dbus.Byte(elem) for elem in bytesarray], 'y')
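# Usage sketch (requires a running BlueZ stack; the address and UUID-style
# values below are placeholders):
#
#   path = get_dbus_path(adapter='00:1A:7D:DA:71:13',
#                        device='F7:17:E4:09:C0:C6')
#   props = get_props(adapter='00:1A:7D:DA:71:13', device='F7:17:E4:09:C0:C6')
#   rssi = props.Get(constants.DEVICE_INTERFACE, 'RSSI')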
|
the-stack_106_22471 | # Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This defines a repository rule for configuring the rules' defaults.
The default can be specified as follows:
```python
=== WORKSPACE ===
load(
"@io_bazel_rules_k8s//k8s:with-defaults.bzl",
"k8s_defaults",
)
k8s_defaults(
# This is the name of the generated repository and the rule
# it defines.
name = "k8s_deploy",
# This is the kind of object the generated rule supports manipulating.
# If this is specified, it may not be overridden. If not, then it must
# be specified.
kind = "deployment",
)
=== BUILD ===
load("@k8s_deploy//:defaults.bzl", "k8s_deploy")
...
```
"""
# Generate an override statement for a particular attribute.
def _override(name, attr, value):
return """
if "{attr}" in kwargs:
fail("Cannot override '{attr}' in '{name}' rule.",
attr="{attr}")
kwargs["{attr}"] = "{value}"
""".format(name=name, attr=attr, value=value)
def _impl(repository_ctx):
"""Core implementation of k8s_defaults."""
# This is required by Bazel.
repository_ctx.file("BUILD", "")
overrides = []
if repository_ctx.attr.cluster:
overrides += [_override(repository_ctx.attr.name,
"cluster", repository_ctx.attr.cluster)]
if repository_ctx.attr.context:
overrides += [_override(repository_ctx.attr.name,
"context", repository_ctx.attr.context)]
if repository_ctx.attr.user:
overrides += [_override(repository_ctx.attr.name,
"user", repository_ctx.attr.user)]
if repository_ctx.attr.namespace:
overrides += [_override(repository_ctx.attr.name,
"namespace", repository_ctx.attr.namespace)]
if repository_ctx.attr.kind:
overrides += [_override(repository_ctx.attr.name,
"kind", repository_ctx.attr.kind)]
if repository_ctx.attr.image_chroot:
overrides += [_override(repository_ctx.attr.name,
"image_chroot", repository_ctx.attr.image_chroot)]
repository_ctx.file("defaults.bzl", """
load(
"@io_bazel_rules_k8s//k8s:object.bzl",
_k8s_object="k8s_object"
)
def {name}(**kwargs):
{overrides}
_k8s_object(**kwargs)
""".format(
name=repository_ctx.attr.name,
overrides="\n".join(overrides)
))
k8s_defaults = repository_rule(
attrs = {
"kind": attr.string(mandatory = False),
"cluster": attr.string(mandatory = False),
"context": attr.string(mandatory = False),
"user": attr.string(mandatory = False),
"namespace": attr.string(mandatory = False),
"image_chroot": attr.string(mandatory = False),
},
implementation = _impl,
)
|
the-stack_106_22472 | import names
import os
from tqdm import tqdm
to_change_path = '../static/artwork/face-terrors'
other_path = '../static/artwork/almost-human'
for filename in tqdm(os.listdir(to_change_path)):
if not filename.endswith(".png"):
continue
fn = names.get_first_name()
fname = f'{to_change_path}/{fn}.png'
fname_other_path = f'{other_path}/{fn}.png'
while os.path.isfile(fname) or os.path.isfile(fname_other_path):
fn = names.get_first_name()
fname = f'{to_change_path}/{fn}.png'
fname_other_path = f'{other_path}/{fn}.png'
os.rename(f'{to_change_path}/{filename}', fname) |
the-stack_106_22474 | import json
import sys
def main():
samples = 1
if len(sys.argv) > 1:
samples = sys.argv[1]
output_path = sys.argv[2]
print ("Generating " + samples + " samples")
h_cli_sc = []
broker = open(output_path+"/broker.json", "w")
broker_json = json.dumps(
{ "federates": [{"directory": ".",
"exec": "helics_app broker --federates " + str(int(samples) *2),
"host": "localhost",
"name": "broker_of_"+str(int(samples)*2)}],
"name" : "broker"},
indent=4, sort_keys=True)
broker.write(broker_json)
broker.close()
h_cli_sc.append(output_path+"/broker.json")
for i in range(int(samples)):
send_file_name = output_path+"/pysender"+str(i)+".json"
recv_file_name = output_path+"/pyrecv"+str(i)+".json"
sender = open(send_file_name, "w")
recv = open(recv_file_name, "w")
h_cli_sc.append(send_file_name)
h_cli_sc.append(recv_file_name)
send_name = "pisender"+str(i)
s_json = json.dumps(
{ "federates": [{"directory": ".",
"exec": "python3 -u pisender.py " + str(i),
"host": "localhost",
"name": send_name}],
"name" : send_name},
indent=4, sort_keys=True)
recv_name = "pireceiver"+str(i)
r_json = json.dumps(
{ "federates": [{"directory": ".",
"exec": "python3 -u pireceiver.py " + str(i),
"host": "localhost",
"name": recv_name}],
"name" : recv_name},
indent=4, sort_keys=True)
sender.write(s_json)
recv.write(r_json)
sender.close()
recv.close()
with open("samples.csv", "w") as f:
f.write("\n".join(h_cli_sc))
if __name__ == "__main__":
main()
|
the-stack_106_22476 | ## factory boy
import factory
# Own
from portfolio.models import Account
class AccountFactory(factory.django.DjangoModelFactory):
"""
Factory for creating accounts
"""
class Meta:
model = Account
# Account name by default will be 'Account 1' for the first created
# account, 'Account 2' for the next and so on
name = factory.Sequence(lambda n: 'Account {0}'.format(n))
base_currency = 'EUR'
|
the-stack_106_22477 | ######### global settings #########
GPU = True # running on GPU is highly suggested
GPU_ID = 0
TEST_MODE = False # turning on the testmode means the code will run on a small dataset.
CLEAN = True # set to "True" if you want to clean the temporary large files after generating result
MODEL = 'alexnet-r' # model arch: resnet18, alexnet, resnet50, densenet161
# MODEL_PATH = 'zoo/alexnet-r.pt' # resume model path for robust model
DATASET = 'imagenet' # model trained on: places365 or imagenet
QUANTILE = 0.005 # the threshold used for activation
SEG_THRESHOLD = 0.04 # the threshold used for visualization
SCORE_THRESHOLD = 0.04 # the threshold used for IoU score (in HTML file)
TOPN = 10 # to show top N image with highest activation for each unit
PARALLEL = 1 # how many process is used for tallying (Experiments show that 1 is the fastest)
LAYER_SECTION = None
CATAGORIES = ["object", "part", "scene", "texture", "color", "material"] # concept categories that are chosen to detect: "object", "part", "scene", "material", "texture", "color"
OUTPUT_FOLDER = "result/"+MODEL+"_"+DATASET # result will be stored in this folder
MADRYMODEL = True
"""setting for GoogleNet"""
AUXILIARY = True # whether Auxiliary layer are in used
########### sub settings ###########
# In most of the case, you don't have to change them.
# DATA_DIRECTORY: where broaden dataset locates
# IMG_SIZE: image size, alexnet use 227x227
# NUM_CLASSES: how many labels in final prediction
# FEATURE_NAMES: the array of layer where features will be extracted
# MODEL_FILE: the model file to be probed, "None" means the pretrained model in torchvision
# MODEL_PARALLEL: some model is trained in multi-GPU, so there is another way to load them.
# WORKERS: how many workers are fetching images
# BATCH_SIZE: batch size used in feature extraction
# TALLY_BATCH_SIZE: batch size used in tallying
# INDEX_FILE: if you turn on the TEST_MODE, actually you should provide this file on your own
if MODEL not in ('alexnet', 'alexnet-r'):  # was: MODEL != 'alexnet' or 'alexnet-r', which is always True
DATA_DIRECTORY = 'dataset/broden1_224'
IMG_SIZE = 224
else:
DATA_DIRECTORY = 'dataset/broden1_227'
IMG_SIZE = 227
if DATASET == 'places365':
NUM_CLASSES = 365
elif DATASET == 'imagenet':
NUM_CLASSES = 1000
# ResNet18
if MODEL == 'resnet18':
FEATURE_NAMES = ['layer4']
if DATASET == 'places365':
MODEL_FILE = '../zoo/resnet18_places365.pth.tar'
MODEL_PARALLEL = True
elif DATASET == 'imagenet':
MODEL_FILE = None
MODEL_PARALLEL = False
elif MODEL == 'densenet161':
FEATURE_NAMES = ['features']
if DATASET == 'places365':
MODEL_FILE = 'zoo/whole_densenet161_places365_python36.pth.tar'
MODEL_PARALLEL = False
# ResNet50
elif MODEL == 'resnet50':
FEATURE_NAMES = ['conv1', 'layer1', 'layer2', 'layer3', 'layer4']
MODEL_FILE = 'zoo/ResNet50.pt'
MODEL_PARALLEL = False
elif MODEL == 'resnet50-r':
FEATURE_NAMES = ['layer3', 'layer4']
MODEL_FILE = 'zoo/ResNet50_R.pt'
MODEL_PARALLEL = False
elif MODEL == 'resnet50-sin':
FEATURE_NAMES = ['layer3', 'layer4']
MODEL_FILE = 'zoo/resnet50_SIN.pth.tar'
MODEL_PARALLEL = True
elif MODEL == 'resnet50-sin-in':
FEATURE_NAMES = ['layer4']
MODEL_FILE = 'zoo/resnet50_sin_in.pth.tar'
MODEL_PARALLEL = True
elif MODEL == 'resnet50-ft':
FEATURE_NAMES = ['conv1', 'layer1', 'layer2', 'layer3', 'layer4']
MODEL_FILE = 'zoo/resnet50_ft.pth.tar'
MODEL_PARALLEL = True
# AlexNet
elif MODEL == 'alexnet':
LAYER_SECTION = 'features'
FEATURE_NAMES = ["1", "4", "7", "9", "11"]
if DATASET == 'imagenet':
MODEL_FILE = 'zoo/alexnet.pth'
MODEL_PARALLEL = False
elif MODEL == 'alexnet-r':
LAYER_SECTION = 'features'
FEATURE_NAMES = ["1", "4", "7", "9", "11"]
if DATASET == 'imagenet':
MODEL_FILE = 'zoo/alexnet-r.pt'
MODEL_PARALLEL = False
# GoogleNet
elif MODEL == 'googlenet':
FEATURE_NAMES = ["conv1", "conv2", "conv3", "inception3a", "inception3b", "inception4a", "inception4b",
"inception4c", "inception4d", "inception4e", "inception5a", "inception5b"]
if DATASET == 'imagenet':
MODEL_FILE = 'zoo/GoogleNet.pt'
MODEL_PARALLEL = True
elif MODEL == 'googlenet-r':
FEATURE_NAMES = ["inception4b"]
if DATASET == 'imagenet':
MODEL_FILE = 'zoo/GoogleNet_R.pt'
MODEL_PARALLEL = True
if TEST_MODE:
WORKERS = 1
BATCH_SIZE = 4
TALLY_BATCH_SIZE = 2
TALLY_AHEAD = 1
INDEX_FILE = 'index_sm.csv'
OUTPUT_FOLDER += "_test"
else:
WORKERS = 12
BATCH_SIZE = 128
TALLY_BATCH_SIZE = 16
TALLY_AHEAD = 4
INDEX_FILE = 'index.csv'
|
the-stack_106_22480 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 31 18:29:48 2022
@author: fabian
"""
import pytest
import os
import pypsa
import pandas as pd
import numpy as np
@pytest.fixture(scope="module")
def scipy_network():
csv_folder = os.path.join(
os.path.dirname(__file__),
"..",
"examples",
"scigrid-de",
"scigrid-with-load-gen-trafos",
)
return pypsa.Network(csv_folder)
@pytest.fixture(scope="module")
def ac_dc_network():
csv_folder = os.path.join(
os.path.dirname(__file__), "..", "examples", "ac-dc-meshed", "ac-dc-data"
)
return pypsa.Network(csv_folder)
@pytest.fixture(scope="module")
def ac_dc_network_multiindexed(ac_dc_network):
n = ac_dc_network
n.snapshots = pd.MultiIndex.from_product([[2013], n.snapshots])
gens_i = n.generators.index
n.generators_t.p[gens_i] = np.random.rand(len(n.snapshots), len(gens_i))
return n
|
the-stack_106_22482 | import math
import torch
import torch.nn as nn
import numpy as np
# from skimage.measure.simple_metrics import compare_psnr
from skimage.metrics import peak_signal_noise_ratio
import matplotlib.pyplot as plt
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
        nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif classname.find('Linear') != -1:
        nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif classname.find('BatchNorm') != -1:
# nn.init.uniform(m.weight.data, 1.0, 0.02)
m.weight.data.normal_(mean=0, std=math.sqrt(2./9./64.)).clamp_(-0.025,0.025)
        nn.init.constant_(m.bias.data, 0.0)
def batch_PSNR(img, imclean, data_range):
Img = img.data.cpu().numpy().astype(np.float32)
Iclean = imclean.data.cpu().numpy().astype(np.float32)
plt.figure()
plt.title("denoised")
plt.imshow(torch.squeeze(img.data.cpu(), 0).permute(1, 2, 0), cmap='gray')
plt.figure()
plt.title("GT")
plt.imshow(torch.squeeze(imclean.data.cpu(), 0).permute(1, 2, 0), cmap='gray')
plt.show()
PSNR = 0
for i in range(Img.shape[0]):
PSNR += peak_signal_noise_ratio(Iclean[i,:,:,:], Img[i,:,:,:], data_range=data_range)
return (PSNR/Img.shape[0])
def data_augmentation(image, mode):
out = np.transpose(image, (1,2,0))
if mode == 0:
# original
out = out
elif mode == 1:
# flip up and down
out = np.flipud(out)
elif mode == 2:
# rotate counterwise 90 degree
out = np.rot90(out)
elif mode == 3:
# rotate 90 degree and flip up and down
out = np.rot90(out)
out = np.flipud(out)
elif mode == 4:
# rotate 180 degree
out = np.rot90(out, k=2)
elif mode == 5:
# rotate 180 degree and flip
out = np.rot90(out, k=2)
out = np.flipud(out)
elif mode == 6:
# rotate 270 degree
out = np.rot90(out, k=3)
elif mode == 7:
# rotate 270 degree and flip
out = np.rot90(out, k=3)
out = np.flipud(out)
return np.transpose(out, (2,0,1))
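# Usage sketch (illustrative): apply a random flip/rotation to a CHW patch.
#
#   patch = np.random.rand(3, 40, 40).astype(np.float32)        # C x H x W
#   augmented = data_augmentation(patch, mode=np.random.randint(0, 8))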
|
the-stack_106_22485 | import torch
import torch.nn as nn
from SelfAttention import SelfAttention
class TransformerBlock(nn.Module):
def __init__(self, embedding_size, heads, dropout, forward_expansion):
super(TransformerBlock, self).__init__()
self.attention = SelfAttention(embedding_size, heads)
self.norm1 = nn.LayerNorm(embedding_size)
self.norm2 = nn.LayerNorm(embedding_size)
self.feed_forward = nn.Sequential(
nn.Linear(embedding_size, forward_expansion * embedding_size),
nn.ReLU(),
nn.Linear(forward_expansion * embedding_size, embedding_size),
)
self.dropout = nn.Dropout(dropout)
def forward(self, value, key, query, mask):
attention = self.attention(value, key, query, mask)
# Add skip connection, normalize then dropout
x = self.dropout(self.norm1(attention + query))
forward = self.feed_forward(x)
out = self.dropout(self.norm2(forward + x))
return out |
the-stack_106_22486 | #!/usr/bin/env python
from __future__ import print_function
import io
import logging
import argparse
from parser import Box
from construct import setglobalfullprinting
from summary import Summary
log = logging.getLogger(__name__)
setglobalfullprinting(True)
def dump(input_file):
with open(input_file, 'rb') as fd:
fd.seek(0, io.SEEK_END)
eof = fd.tell()
fd.seek(0)
boxes = []
while fd.tell() < eof:
box = Box.parse_stream(fd)
boxes.append(box)
#print(box)
summary = Summary (input_file, boxes)
print(summary.data)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Dump all the boxes from an MP4 file")
parser.add_argument(
"input_file",
type=str,
metavar="FILE",
help="Path to the MP4 file to open",
)
args = parser.parse_args()
dump(args.input_file) |
the-stack_106_22488 | #import sys
#sys.path.append('c:\\Users\\Thoma\\OneDrive\\Documents\\2021_ORNL\\CartanCodeGit\\cartan-quantum-synthesizer')
# -*- coding: utf-8 -*-
__docformat__ = 'google'
"""
A collection of functions useful for exact diagonalization and converting KHK decomposition to a matrix
"""
import numpy as np
from numpy import kron
from scipy.linalg import expm, norm
import CQS.util.IO as IO
# The Pauli matrices in matrix form
X = np.array([[0,1],[1,0]])
#Pauli X
Y = np.array([[0,-1j],[1j,0]])
#Pauli Y
Z = np.array([[1,0],[0,-1]])
#PauliZ
I = np.array([[1,0],[0,1]])
#2x2 idenity
paulis = [I,X,Y,Z]
# Allows for indexing the Pauli Arrays (Converting from tuple form (0,1,2,3) to string form IXYZ)
def Nident (N):
""" Generates an N qubit Identity Matrix """
return np.diag(np.ones(2**N))
def PauliExpUnitary(N, co, PauliTuple):
"""
Generates the Unitary Matrix for a Pauli Exponential
Uses e^{i.co.Pauli} = I*cos(a) + i*sin(a)*Pauli
Args:
N (int): Number of qubits
co (float): The coefficient of the Pauli Matrix
PauliTuple (Tuple): (PauliString) to exp
Returns:
The result e<sup>i•co•PauliTuple</sup> = I•cos(co) + i•sin(co)•PauliTuple
"""
II = Nident(N)
U = paulis[PauliTuple[0]]
for pauli in PauliTuple[1:]:
U = kron(U,paulis[pauli]) #Generates the PauliTuple Matrix Element
return np.cos(co)*II + 1j*np.sin(co)*U
def exactU(HCos, HTups, time):
"""
    Computes the exact matrix exponential for time evolution at time t. Takes as input the real component of the exponential.
    Args:
        HCos (List of complex numbers): coefficients of the Hamiltonian terms
        HTups (List of (PauliStrings)): Pauli strings of the Hamiltonian terms
        time (float): final time for the time evolution
"""
H = np.diag(np.zeros(2**len(HTups[0])))
for (co, term) in zip(HCos, HTups):
H = H + IO.tuplesToMatrix(co, term)
return expm(1j * time * H)
def Trotter(ham, time, N, steps):
"""
Prepares U_t, the Trotterized input U
Args:
ham (List of Tuples): Hamiltonian formatted as (co, (PauliString))
time (float): final time to evolve to
N (int): number of qubits
steps (int): Number of trotter steps to take
Returns:
The U<sub>trotter</sub>(t) that approximates U<sub>exact</sub>(t)
"""
timeStep = time/steps
U = Nident(N)
for (co, pauliTuple) in ham:
U = U @ PauliExpUnitary(N, 1*co*timeStep, pauliTuple)
finalU = Nident(N)
for i in range(steps):
finalU = finalU @ U
return finalU
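# Consistency sketch (illustrative only; exact sign conventions depend on
# IO.tuplesToMatrix): the Trotterized propagator should approach the exact one
# as the number of steps grows.
#
#   ham = [(0.5, (3, 3)), (0.3, (1, 0))]   # 0.5*ZZ + 0.3*XI on 2 qubits
#   U_exact = exactU([c for c, _ in ham], [p for _, p in ham], time=1.0)
#   U_trot = Trotter(ham, time=1.0, N=2, steps=100)
#   err = norm(U_exact - U_trot)           # should shrink as steps increases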
def KHK(kCoefs, hCoefs, k, h):
"""
Defines the Unitary for the KHK<sup>†</sup>]
Specifically, performs ℿ<sub>i</sub> e<sup>i•k[l]•kCoefs[l]</sup> ℿ<sub>j</sub> e<sup>i•h[j]•hCoefs[j]</sup> ℿ<sub>l</sub> e<sup>i•k[(lenK - l)]•kCoefs[(lenK - l)]</sup>
Multiply by t before passing the coefficients for h. Do not multiply h by i, that is automatic. The coefficients should be real for k, imaginary for h
Args:
kCoefs (List): A list of (real) coefficients for k
hCoefs (List): The list of (imaginary) coefficients for the elements in h.
k (List of Tuples): The list of (PauliStrings)
h (List of Tuples): List of (PauliStrings) for h (in the same indexing)
"""
N = len(h[0])
KHK = Nident(N)
#First loop of K terms:
for (term, co) in zip(k, kCoefs):
KHK = KHK @ PauliExpUnitary(N, co, term)
#H terms
for (term, co) in zip(h, hCoefs):
KHK = KHK @ PauliExpUnitary(N, co, term)
for (term, co) in zip(k[::-1], kCoefs[::-1]):
KHK = KHK @ PauliExpUnitary(N, -1*co, term)
return KHK |
the-stack_106_22490 | # coding: utf-8
from __future__ import unicode_literals
import base64
import collections
import hashlib
import itertools
import json
import netrc
import os
import random
import re
import sys
import time
import math
from ..compat import (
compat_cookiejar_Cookie,
compat_cookies_SimpleCookie,
compat_etree_Element,
compat_etree_fromstring,
compat_expanduser,
compat_getpass,
compat_http_client,
compat_os_name,
compat_str,
compat_urllib_error,
compat_urllib_parse_unquote,
compat_urllib_parse_urlencode,
compat_urllib_request,
compat_urlparse,
compat_xml_parse_error,
)
from ..downloader import FileDownloader
from ..downloader.f4m import (
get_base_url,
remove_encrypted_media,
)
from ..utils import (
age_restricted,
base_url,
bug_reports_message,
clean_html,
compiled_regex_type,
determine_ext,
determine_protocol,
dict_get,
encode_data_uri,
error_to_compat_str,
extract_attributes,
ExtractorError,
fix_xml_ampersands,
float_or_none,
format_field,
GeoRestrictedError,
GeoUtils,
int_or_none,
join_nonempty,
js_to_json,
JSON_LD_RE,
mimetype2ext,
network_exceptions,
NO_DEFAULT,
orderedSet,
parse_bitrate,
parse_codecs,
parse_duration,
parse_iso8601,
parse_m3u8_attributes,
parse_resolution,
RegexNotFoundError,
sanitize_filename,
sanitized_Request,
str_or_none,
str_to_int,
strip_or_none,
traverse_obj,
try_get,
unescapeHTML,
UnsupportedError,
unified_strdate,
unified_timestamp,
update_Request,
update_url_query,
url_basename,
url_or_none,
urljoin,
variadic,
xpath_element,
xpath_text,
xpath_with_ns,
)
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
information possibly downloading the video to the file system, among
other possible outcomes.
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url The mandatory URL representing the media:
for plain file media - HTTP URL of this file,
for RTMP - RTMP URL,
for HLS - URL of the M3U8 media playlist,
for HDS - URL of the F4M manifest,
for DASH
- HTTP URL to plain file media (in case of
unfragmented media)
- URL of the MPD manifest or base URL
representing the media if MPD manifest
is parsed from a string (in case of
fragmented media)
for MSS - URL of the ISM manifest.
* manifest_url
The URL of the manifest file in case of
fragmented media:
for HLS - URL of the M3U8 master playlist,
for HDS - URL of the F4M manifest,
for DASH - URL of the MPD manifest,
for MSS - URL of the ISM manifest.
* ext Will be calculated from URL if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
Calculated from the format_id, width, height.
and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* dynamic_range The dynamic range of the video. One of:
"SDR" (None), "HDR10", "HDR10+, "HDR12", "HLG, "DV"
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case. One of "http", "https" or
one of the protocols defined in downloader.PROTOCOL_MAP
* fragment_base_url
Base URL for fragments. Each fragment's path
value (if present) will be relative to
this URL.
* fragments A list of fragments of a fragmented media.
Each fragment entry must contain either an url
or a path. If an url is present it should be
considered by a client. Otherwise both path and
fragment_base_url must be present. Here is
the list of all potential fields:
* "url" - fragment's URL
* "path" - fragment's path relative to
fragment_base_url
* "duration" (optional, int or float)
* "filesize" (optional, int)
* is_from_start Is a live format that can be downloaded
from the start. Boolean
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
* language Language code, e.g. "de" or "en-US".
* language_preference Is this in the language mentioned in
the URL?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* stretched_ratio If given and not 1, indicates that the
video's pixels are not square.
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
* has_drm The format has DRM and cannot be downloaded. Boolean
* downloader_options A dictionary of downloader options as
described in FileDownloader
RTMP formats can also have the additional fields: page_url,
app, play_path, tc_url, flash_version, rtmp_live, rtmp_conn,
rtmp_protocol, rtmp_real_time
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
The following fields are optional:
alt_title: A secondary title of the video.
display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "id" (optional, string) - Thumbnail format ID
* "url"
* "preference" (optional, int) - quality of the image
* "width" (optional, int)
* "height" (optional, int)
* "resolution" (optional, string "{width}x{height}",
deprecated)
* "filesize" (optional, int)
* "http_headers" (dict) - HTTP headers for the request
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
license: License name the video is licensed under.
creator: The creator of the video.
timestamp: UNIX timestamp of the moment the video was uploaded
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp
release_timestamp: UNIX timestamp of the moment the video was released.
If it is not clear whether to use timestamp or this, use the former
release_date: The date (YYYYMMDD) when the video was released.
If not explicitly set, calculated from release_timestamp
modified_timestamp: UNIX timestamp of the moment the video was last modified.
modified_date: The date (YYYYMMDD) when the video was last modified.
If not explicitly set, calculated from modified_timestamp
uploader_id: Nickname or id of the video uploader.
uploader_url: Full URL to a personal webpage of the video uploader.
channel: Full name of the channel the video is uploaded on.
Note that channel fields may or may not repeat uploader
fields. This depends on a particular extractor.
channel_id: Id of the channel.
channel_url: Full URL to a channel webpage.
channel_follower_count: Number of followers of the channel.
location: Physical location where the video was filmed.
subtitles: The available subtitles as a dictionary in the format
{tag: subformats}. "tag" is usually a language code, and
"subformats" is a list sorted from lower to higher
preference, each element is a dictionary with the "ext"
entry and one of:
* "data": The subtitles file contents
* "url": A URL pointing to the subtitles file
It can optionally also have:
* "name": Name or description of the subtitles
* http_headers: A dictionary of additional HTTP headers
to add to the request.
"ext" will be calculated from URL if missing
automatic_captions: Like 'subtitles'; contains automatically generated
captions instead of normal subtitles
duration: Length of the video in seconds, as an integer or float.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
repost_count: Number of reposts of the video
average_rating: Average rating given by users; the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
properties (all but one of text or html optional):
* "author" - human-readable name of the comment author
* "author_id" - user ID of the comment author
* "author_thumbnail" - The thumbnail of the comment author
* "id" - Comment ID
* "html" - Comment as HTML
* "text" - Plain text of the comment
* "timestamp" - UNIX timestamp of comment
* "parent" - ID of the comment this one is replying to.
Set to "root" to indicate that this is a
comment to the original video.
* "like_count" - Number of positive ratings of the comment
* "dislike_count" - Number of negative ratings of the comment
* "is_favorited" - Whether the comment is marked as
favorite by the video uploader
* "author_is_uploader" - Whether the comment is made by
the video uploader
age_limit: Age restriction for the video, as an integer (years)
webpage_url: The URL to the video webpage, if given to yt-dlp it
should allow getting the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
cast: A list of the video cast
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
was_live: True, False, or None (=unknown). Whether this video was
originally a live stream.
live_status: 'is_live', 'is_upcoming', 'was_live', 'not_live' or None (=unknown)
If absent, automatically set from is_live, was_live
start_time: Time in seconds where the reproduction should start, as
specified in the URL.
end_time: Time in seconds where the reproduction should end, as
specified in the URL.
chapters: A list of dictionaries, with the following entries:
* "start_time" - The start time of the chapter in seconds
* "end_time" - The end time of the chapter in seconds
* "title" (optional, string)
playable_in_embed: Whether this video is allowed to play in embedded
players on other sites. Can be True (=always allowed),
False (=never allowed), None (=unknown), or a string
specifying the criteria for embedability (Eg: 'whitelist')
availability: Under what condition the video is available. One of
'private', 'premium_only', 'subscriber_only', 'needs_auth',
'unlisted' or 'public'. Use 'InfoExtractor._availability'
to set it
__post_extractor: A function to be called just before the metadata is
written to either disk, logger or console. The function
must return a dict which will be added to the info_dict.
This is useful for additional information that is
time-consuming to extract. Note that the fields thus
extracted will not be available to output template and
match_filter. So, only "comments" and "comment_count" are
currently allowed to be extracted via this method.
The following fields should only be used when the video belongs to some logical
chapter or section:
chapter: Name or title of the chapter the video belongs to.
chapter_number: Number of the chapter the video belongs to, as an integer.
chapter_id: Id of the chapter the video belongs to, as a unicode string.
The following fields should only be used when the video is an episode of some
series, programme or podcast:
series: Title of the series or programme the video episode belongs to.
series_id: Id of the series or programme the video episode belongs to, as a unicode string.
season: Title of the season the video episode belongs to.
season_number: Number of the season the video episode belongs to, as an integer.
season_id: Id of the season the video episode belongs to, as a unicode string.
episode: Title of the video episode. Unlike mandatory video title field,
this field should denote the exact title of the video episode
without any kind of decoration.
episode_number: Number of the video episode within a season, as an integer.
episode_id: Id of the video episode, as a unicode string.
The following fields should only be used when the media is a track or a part of
a music album:
track: Title of the track.
track_number: Number of the track within an album or a disc, as an integer.
track_id: Id of the track (useful in case of custom indexing, e.g. 6.iii),
as a unicode string.
artist: Artist(s) of the track.
genre: Genre(s) of the track.
album: Title of the album the track belongs to.
album_type: Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
album_artist: List of all artists appeared on the album (e.g.
"Ash Borer / Fell Voices" or "Various Artists", useful for splits
and compilations).
disc_number: Number of the disc or other physical medium the track belongs to,
as an integer.
release_year: Year (YYYY) when the album was released.
composer: Composer of the piece
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "id", "title", and any other relevant
attributes with the same semantics as videos (see above).
It can also have the following optional fields:
playlist_count: The total number of videos in a playlist. If not given,
YoutubeDL tries to calculate it from "entries"
_type "multi_video" indicates that there are multiple videos that
form a single show, for example multiple acts of an opera or TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
Subclasses may also override suitable() if necessary, but ensure the function
signature is preserved and that this function imports everything it needs
(except other extractors), so that lazy_extractors works correctly
_GEO_BYPASS attribute may be set to False in order to disable
geo restriction bypass mechanisms for a particular extractor.
Though it won't disable explicit geo restriction bypass based on
country code provided with geo_bypass_country.
_GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted
countries for this extractor. One of these countries will be used by
geo restriction bypass mechanism right away in order to bypass
geo restriction, of course, if the mechanism is not disabled.
_GEO_IP_BLOCKS attribute may contain a list of presumably geo unrestricted
IP blocks in CIDR notation for this extractor. One of these IP blocks
will be used by geo restriction bypass mechanism similarly
to _GEO_COUNTRIES.
The _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
"""
_ready = False
_downloader = None
_x_forwarded_for_ip = None
_GEO_BYPASS = True
_GEO_COUNTRIES = None
_GEO_IP_BLOCKS = None
_WORKING = True
_LOGIN_HINTS = {
'any': 'Use --cookies, --username and --password, or --netrc to provide account credentials',
'cookies': (
'Use --cookies-from-browser or --cookies for the authentication. '
'See https://github.com/ytdl-org/youtube-dl#how-do-i-pass-cookies-to-youtube-dl for how to manually pass cookies'),
'password': 'Use --username and --password, or --netrc to provide account credentials',
}
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader (a YoutubeDL instance).
If a downloader is not passed during initialization,
it must be set using "set_downloader()" before "extract()" is called"""
self._ready = False
self._x_forwarded_for_ip = None
self._printed_messages = set()
self.set_downloader(downloader)
@classmethod
def _match_valid_url(cls, url):
# This does not use has/getattr intentionally - we want to know whether
# we have cached the regexp for *this* class, whereas getattr would also
# match the superclass
if '_VALID_URL_RE' not in cls.__dict__:
if '_VALID_URL' not in cls.__dict__:
cls._VALID_URL = cls._make_valid_url()
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url)
@classmethod
def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
# This function must import everything it needs (except other extractors),
# so that lazy_extractors works correctly
return cls._match_valid_url(url) is not None
@classmethod
def _match_id(cls, url):
return cls._match_valid_url(url).group('id')
@classmethod
def get_temp_id(cls, url):
try:
return cls._match_id(url)
except (IndexError, AttributeError):
return None
@classmethod
def working(cls):
"""Getter method for _WORKING."""
return cls._WORKING
def initialize(self):
"""Initializes an instance (authentication, etc)."""
self._printed_messages = set()
self._initialize_geo_bypass({
'countries': self._GEO_COUNTRIES,
'ip_blocks': self._GEO_IP_BLOCKS,
})
if not self._ready:
self._real_initialize()
self._ready = True
def _initialize_geo_bypass(self, geo_bypass_context):
"""
Initialize geo restriction bypass mechanism.
This method is used to initialize geo bypass mechanism based on faking
X-Forwarded-For HTTP header. A random country from the provided country list
is selected and a random IP belonging to this country is generated. This
IP will be passed as X-Forwarded-For HTTP header in all subsequent
HTTP requests.
This method will be used for initial geo bypass mechanism initialization
during the instance initialization with _GEO_COUNTRIES and
_GEO_IP_BLOCKS.
You may also manually call it from extractor's code if geo bypass
information is not available beforehand (e.g. obtained during
extraction) or due to some other reason. In this case you should pass
this information in geo bypass context passed as first argument. It may
contain following fields:
countries: List of geo unrestricted countries (similar
to _GEO_COUNTRIES)
ip_blocks: List of geo unrestricted IP blocks in CIDR notation
(similar to _GEO_IP_BLOCKS)
"""
if not self._x_forwarded_for_ip:
# Geo bypass mechanism is explicitly disabled by user
if not self.get_param('geo_bypass', True):
return
if not geo_bypass_context:
geo_bypass_context = {}
# Backward compatibility: previously _initialize_geo_bypass
# expected a list of countries, some 3rd party code may still use
# it this way
if isinstance(geo_bypass_context, (list, tuple)):
geo_bypass_context = {
'countries': geo_bypass_context,
}
# The whole point of geo bypass mechanism is to fake IP
# as X-Forwarded-For HTTP header based on some IP block or
# country code.
# Path 1: bypassing based on IP block in CIDR notation
# Explicit IP block specified by user, use it right away
# regardless of whether extractor is geo bypassable or not
ip_block = self.get_param('geo_bypass_ip_block', None)
# Otherwise use random IP block from geo bypass context but only
# if extractor is known as geo bypassable
if not ip_block:
ip_blocks = geo_bypass_context.get('ip_blocks')
if self._GEO_BYPASS and ip_blocks:
ip_block = random.choice(ip_blocks)
if ip_block:
self._x_forwarded_for_ip = GeoUtils.random_ipv4(ip_block)
self._downloader.write_debug(
'[debug] Using fake IP %s as X-Forwarded-For' % self._x_forwarded_for_ip)
return
# Path 2: bypassing based on country code
# Explicit country code specified by user, use it right away
# regardless of whether extractor is geo bypassable or not
country = self.get_param('geo_bypass_country', None)
# Otherwise use random country code from geo bypass context but
# only if extractor is known as geo bypassable
if not country:
countries = geo_bypass_context.get('countries')
if self._GEO_BYPASS and countries:
country = random.choice(countries)
if country:
self._x_forwarded_for_ip = GeoUtils.random_ipv4(country)
self._downloader.write_debug(
'Using fake IP %s (%s) as X-Forwarded-For' % (self._x_forwarded_for_ip, country.upper()))
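# Illustrative (hedged) example of calling this manually from an extractor, e.g.
# when geo information only becomes known during extraction, as the docstring
# above allows; the country code and CIDR block are placeholders:
#
#     self._initialize_geo_bypass({
#         'countries': ['US'],
#         'ip_blocks': ['192.0.2.0/24'],
#     })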
def extract(self, url):
"""Extracts URL information and returns it in list of dicts."""
try:
for _ in range(2):
try:
self.initialize()
self.write_debug('Extracting URL: %s' % url)
ie_result = self._real_extract(url)
if ie_result is None:
return None
if self._x_forwarded_for_ip:
ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
subtitles = ie_result.get('subtitles')
if (subtitles and 'live_chat' in subtitles
and 'no-live-chat' in self.get_param('compat_opts', [])):
del subtitles['live_chat']
return ie_result
except GeoRestrictedError as e:
if self.__maybe_fake_ip_and_retry(e.countries):
continue
raise
except UnsupportedError:
raise
except ExtractorError as e:
kwargs = {
'video_id': e.video_id or self.get_temp_id(url),
'ie': self.IE_NAME,
'tb': e.traceback or sys.exc_info()[2],
'expected': e.expected,
'cause': e.cause
}
if hasattr(e, 'countries'):
kwargs['countries'] = e.countries
raise type(e)(e.msg, **kwargs)
except compat_http_client.IncompleteRead as e:
raise ExtractorError('A network error has occurred.', cause=e, expected=True, video_id=self.get_temp_id(url))
except (KeyError, StopIteration) as e:
raise ExtractorError('An extractor error has occurred.', cause=e, video_id=self.get_temp_id(url))
def __maybe_fake_ip_and_retry(self, countries):
if (not self.get_param('geo_bypass_country', None)
and self._GEO_BYPASS
and self.get_param('geo_bypass', True)
and not self._x_forwarded_for_ip
and countries):
country_code = random.choice(countries)
self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
if self._x_forwarded_for_ip:
self.report_warning(
'Video is geo restricted. Retrying extraction with fake IP %s (%s) as X-Forwarded-For.'
% (self._x_forwarded_for_ip, country_code.upper()))
return True
return False
def set_downloader(self, downloader):
"""Sets the downloader for this IE."""
self._downloader = downloader
def _real_initialize(self):
"""Real initialization process. Redefine in subclasses."""
pass
def _real_extract(self, url):
"""Real extraction process. Redefine in subclasses."""
pass
@classmethod
def ie_key(cls):
"""A string for getting the InfoExtractor with get_info_extractor"""
return cls.__name__[:-2]
@property
def IE_NAME(self):
return compat_str(type(self).__name__[:-2])
@staticmethod
def __can_accept_status_code(err, expected_status):
assert isinstance(err, compat_urllib_error.HTTPError)
if expected_status is None:
return False
elif callable(expected_status):
return expected_status(err.code) is True
else:
return err.code in variadic(expected_status)
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}, expected_status=None):
"""
Return the response handle.
See _download_webpage docstring for arguments specification.
"""
if not self._downloader._first_webpage_request:
sleep_interval = self.get_param('sleep_interval_requests') or 0
if sleep_interval > 0:
self.to_screen('Sleeping %s seconds ...' % sleep_interval)
time.sleep(sleep_interval)
else:
self._downloader._first_webpage_request = False
if note is None:
self.report_download_webpage(video_id)
elif note is not False:
if video_id is None:
self.to_screen('%s' % (note,))
else:
self.to_screen('%s: %s' % (video_id, note))
# Some sites check X-Forwarded-For HTTP header in order to figure out
# the origin of the client behind proxy. This allows bypassing geo
# restriction by faking this header's value to IP that belongs to some
# geo unrestricted country. We will do so once we encounter any
# geo restriction error.
if self._x_forwarded_for_ip:
if 'X-Forwarded-For' not in headers:
headers['X-Forwarded-For'] = self._x_forwarded_for_ip
if isinstance(url_or_request, compat_urllib_request.Request):
url_or_request = update_Request(
url_or_request, data=data, headers=headers, query=query)
else:
if query:
url_or_request = update_url_query(url_or_request, query)
if data is not None or headers:
url_or_request = sanitized_Request(url_or_request, data, headers)
try:
return self._downloader.urlopen(url_or_request)
except network_exceptions as err:
if isinstance(err, compat_urllib_error.HTTPError):
if self.__can_accept_status_code(err, expected_status):
# Retain reference to error to prevent file object from
# being closed before it can be read. Works around the
# effects of <https://bugs.python.org/issue15002>
# introduced in Python 3.4.1.
err.fp._error = err
return err.fp
if errnote is False:
return False
if errnote is None:
errnote = 'Unable to download webpage'
errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
self.report_warning(errmsg)
return False
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
"""
Return a tuple (page content as string, URL handle).
See _download_webpage docstring for arguments specification.
"""
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query, expected_status=expected_status)
if urlh is False:
assert not fatal
return False
content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
return (content, urlh)
@staticmethod
def _guess_encoding_from_content(content_type, webpage_bytes):
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
elif webpage_bytes.startswith(b'\xff\xfe'):
encoding = 'utf-16'
else:
encoding = 'utf-8'
return encoding
def __check_blocked(self, content):
first_block = content[:512]
if ('<title>Access to this site is blocked</title>' in content
and 'Websense' in first_block):
msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
blocked_iframe = self._html_search_regex(
r'<iframe src="([^"]+)"', content,
'Websense information URL', default=None)
if blocked_iframe:
msg += ' Visit %s for more details' % blocked_iframe
raise ExtractorError(msg, expected=True)
if '<title>The URL you requested has been blocked</title>' in first_block:
msg = (
'Access to this webpage has been blocked by Indian censorship. '
'Use a VPN or proxy server (with --proxy) to route around it.')
block_msg = self._html_search_regex(
r'</h1><p>(.*?)</p>',
content, 'block message', default=None)
if block_msg:
msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
raise ExtractorError(msg, expected=True)
if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content
and 'blocklist.rkn.gov.ru' in content):
raise ExtractorError(
'Access to this webpage has been blocked by decision of the Russian government. '
'Visit http://blocklist.rkn.gov.ru/ for a block reason.',
expected=True)
def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
content_type = urlh.headers.get('Content-Type', '')
webpage_bytes = urlh.read()
if prefix is not None:
webpage_bytes = prefix + webpage_bytes
if not encoding:
encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
if self.get_param('dump_intermediate_pages', False):
self.to_screen('Dumping request to ' + urlh.geturl())
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
if self.get_param('write_pages', False):
basen = '%s_%s' % (video_id, urlh.geturl())
trim_length = self.get_param('trim_file_name') or 240
if len(basen) > trim_length:
h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
basen = basen[:trim_length - len(h)] + h
raw_filename = basen + '.dump'
filename = sanitize_filename(raw_filename, restricted=True)
self.to_screen('Saving request to ' + filename)
# Working around MAX_PATH limitation on Windows (see
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
if compat_os_name == 'nt':
absfilepath = os.path.abspath(filename)
if len(absfilepath) > 259:
filename = '\\\\?\\' + absfilepath
with open(filename, 'wb') as outf:
outf.write(webpage_bytes)
try:
content = webpage_bytes.decode(encoding, 'replace')
except LookupError:
content = webpage_bytes.decode('utf-8', 'replace')
self.__check_blocked(content)
return content
def _download_webpage(
self, url_or_request, video_id, note=None, errnote=None,
fatal=True, tries=1, timeout=5, encoding=None, data=None,
headers={}, query={}, expected_status=None):
"""
Return the data of the page as a string.
Arguments:
url_or_request -- plain text URL as a string or
a compat_urllib_request.Request object
video_id -- Video/playlist/item identifier (string)
Keyword arguments:
note -- note printed before downloading (string)
errnote -- note printed in case of an error (string)
fatal -- flag denoting whether error should be considered fatal,
i.e. whether it should cause ExtractorError to be raised,
otherwise a warning will be reported and extraction continued
tries -- number of tries
timeout -- sleep interval between tries
encoding -- encoding for a page content decoding, guessed automatically
when not explicitly specified
data -- POST data (bytes)
headers -- HTTP headers (dict)
query -- URL query (dict)
expected_status -- allows accepting failed HTTP requests (non-2xx
status code) by explicitly specifying a set of accepted status
codes. Can be any of the following entities:
- an integer type specifying an exact failed status code to
accept
- a list or a tuple of integer types specifying a list of
failed status codes to accept
- a callable accepting an actual failed status code and
returning True if it should be accepted
Note that this argument does not affect success status codes (2xx)
which are always accepted.
"""
success = False
try_count = 0
while success is False:
try:
res = self._download_webpage_handle(
url_or_request, video_id, note, errnote, fatal,
encoding=encoding, data=data, headers=headers, query=query,
expected_status=expected_status)
success = True
except compat_http_client.IncompleteRead as e:
try_count += 1
if try_count >= tries:
raise e
self._sleep(timeout, video_id)
if res is False:
return res
else:
content, _ = res
return content
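# Illustrative (hedged) usage from a subclass; the URL, note and status code are
# placeholders. expected_status makes the body of a non-2xx response usable
# instead of raising:
#
#     webpage = self._download_webpage(
#         'https://example.com/video/123', '123',
#         note='Downloading video page',
#         expected_status=404)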
def _download_xml_handle(
self, url_or_request, video_id, note='Downloading XML',
errnote='Unable to download XML', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return a tuple (xml as a compat_etree_Element, URL handle).
See _download_webpage docstring for arguments specification.
"""
res = self._download_webpage_handle(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding, data=data, headers=headers, query=query,
expected_status=expected_status)
if res is False:
return res
xml_string, urlh = res
return self._parse_xml(
xml_string, video_id, transform_source=transform_source,
fatal=fatal), urlh
def _download_xml(
self, url_or_request, video_id,
note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True, encoding=None,
data=None, headers={}, query={}, expected_status=None):
"""
Return the xml as a compat_etree_Element.
See _download_webpage docstring for arguments specification.
"""
res = self._download_xml_handle(
url_or_request, video_id, note=note, errnote=errnote,
transform_source=transform_source, fatal=fatal, encoding=encoding,
data=data, headers=headers, query=query,
expected_status=expected_status)
return res if res is False else res[0]
def _parse_xml(self, xml_string, video_id, transform_source=None, fatal=True):
if transform_source:
xml_string = transform_source(xml_string)
try:
return compat_etree_fromstring(xml_string.encode('utf-8'))
except compat_xml_parse_error as ve:
errmsg = '%s: Failed to parse XML ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def _download_json_handle(
self, url_or_request, video_id, note='Downloading JSON metadata',
errnote='Unable to download JSON metadata', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return a tuple (JSON object, URL handle).
See _download_webpage docstring for arguments specification.
"""
res = self._download_webpage_handle(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding, data=data, headers=headers, query=query,
expected_status=expected_status)
if res is False:
return res
json_string, urlh = res
return self._parse_json(
json_string, video_id, transform_source=transform_source,
fatal=fatal), urlh
def _download_json(
self, url_or_request, video_id, note='Downloading JSON metadata',
errnote='Unable to download JSON metadata', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return the JSON object as a dict.
See _download_webpage docstring for arguments specification.
"""
res = self._download_json_handle(
url_or_request, video_id, note=note, errnote=errnote,
transform_source=transform_source, fatal=fatal, encoding=encoding,
data=data, headers=headers, query=query,
expected_status=expected_status)
return res if res is False else res[0]
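# Illustrative (hedged) usage; the endpoint, query and headers are placeholders:
#
#     metadata = self._download_json(
#         'https://example.com/api/video', video_id,
#         query={'id': video_id}, headers={'Referer': url},
#         note='Downloading video metadata JSON')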
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def _parse_socket_response_as_json(self, data, video_id, transform_source=None, fatal=True):
return self._parse_json(
data[data.find('{'):data.rfind('}') + 1],
video_id, transform_source, fatal)
def _download_socket_json_handle(
self, url_or_request, video_id, note='Polling socket',
errnote='Unable to poll socket', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return a tuple (JSON object, URL handle).
See _download_webpage docstring for arguments specification.
"""
res = self._download_webpage_handle(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding, data=data, headers=headers, query=query,
expected_status=expected_status)
if res is False:
return res
webpage, urlh = res
return self._parse_socket_response_as_json(
webpage, video_id, transform_source=transform_source,
fatal=fatal), urlh
def _download_socket_json(
self, url_or_request, video_id, note='Polling socket',
errnote='Unable to poll socket', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return the JSON object as a dict.
See _download_webpage docstring for arguments specification.
"""
res = self._download_socket_json_handle(
url_or_request, video_id, note=note, errnote=errnote,
transform_source=transform_source, fatal=fatal, encoding=encoding,
data=data, headers=headers, query=query,
expected_status=expected_status)
return res if res is False else res[0]
def report_warning(self, msg, video_id=None, *args, only_once=False, **kwargs):
idstr = format_field(video_id, template='%s: ')
msg = f'[{self.IE_NAME}] {idstr}{msg}'
if only_once:
if f'WARNING: {msg}' in self._printed_messages:
return
self._printed_messages.add(f'WARNING: {msg}')
self._downloader.report_warning(msg, *args, **kwargs)
def to_screen(self, msg, *args, **kwargs):
"""Print msg to screen, prefixing it with '[ie_name]'"""
self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg), *args, **kwargs)
def write_debug(self, msg, *args, **kwargs):
self._downloader.write_debug('[%s] %s' % (self.IE_NAME, msg), *args, **kwargs)
def get_param(self, name, default=None, *args, **kwargs):
if self._downloader:
return self._downloader.params.get(name, default, *args, **kwargs)
return default
def report_drm(self, video_id, partial=False):
self.raise_no_formats('This video is DRM protected', expected=True, video_id=video_id)
def report_extraction(self, id_or_name):
"""Report information extraction."""
self.to_screen('%s: Extracting information' % id_or_name)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self.to_screen('%s: Downloading webpage' % video_id)
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self.to_screen('Confirming age')
def report_login(self):
"""Report attempt to log in."""
self.to_screen('Logging in')
def raise_login_required(
self, msg='This video is only available for registered users',
metadata_available=False, method='any'):
if metadata_available and (
self.get_param('ignore_no_formats_error') or self.get_param('wait_for_video')):
self.report_warning(msg)
if method is not None:
msg = '%s. %s' % (msg, self._LOGIN_HINTS[method])
raise ExtractorError(msg, expected=True)
def raise_geo_restricted(
self, msg='This video is not available from your location due to geo restriction',
countries=None, metadata_available=False):
if metadata_available and (
self.get_param('ignore_no_formats_error') or self.get_param('wait_for_video')):
self.report_warning(msg)
else:
raise GeoRestrictedError(msg, countries=countries)
def raise_no_formats(self, msg, expected=False, video_id=None):
if expected and (
self.get_param('ignore_no_formats_error') or self.get_param('wait_for_video')):
self.report_warning(msg, video_id)
elif isinstance(msg, ExtractorError):
raise msg
else:
raise ExtractorError(msg, expected=expected, video_id=video_id)
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None, *, url_transparent=False, **kwargs):
"""Returns a URL that points to a page that should be processed"""
if ie is not None:
kwargs['ie_key'] = ie if isinstance(ie, str) else ie.ie_key()
if video_id is not None:
kwargs['id'] = video_id
if video_title is not None:
kwargs['title'] = video_title
return {
**kwargs,
'_type': 'url_transparent' if url_transparent else 'url',
'url': url,
}
def playlist_from_matches(self, matches, playlist_id=None, playlist_title=None, getter=None, ie=None, **kwargs):
urls = (self.url_result(self._proto_relative_url(m), ie)
for m in orderedSet(map(getter, matches) if getter else matches))
return self.playlist_result(urls, playlist_id, playlist_title, **kwargs)
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None, *, multi_video=False, **kwargs):
"""Returns a playlist"""
if playlist_id:
kwargs['id'] = playlist_id
if playlist_title:
kwargs['title'] = playlist_title
if playlist_description is not None:
kwargs['description'] = playlist_description
return {
**kwargs,
'_type': 'multi_video' if multi_video else 'playlist',
'entries': entries,
}
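# Illustrative (hedged) example of combining the two helpers above inside an
# extractor; the URLs and ids are placeholders:
#
#     entries = [
#         self.url_result('https://example.com/video/%s' % vid, video_id=vid)
#         for vid in ('1', '2', '3')
#     ]
#     return self.playlist_result(entries, playlist_id='42', playlist_title='Example playlist')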
def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Perform a regex search on the given string, using a single or a list of
patterns returning the first matching group.
In case of failure return a default value or raise a WARNING or a
RegexNotFoundError, depending on fatal, specifying the field name.
"""
if isinstance(pattern, (str, compat_str, compiled_regex_type)):
mobj = re.search(pattern, string, flags)
else:
for p in pattern:
mobj = re.search(p, string, flags)
if mobj:
break
_name = self._downloader._format_err(name, self._downloader.Styles.EMPHASIS)
if mobj:
if group is None:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
elif isinstance(group, (list, tuple)):
return tuple(mobj.group(g) for g in group)
else:
return mobj.group(group)
elif default is not NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract %s' % _name)
else:
self.report_warning('unable to extract %s' % _name + bug_reports_message())
return None
def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
return res
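# Illustrative (hedged) usage of the two regex helpers above; the patterns and
# field names are placeholders:
#
#     title = self._html_search_regex(
#         r'<h1[^>]+class="title"[^>]*>(.+?)</h1>', webpage, 'title',
#         default=None)
#     upload_date = self._search_regex(
#         r'"uploadDate"\s*:\s*"(\d{8})"', webpage, 'upload date', fatal=False)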
def _get_netrc_login_info(self, netrc_machine=None):
username = None
password = None
netrc_machine = netrc_machine or self._NETRC_MACHINE
if self.get_param('usenetrc', False):
try:
netrc_file = compat_expanduser(self.get_param('netrc_location') or '~')
if os.path.isdir(netrc_file):
netrc_file = os.path.join(netrc_file, '.netrc')
info = netrc.netrc(file=netrc_file).authenticators(netrc_machine)
if info is not None:
username = info[0]
password = info[2]
else:
raise netrc.NetrcParseError(
'No authenticators for %s' % netrc_machine)
except (IOError, netrc.NetrcParseError) as err:
self.report_warning(
'parsing .netrc: %s' % error_to_compat_str(err))
return username, password
def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
"""
Get the login info as (username, password)
First look for the manually specified credentials using username_option
and password_option as keys in the params dictionary. If no such credentials are
available, look in the netrc file using the netrc_machine or _NETRC_MACHINE
value.
If there's no info available, return (None, None)
"""
# Attempt to use provided username and password or .netrc data
username = self.get_param(username_option)
if username is not None:
password = self.get_param(password_option)
else:
username, password = self._get_netrc_login_info(netrc_machine)
return username, password
def _get_tfa_info(self, note='two-factor verification code'):
"""
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verify
currently just uses the command line option
If there's no info available, return None
"""
tfa = self.get_param('twofactor')
if tfa is not None:
return tfa
return compat_getpass('Type %s and press [Return]: ' % note)
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
property_re = (r'(?:name|property)=(?:\'og[:-]%(prop)s\'|"og[:-]%(prop)s"|\s*og[:-]%(prop)s\b)'
% {'prop': re.escape(prop)})
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
@staticmethod
def _meta_regex(prop):
return r'''(?isx)<meta
(?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
[^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
def _og_search_property(self, prop, html, name=None, **kargs):
prop = variadic(prop)
if name is None:
name = 'OpenGraph %s' % prop[0]
og_regexes = []
for p in prop:
og_regexes.extend(self._og_regexes(p))
escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
if escaped is None:
return None
return unescapeHTML(escaped)
def _og_search_thumbnail(self, html, **kargs):
return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
def _og_search_description(self, html, **kargs):
return self._og_search_property('description', html, fatal=False, **kargs)
def _og_search_title(self, html, **kargs):
kargs.setdefault('fatal', False)
return self._og_search_property('title', html, **kargs)
def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
regexes = self._og_regexes('video') + self._og_regexes('video:url')
if secure:
regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)
def _og_search_url(self, html, **kargs):
return self._og_search_property('url', html, **kargs)
def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
name = variadic(name)
if display_name is None:
display_name = name[0]
return self._html_search_regex(
[self._meta_regex(n) for n in name],
html, display_name, fatal=fatal, group='content', **kwargs)
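# Illustrative (hedged) usage; passing a tuple of names tries each <meta> name in
# turn (the names below are placeholders):
#
#     description = self._html_search_meta(
#         ('og:description', 'twitter:description', 'description'),
#         webpage, 'description', default=None)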
def _dc_search_uploader(self, html):
return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
def _media_rating_search(self, html):
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
rating = self._html_search_meta('rating', html)
if not rating:
return None
RATING_TABLE = {
'safe for kids': 0,
'general': 8,
'14 years': 14,
'mature': 17,
'restricted': 19,
}
return RATING_TABLE.get(rating.lower())
def _family_friendly_search(self, html):
# See http://schema.org/VideoObject
family_friendly = self._html_search_meta(
'isFamilyFriendly', html, default=None)
if not family_friendly:
return None
RATING_TABLE = {
'1': 0,
'true': 0,
'0': 18,
'false': 18,
}
return RATING_TABLE.get(family_friendly.lower())
def _twitter_search_player(self, html):
return self._html_search_meta('twitter:player', html,
'twitter card player')
def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
json_ld_list = list(re.finditer(JSON_LD_RE, html))
default = kwargs.get('default', NO_DEFAULT)
# JSON-LD may be malformed and thus `fatal` should be respected.
# At the same time `default` may be passed that assumes `fatal=False`
# for _search_regex. Let's simulate the same behavior here as well.
fatal = kwargs.get('fatal', True) if default is NO_DEFAULT else False
json_ld = []
for mobj in json_ld_list:
json_ld_item = self._parse_json(
mobj.group('json_ld'), video_id, fatal=fatal)
if not json_ld_item:
continue
if isinstance(json_ld_item, dict):
json_ld.append(json_ld_item)
elif isinstance(json_ld_item, (list, tuple)):
json_ld.extend(json_ld_item)
if json_ld:
json_ld = self._json_ld(json_ld, video_id, fatal=fatal, expected_type=expected_type)
if json_ld:
return json_ld
if default is not NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract JSON-LD')
else:
self.report_warning('unable to extract JSON-LD %s' % bug_reports_message())
return {}
def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
if isinstance(json_ld, compat_str):
json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
if not json_ld:
return {}
info = {}
if not isinstance(json_ld, (list, tuple, dict)):
return info
if isinstance(json_ld, dict):
json_ld = [json_ld]
INTERACTION_TYPE_MAP = {
'CommentAction': 'comment',
'AgreeAction': 'like',
'DisagreeAction': 'dislike',
'LikeAction': 'like',
'DislikeAction': 'dislike',
'ListenAction': 'view',
'WatchAction': 'view',
'ViewAction': 'view',
}
def extract_interaction_type(e):
interaction_type = e.get('interactionType')
if isinstance(interaction_type, dict):
interaction_type = interaction_type.get('@type')
return str_or_none(interaction_type)
def extract_interaction_statistic(e):
interaction_statistic = e.get('interactionStatistic')
if isinstance(interaction_statistic, dict):
interaction_statistic = [interaction_statistic]
if not isinstance(interaction_statistic, list):
return
for is_e in interaction_statistic:
if not isinstance(is_e, dict):
continue
if is_e.get('@type') != 'InteractionCounter':
continue
interaction_type = extract_interaction_type(is_e)
if not interaction_type:
continue
# For interaction count some sites provide string instead of
# an integer (as per spec) with non digit characters (e.g. ",")
# so extracting count with more relaxed str_to_int
interaction_count = str_to_int(is_e.get('userInteractionCount'))
if interaction_count is None:
continue
count_kind = INTERACTION_TYPE_MAP.get(interaction_type.split('/')[-1])
if not count_kind:
continue
count_key = '%s_count' % count_kind
if info.get(count_key) is not None:
continue
info[count_key] = interaction_count
def extract_chapter_information(e):
chapters = [{
'title': part.get('name'),
'start_time': part.get('startOffset'),
'end_time': part.get('endOffset'),
} for part in variadic(e.get('hasPart') or []) if part.get('@type') == 'Clip']
for idx, (last_c, current_c, next_c) in enumerate(zip(
[{'end_time': 0}] + chapters, chapters, chapters[1:])):
current_c['end_time'] = current_c['end_time'] or next_c['start_time']
current_c['start_time'] = current_c['start_time'] or last_c['end_time']
if None in current_c.values():
self.report_warning(f'Chapter {idx} contains broken data. Not extracting chapters')
return
if chapters:
chapters[-1]['end_time'] = chapters[-1]['end_time'] or info['duration']
info['chapters'] = chapters
def extract_video_object(e):
assert e['@type'] == 'VideoObject'
author = e.get('author')
info.update({
'url': url_or_none(e.get('contentUrl')),
'title': unescapeHTML(e.get('name')),
'description': unescapeHTML(e.get('description')),
'thumbnails': [{'url': url_or_none(url)}
for url in variadic(traverse_obj(e, 'thumbnailUrl', 'thumbnailURL'))],
'duration': parse_duration(e.get('duration')),
'timestamp': unified_timestamp(e.get('uploadDate')),
# author can be an instance of 'Organization' or 'Person' types.
# both types can have 'name' property (inherited from 'Thing' type). [1]
# however some websites are using 'Text' type instead.
# 1. https://schema.org/VideoObject
'uploader': author.get('name') if isinstance(author, dict) else author if isinstance(author, compat_str) else None,
'filesize': float_or_none(e.get('contentSize')),
'tbr': int_or_none(e.get('bitrate')),
'width': int_or_none(e.get('width')),
'height': int_or_none(e.get('height')),
'view_count': int_or_none(e.get('interactionCount')),
})
extract_interaction_statistic(e)
extract_chapter_information(e)
def traverse_json_ld(json_ld, at_top_level=True):
for e in json_ld:
if at_top_level and '@context' not in e:
continue
if at_top_level and set(e.keys()) == {'@context', '@graph'}:
traverse_json_ld(variadic(e['@graph'], allowed_types=(dict,)), at_top_level=False)
break
item_type = e.get('@type')
if expected_type is not None and expected_type != item_type:
continue
rating = traverse_obj(e, ('aggregateRating', 'ratingValue'), expected_type=float_or_none)
if rating is not None:
info['average_rating'] = rating
if item_type in ('TVEpisode', 'Episode'):
episode_name = unescapeHTML(e.get('name'))
info.update({
'episode': episode_name,
'episode_number': int_or_none(e.get('episodeNumber')),
'description': unescapeHTML(e.get('description')),
})
if not info.get('title') and episode_name:
info['title'] = episode_name
part_of_season = e.get('partOfSeason')
if isinstance(part_of_season, dict) and part_of_season.get('@type') in ('TVSeason', 'Season', 'CreativeWorkSeason'):
info.update({
'season': unescapeHTML(part_of_season.get('name')),
'season_number': int_or_none(part_of_season.get('seasonNumber')),
})
part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
if isinstance(part_of_series, dict) and part_of_series.get('@type') in ('TVSeries', 'Series', 'CreativeWorkSeries'):
info['series'] = unescapeHTML(part_of_series.get('name'))
elif item_type == 'Movie':
info.update({
'title': unescapeHTML(e.get('name')),
'description': unescapeHTML(e.get('description')),
'duration': parse_duration(e.get('duration')),
'timestamp': unified_timestamp(e.get('dateCreated')),
})
elif item_type in ('Article', 'NewsArticle'):
info.update({
'timestamp': parse_iso8601(e.get('datePublished')),
'title': unescapeHTML(e.get('headline')),
'description': unescapeHTML(e.get('articleBody') or e.get('description')),
})
if traverse_obj(e, ('video', 0, '@type')) == 'VideoObject':
extract_video_object(e['video'][0])
elif item_type == 'VideoObject':
extract_video_object(e)
if expected_type is None:
continue
else:
break
video = e.get('video')
if isinstance(video, dict) and video.get('@type') == 'VideoObject':
extract_video_object(video)
if expected_type is None:
continue
else:
break
traverse_json_ld(json_ld)
return dict((k, v) for k, v in info.items() if v is not None)
def _search_nextjs_data(self, webpage, video_id, *, transform_source=None, fatal=True, **kw):
return self._parse_json(
self._search_regex(
r'(?s)<script[^>]+id=[\'"]__NEXT_DATA__[\'"][^>]*>([^<]+)</script>',
webpage, 'next.js data', fatal=fatal, **kw),
video_id, transform_source=transform_source, fatal=fatal)
def _search_nuxt_data(self, webpage, video_id, context_name='__NUXT__'):
''' Parses Nuxt.js metadata. This works as long as the function that __NUXT__ invokes is a pure function. '''
# not all websites do this, but it can be changed
# https://stackoverflow.com/questions/67463109/how-to-change-or-hide-nuxt-and-nuxt-keyword-in-page-source
rectx = re.escape(context_name)
js, arg_keys, arg_vals = self._search_regex(
(r'<script>window\.%s=\(function\((?P<arg_keys>.*?)\)\{return\s(?P<js>\{.*?\})\}\((?P<arg_vals>.+?)\)\);?</script>' % rectx,
r'%s\(.*?\(function\((?P<arg_keys>.*?)\)\{return\s(?P<js>\{.*?\})\}\((?P<arg_vals>.*?)\)' % rectx),
webpage, context_name, group=['js', 'arg_keys', 'arg_vals'])
args = dict(zip(arg_keys.split(','), arg_vals.split(',')))
for key, val in args.items():
if val in ('undefined', 'void 0'):
args[key] = 'null'
return self._parse_json(js_to_json(js, args), video_id)['data'][0]
@staticmethod
def _hidden_inputs(html):
html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
hidden_inputs = {}
for input in re.findall(r'(?i)(<input[^>]+>)', html):
attrs = extract_attributes(input)
if not input:
continue
if attrs.get('type') not in ('hidden', 'submit'):
continue
name = attrs.get('name') or attrs.get('id')
value = attrs.get('value')
if name and value is not None:
hidden_inputs[name] = value
return hidden_inputs
def _form_hidden_inputs(self, form_id, html):
form = self._search_regex(
r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
html, '%s form' % form_id, group='form')
return self._hidden_inputs(form)
class FormatSort:
regex = r' *((?P<reverse>\+)?(?P<field>[a-zA-Z0-9_]+)((?P<separator>[~:])(?P<limit>.*?))?)? *$'
default = ('hidden', 'aud_or_vid', 'hasvid', 'ie_pref', 'lang', 'quality',
'res', 'fps', 'hdr:12', 'codec:vp9.2', 'size', 'br', 'asr',
'proto', 'ext', 'hasaud', 'source', 'id') # These must not be aliases
ytdl_default = ('hasaud', 'lang', 'quality', 'tbr', 'filesize', 'vbr',
'height', 'width', 'proto', 'vext', 'abr', 'aext',
'fps', 'fs_approx', 'source', 'id')
settings = {
'vcodec': {'type': 'ordered', 'regex': True,
'order': ['av0?1', 'vp0?9.2', 'vp0?9', '[hx]265|he?vc?', '[hx]264|avc', 'vp0?8', 'mp4v|h263', 'theora', '', None, 'none']},
'acodec': {'type': 'ordered', 'regex': True,
'order': ['[af]lac', 'wav|aiff', 'opus', 'vorbis', 'aac', 'mp?4a?', 'mp3', 'e-?a?c-?3', 'ac-?3', 'dts', '', None, 'none']},
'hdr': {'type': 'ordered', 'regex': True, 'field': 'dynamic_range',
'order': ['dv', '(hdr)?12', r'(hdr)?10\+', '(hdr)?10', 'hlg', '', 'sdr', None]},
'proto': {'type': 'ordered', 'regex': True, 'field': 'protocol',
'order': ['(ht|f)tps', '(ht|f)tp$', 'm3u8.*', '.*dash', 'websocket_frag', 'rtmpe?', '', 'mms|rtsp', 'ws|websocket', 'f4']},
'vext': {'type': 'ordered', 'field': 'video_ext',
'order': ('mp4', 'webm', 'flv', '', 'none'),
'order_free': ('webm', 'mp4', 'flv', '', 'none')},
'aext': {'type': 'ordered', 'field': 'audio_ext',
'order': ('m4a', 'aac', 'mp3', 'ogg', 'opus', 'webm', '', 'none'),
'order_free': ('opus', 'ogg', 'webm', 'm4a', 'mp3', 'aac', '', 'none')},
'hidden': {'visible': False, 'forced': True, 'type': 'extractor', 'max': -1000},
'aud_or_vid': {'visible': False, 'forced': True, 'type': 'multiple',
'field': ('vcodec', 'acodec'),
'function': lambda it: int(any(v != 'none' for v in it))},
'ie_pref': {'priority': True, 'type': 'extractor'},
'hasvid': {'priority': True, 'field': 'vcodec', 'type': 'boolean', 'not_in_list': ('none',)},
'hasaud': {'field': 'acodec', 'type': 'boolean', 'not_in_list': ('none',)},
'lang': {'convert': 'float', 'field': 'language_preference', 'default': -1},
'quality': {'convert': 'float', 'default': -1},
'filesize': {'convert': 'bytes'},
'fs_approx': {'convert': 'bytes', 'field': 'filesize_approx'},
'id': {'convert': 'string', 'field': 'format_id'},
'height': {'convert': 'float_none'},
'width': {'convert': 'float_none'},
'fps': {'convert': 'float_none'},
'tbr': {'convert': 'float_none'},
'vbr': {'convert': 'float_none'},
'abr': {'convert': 'float_none'},
'asr': {'convert': 'float_none'},
'source': {'convert': 'float', 'field': 'source_preference', 'default': -1},
'codec': {'type': 'combined', 'field': ('vcodec', 'acodec')},
'br': {'type': 'combined', 'field': ('tbr', 'vbr', 'abr'), 'same_limit': True},
'size': {'type': 'combined', 'same_limit': True, 'field': ('filesize', 'fs_approx')},
'ext': {'type': 'combined', 'field': ('vext', 'aext')},
'res': {'type': 'multiple', 'field': ('height', 'width'),
'function': lambda it: (lambda l: min(l) if l else 0)(tuple(filter(None, it)))},
# For compatibility with youtube-dl
'format_id': {'type': 'alias', 'field': 'id'},
'preference': {'type': 'alias', 'field': 'ie_pref'},
'language_preference': {'type': 'alias', 'field': 'lang'},
# Deprecated
'dimension': {'type': 'alias', 'field': 'res'},
'resolution': {'type': 'alias', 'field': 'res'},
'extension': {'type': 'alias', 'field': 'ext'},
'bitrate': {'type': 'alias', 'field': 'br'},
'total_bitrate': {'type': 'alias', 'field': 'tbr'},
'video_bitrate': {'type': 'alias', 'field': 'vbr'},
'audio_bitrate': {'type': 'alias', 'field': 'abr'},
'framerate': {'type': 'alias', 'field': 'fps'},
'protocol': {'type': 'alias', 'field': 'proto'},
'source_preference': {'type': 'alias', 'field': 'source'},
'filesize_approx': {'type': 'alias', 'field': 'fs_approx'},
'filesize_estimate': {'type': 'alias', 'field': 'size'},
'samplerate': {'type': 'alias', 'field': 'asr'},
'video_ext': {'type': 'alias', 'field': 'vext'},
'audio_ext': {'type': 'alias', 'field': 'aext'},
'video_codec': {'type': 'alias', 'field': 'vcodec'},
'audio_codec': {'type': 'alias', 'field': 'acodec'},
'video': {'type': 'alias', 'field': 'hasvid'},
'has_video': {'type': 'alias', 'field': 'hasvid'},
'audio': {'type': 'alias', 'field': 'hasaud'},
'has_audio': {'type': 'alias', 'field': 'hasaud'},
'extractor': {'type': 'alias', 'field': 'ie_pref'},
'extractor_preference': {'type': 'alias', 'field': 'ie_pref'},
}
def __init__(self, ie, field_preference):
self._order = []
self.ydl = ie._downloader
self.evaluate_params(self.ydl.params, field_preference)
if ie.get_param('verbose'):
self.print_verbose_info(self.ydl.write_debug)
def _get_field_setting(self, field, key):
if field not in self.settings:
if key in ('forced', 'priority'):
return False
self.ydl.deprecation_warning(
f'Using arbitrary fields ({field}) for format sorting is deprecated '
'and may be removed in a future version')
self.settings[field] = {}
propObj = self.settings[field]
if key not in propObj:
type = propObj.get('type')
if key == 'field':
default = 'preference' if type == 'extractor' else (field,) if type in ('combined', 'multiple') else field
elif key == 'convert':
default = 'order' if type == 'ordered' else 'float_string' if field else 'ignore'
else:
default = {'type': 'field', 'visible': True, 'order': [], 'not_in_list': (None,)}.get(key, None)
propObj[key] = default
return propObj[key]
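        # Normalize a raw field (or limit) value into something comparable
        # according to the field's 'convert' setting; 'ordered' fields are
        # mapped to their position in the field's 'order' list.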
def _resolve_field_value(self, field, value, convertNone=False):
if value is None:
if not convertNone:
return None
else:
value = value.lower()
conversion = self._get_field_setting(field, 'convert')
if conversion == 'ignore':
return None
if conversion == 'string':
return value
elif conversion == 'float_none':
return float_or_none(value)
elif conversion == 'bytes':
return FileDownloader.parse_bytes(value)
elif conversion == 'order':
order_list = (self._use_free_order and self._get_field_setting(field, 'order_free')) or self._get_field_setting(field, 'order')
use_regex = self._get_field_setting(field, 'regex')
list_length = len(order_list)
empty_pos = order_list.index('') if '' in order_list else list_length + 1
if use_regex and value is not None:
for i, regex in enumerate(order_list):
if regex and re.match(regex, value):
return list_length - i
return list_length - empty_pos # not in list
else: # not regex or value = None
return list_length - (order_list.index(value) if value in order_list else empty_pos)
else:
if value.isnumeric():
return float(value)
else:
self.settings[field]['convert'] = 'string'
return value
def evaluate_params(self, params, sort_extractor):
self._use_free_order = params.get('prefer_free_formats', False)
self._sort_user = params.get('format_sort', [])
self._sort_extractor = sort_extractor
def add_item(field, reverse, closest, limit_text):
field = field.lower()
if field in self._order:
return
self._order.append(field)
limit = self._resolve_field_value(field, limit_text)
data = {
'reverse': reverse,
'closest': False if limit is None else closest,
'limit_text': limit_text,
'limit': limit}
if field in self.settings:
self.settings[field].update(data)
else:
self.settings[field] = data
sort_list = (
tuple(field for field in self.default if self._get_field_setting(field, 'forced'))
+ (tuple() if params.get('format_sort_force', False)
else tuple(field for field in self.default if self._get_field_setting(field, 'priority')))
+ tuple(self._sort_user) + tuple(sort_extractor) + self.default)
for item in sort_list:
match = re.match(self.regex, item)
if match is None:
raise ExtractorError('Invalid format sort string "%s" given by extractor' % item)
field = match.group('field')
if field is None:
continue
if self._get_field_setting(field, 'type') == 'alias':
alias, field = field, self._get_field_setting(field, 'field')
if alias not in ('format_id', 'preference', 'language_preference'):
self.ydl.deprecation_warning(
f'Format sorting alias {alias} is deprecated '
f'and may be removed in a future version. Please use {field} instead')
reverse = match.group('reverse') is not None
closest = match.group('separator') == '~'
limit_text = match.group('limit')
has_limit = limit_text is not None
has_multiple_fields = self._get_field_setting(field, 'type') == 'combined'
has_multiple_limits = has_limit and has_multiple_fields and not self._get_field_setting(field, 'same_limit')
fields = self._get_field_setting(field, 'field') if has_multiple_fields else (field,)
limits = limit_text.split(':') if has_multiple_limits else (limit_text,) if has_limit else tuple()
limit_count = len(limits)
for (i, f) in enumerate(fields):
add_item(f, reverse, closest,
limits[i] if i < limit_count
else limits[0] if has_limit and not has_multiple_limits
else None)
def print_verbose_info(self, write_debug):
if self._sort_user:
write_debug('Sort order given by user: %s' % ', '.join(self._sort_user))
if self._sort_extractor:
write_debug('Sort order given by extractor: %s' % ', '.join(self._sort_extractor))
write_debug('Formats sorted by: %s' % ', '.join(['%s%s%s' % (
'+' if self._get_field_setting(field, 'reverse') else '', field,
'%s%s(%s)' % ('~' if self._get_field_setting(field, 'closest') else ':',
self._get_field_setting(field, 'limit_text'),
self._get_field_setting(field, 'limit'))
if self._get_field_setting(field, 'limit_text') is not None else '')
for field in self._order if self._get_field_setting(field, 'visible')]))
def _calculate_field_preference_from_value(self, format, field, type, value):
reverse = self._get_field_setting(field, 'reverse')
closest = self._get_field_setting(field, 'closest')
limit = self._get_field_setting(field, 'limit')
if type == 'extractor':
maximum = self._get_field_setting(field, 'max')
if value is None or (maximum is not None and value >= maximum):
value = -1
elif type == 'boolean':
in_list = self._get_field_setting(field, 'in_list')
not_in_list = self._get_field_setting(field, 'not_in_list')
value = 0 if ((in_list is None or value in in_list) and (not_in_list is None or value not in not_in_list)) else -1
elif type == 'ordered':
value = self._resolve_field_value(field, value, True)
# try to convert to number
val_num = float_or_none(value, default=self._get_field_setting(field, 'default'))
is_num = self._get_field_setting(field, 'convert') != 'string' and val_num is not None
if is_num:
value = val_num
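            # Build an ascending sort key: missing values rank lowest, strings
            # rank above numbers and '~limit' ranks numeric values by their
            # closeness to the limit.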
return ((-10, 0) if value is None
else (1, value, 0) if not is_num # if a field has mixed strings and numbers, strings are sorted higher
else (0, -abs(value - limit), value - limit if reverse else limit - value) if closest
else (0, value, 0) if not reverse and (limit is None or value <= limit)
else (0, -value, 0) if limit is None or (reverse and value == limit) or value > limit
else (-1, value, 0))
def _calculate_field_preference(self, format, field):
type = self._get_field_setting(field, 'type') # extractor, boolean, ordered, field, multiple
get_value = lambda f: format.get(self._get_field_setting(f, 'field'))
if type == 'multiple':
type = 'field' # Only 'field' is allowed in multiple for now
actual_fields = self._get_field_setting(field, 'field')
value = self._get_field_setting(field, 'function')(get_value(f) for f in actual_fields)
else:
value = get_value(field)
return self._calculate_field_preference_from_value(format, field, type, value)
def calculate_preference(self, format):
# Determine missing protocol
if not format.get('protocol'):
format['protocol'] = determine_protocol(format)
# Determine missing ext
if not format.get('ext') and 'url' in format:
format['ext'] = determine_ext(format['url'])
if format.get('vcodec') == 'none':
format['audio_ext'] = format['ext'] if format.get('acodec') != 'none' else 'none'
format['video_ext'] = 'none'
else:
format['video_ext'] = format['ext']
format['audio_ext'] = 'none'
# if format.get('preference') is None and format.get('ext') in ('f4f', 'f4m'): # Not supported?
# format['preference'] = -1000
# Determine missing bitrates
if format.get('tbr') is None:
if format.get('vbr') is not None and format.get('abr') is not None:
format['tbr'] = format.get('vbr', 0) + format.get('abr', 0)
else:
if format.get('vcodec') != 'none' and format.get('vbr') is None:
format['vbr'] = format.get('tbr') - format.get('abr', 0)
if format.get('acodec') != 'none' and format.get('abr') is None:
format['abr'] = format.get('tbr') - format.get('vbr', 0)
return tuple(self._calculate_field_preference(format, field) for field in self._order)
def _sort_formats(self, formats, field_preference=[]):
if not formats:
return
format_sort = self.FormatSort(self, field_preference)
formats.sort(key=lambda f: format_sort.calculate_preference(f))
def _check_formats(self, formats, video_id):
if formats:
formats[:] = filter(
lambda f: self._is_valid_url(
f['url'], video_id,
item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
formats)
@staticmethod
def _remove_duplicate_formats(formats):
format_urls = set()
unique_formats = []
for f in formats:
if f['url'] not in format_urls:
format_urls.add(f['url'])
unique_formats.append(f)
formats[:] = unique_formats
def _is_valid_url(self, url, video_id, item='video', headers={}):
url = self._proto_relative_url(url, scheme='http:')
# For now assume non HTTP(S) URLs always valid
if not (url.startswith('http://') or url.startswith('https://')):
return True
try:
self._request_webpage(url, video_id, 'Checking %s URL' % item, headers=headers)
return True
except ExtractorError as e:
self.to_screen(
'%s: %s URL is invalid, skipping: %s'
% (video_id, item, error_to_compat_str(e.cause)))
return False
def http_scheme(self):
""" Either "http:" or "https:", depending on the user's preferences """
return (
'http:'
if self.get_param('prefer_insecure', False)
else 'https:')
def _proto_relative_url(self, url, scheme=None):
if url is None:
return url
if url.startswith('//'):
if scheme is None:
scheme = self.http_scheme()
return scheme + url
else:
return url
def _sleep(self, timeout, video_id, msg_template=None):
if msg_template is None:
msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
msg = msg_template % {'video_id': video_id, 'timeout': timeout}
self.to_screen(msg)
time.sleep(timeout)
def _extract_f4m_formats(self, manifest_url, video_id, preference=None, quality=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True, m3u8_id=None, data=None, headers={}, query={}):
manifest = self._download_xml(
manifest_url, video_id, 'Downloading f4m manifest',
'Unable to download f4m manifest',
# Some manifests may be malformed, e.g. prosiebensat1 generated manifests
# (see https://github.com/ytdl-org/youtube-dl/issues/6215#issuecomment-121704244)
transform_source=transform_source,
fatal=fatal, data=data, headers=headers, query=query)
if manifest is False:
return []
return self._parse_f4m_formats(
manifest, manifest_url, video_id, preference=preference, quality=quality, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)
def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, quality=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True, m3u8_id=None):
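        # Parse an Adobe HDS (f4m) manifest into format dicts, recursing into
        # any nested f4m/m3u8 manifests referenced by its media entries.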
if not isinstance(manifest, compat_etree_Element) and not fatal:
return []
# currently yt-dlp cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
if akamai_pv is not None and ';' in akamai_pv.text:
playerVerificationChallenge = akamai_pv.text.split(';')[0]
if playerVerificationChallenge.strip() != '':
return []
formats = []
manifest_version = '1.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
if not media_nodes:
manifest_version = '2.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
# Remove unsupported DRM protected media from final formats
# rendition (see https://github.com/ytdl-org/youtube-dl/issues/8573).
media_nodes = remove_encrypted_media(media_nodes)
if not media_nodes:
return formats
manifest_base_url = get_base_url(manifest)
bootstrap_info = xpath_element(
manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
'bootstrap info', default=None)
vcodec = None
mime_type = xpath_text(
manifest, ['{http://ns.adobe.com/f4m/1.0}mimeType', '{http://ns.adobe.com/f4m/2.0}mimeType'],
            'MIME type', default=None)
if mime_type and mime_type.startswith('audio/'):
vcodec = 'none'
for i, media_el in enumerate(media_nodes):
tbr = int_or_none(media_el.attrib.get('bitrate'))
width = int_or_none(media_el.attrib.get('width'))
height = int_or_none(media_el.attrib.get('height'))
format_id = join_nonempty(f4m_id, tbr or i)
# If <bootstrapInfo> is present, the specified f4m is a
# stream-level manifest, and only set-level manifests may refer to
# external resources. See section 11.4 and section 4 of F4M spec
if bootstrap_info is None:
media_url = None
# @href is introduced in 2.0, see section 11.6 of F4M spec
if manifest_version == '2.0':
media_url = media_el.attrib.get('href')
if media_url is None:
media_url = media_el.attrib.get('url')
if not media_url:
continue
manifest_url = (
media_url if media_url.startswith('http://') or media_url.startswith('https://')
else ((manifest_base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
                # If media_url is itself an f4m manifest, do the extraction
                # recursively, since bitrates in the parent manifest (this one)
                # and in the media_url manifest may differ, making it impossible
                # to resolve the format by the requested bitrate in the f4m downloader
ext = determine_ext(manifest_url)
if ext == 'f4m':
f4m_formats = self._extract_f4m_formats(
manifest_url, video_id, preference=preference, quality=quality, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal)
                    # Sometimes a stream-level manifest contains a single media entry
                    # with no quality metadata (e.g. http://matchtv.ru/#live-player),
                    # while the parent's media entry in the set-level manifest may
                    # contain it. Copy it from the parent in such cases.
if len(f4m_formats) == 1:
f = f4m_formats[0]
f.update({
'tbr': f.get('tbr') or tbr,
'width': f.get('width') or width,
'height': f.get('height') or height,
'format_id': f.get('format_id') if not tbr else format_id,
'vcodec': vcodec,
})
formats.extend(f4m_formats)
continue
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
manifest_url, video_id, 'mp4', preference=preference,
quality=quality, m3u8_id=m3u8_id, fatal=fatal))
continue
formats.append({
'format_id': format_id,
'url': manifest_url,
'manifest_url': manifest_url,
'ext': 'flv' if bootstrap_info is not None else None,
'protocol': 'f4m',
'tbr': tbr,
'width': width,
'height': height,
'vcodec': vcodec,
'preference': preference,
'quality': quality,
})
return formats
def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, quality=None, m3u8_id=None):
return {
'format_id': join_nonempty(m3u8_id, 'meta'),
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': preference - 100 if preference else -100,
'quality': quality,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}
def _report_ignoring_subs(self, name):
self.report_warning(bug_reports_message(
f'Ignoring subtitle tracks found in the {name} manifest; '
'if any subtitle tracks are missing,'
), only_once=True)
def _extract_m3u8_formats(self, *args, **kwargs):
fmts, subs = self._extract_m3u8_formats_and_subtitles(*args, **kwargs)
if subs:
self._report_ignoring_subs('HLS')
return fmts
def _extract_m3u8_formats_and_subtitles(
self, m3u8_url, video_id, ext=None, entry_protocol='m3u8_native',
preference=None, quality=None, m3u8_id=None, note=None,
errnote=None, fatal=True, live=False, data=None, headers={},
query={}):
res = self._download_webpage_handle(
m3u8_url, video_id,
note='Downloading m3u8 information' if note is None else note,
errnote='Failed to download m3u8 information' if errnote is None else errnote,
fatal=fatal, data=data, headers=headers, query=query)
if res is False:
return [], {}
m3u8_doc, urlh = res
m3u8_url = urlh.geturl()
return self._parse_m3u8_formats_and_subtitles(
m3u8_doc, m3u8_url, ext=ext, entry_protocol=entry_protocol,
preference=preference, quality=quality, m3u8_id=m3u8_id,
note=note, errnote=errnote, fatal=fatal, live=live, data=data,
headers=headers, query=query, video_id=video_id)
def _parse_m3u8_formats_and_subtitles(
self, m3u8_doc, m3u8_url=None, ext=None, entry_protocol='m3u8_native',
preference=None, quality=None, m3u8_id=None, live=False, note=None,
errnote=None, fatal=True, data=None, headers={}, query={},
video_id=None):
formats, subtitles = [], {}
has_drm = re.search('|'.join([
r'#EXT-X-FAXS-CM:', # Adobe Flash Access
r'#EXT-X-(?:SESSION-)?KEY:.*?URI="skd://', # Apple FairPlay
]), m3u8_doc)
def format_url(url):
return url if re.match(r'^https?://', url) else compat_urlparse.urljoin(m3u8_url, url)
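        # When the 'hls_split_discontinuity' param is set, each
        # #EXT-X-DISCONTINUITY section of a media playlist is exposed as a
        # separate format, distinguished by 'format_index'.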
if self.get_param('hls_split_discontinuity', False):
def _extract_m3u8_playlist_indices(manifest_url=None, m3u8_doc=None):
if not m3u8_doc:
if not manifest_url:
return []
m3u8_doc = self._download_webpage(
manifest_url, video_id, fatal=fatal, data=data, headers=headers,
note=False, errnote='Failed to download m3u8 playlist information')
if m3u8_doc is False:
return []
return range(1 + sum(line.startswith('#EXT-X-DISCONTINUITY') for line in m3u8_doc.splitlines()))
else:
def _extract_m3u8_playlist_indices(*args, **kwargs):
return [None]
# References:
# 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-21
# 2. https://github.com/ytdl-org/youtube-dl/issues/12211
# 3. https://github.com/ytdl-org/youtube-dl/issues/18923
        # We should try extracting formats only from master playlists [1, 4.3.4],
        # i.e. playlists that describe the available qualities. Media playlists
        # [1, 4.3.3], on the other hand, should be returned as is since they
        # contain just the media without quality renditions.
        # Fortunately, a master playlist can easily be distinguished from a media
        # playlist by the availability of particular tags. As per [1, 4.3.3, 4.3.4]
        # master playlist tags MUST NOT appear in a media playlist and vice versa.
        # As per [1, 4.3.3.1] the #EXT-X-TARGETDURATION tag is REQUIRED for every
        # media playlist and MUST NOT appear in a master playlist, so we can
        # reliably detect a media playlist with this criterion.
if '#EXT-X-TARGETDURATION' in m3u8_doc: # media playlist, return as is
formats = [{
'format_id': join_nonempty(m3u8_id, idx),
'format_index': idx,
'url': m3u8_url or encode_data_uri(m3u8_doc.encode('utf-8'), 'application/x-mpegurl'),
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
'quality': quality,
'has_drm': has_drm,
} for idx in _extract_m3u8_playlist_indices(m3u8_doc=m3u8_doc)]
return formats, subtitles
groups = {}
last_stream_inf = {}
def extract_media(x_media_line):
media = parse_m3u8_attributes(x_media_line)
# As per [1, 4.3.4.1] TYPE, GROUP-ID and NAME are REQUIRED
media_type, group_id, name = media.get('TYPE'), media.get('GROUP-ID'), media.get('NAME')
if not (media_type and group_id and name):
return
groups.setdefault(group_id, []).append(media)
# <https://tools.ietf.org/html/rfc8216#section-4.3.4.1>
if media_type == 'SUBTITLES':
# According to RFC 8216 §4.3.4.2.1, URI is REQUIRED in the
# EXT-X-MEDIA tag if the media type is SUBTITLES.
# However, lack of URI has been spotted in the wild.
# e.g. NebulaIE; see https://github.com/yt-dlp/yt-dlp/issues/339
if not media.get('URI'):
return
url = format_url(media['URI'])
sub_info = {
'url': url,
'ext': determine_ext(url),
}
if sub_info['ext'] == 'm3u8':
# Per RFC 8216 §3.1, the only possible subtitle format m3u8
# files may contain is WebVTT:
# <https://tools.ietf.org/html/rfc8216#section-3.1>
sub_info['ext'] = 'vtt'
sub_info['protocol'] = 'm3u8_native'
lang = media.get('LANGUAGE') or 'und'
subtitles.setdefault(lang, []).append(sub_info)
if media_type not in ('VIDEO', 'AUDIO'):
return
media_url = media.get('URI')
if media_url:
manifest_url = format_url(media_url)
formats.extend({
'format_id': join_nonempty(m3u8_id, group_id, name, idx),
'format_note': name,
'format_index': idx,
'url': manifest_url,
'manifest_url': m3u8_url,
'language': media.get('LANGUAGE'),
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
'quality': quality,
'vcodec': 'none' if media_type == 'AUDIO' else None,
} for idx in _extract_m3u8_playlist_indices(manifest_url))
def build_stream_name():
            # Although the specification does not mention a NAME attribute for
            # the EXT-X-STREAM-INF tag, it may still sometimes be present (see [1]
            # or the vidio test in TestInfoExtractor.test_parse_m3u8_formats)
# 1. http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015
stream_name = last_stream_inf.get('NAME')
if stream_name:
return stream_name
# If there is no NAME in EXT-X-STREAM-INF it will be obtained
# from corresponding rendition group
stream_group_id = last_stream_inf.get('VIDEO')
if not stream_group_id:
return
stream_group = groups.get(stream_group_id)
if not stream_group:
return stream_group_id
rendition = stream_group[0]
return rendition.get('NAME') or stream_group_id
# parse EXT-X-MEDIA tags before EXT-X-STREAM-INF in order to have the
# chance to detect video only formats when EXT-X-STREAM-INF tags
# precede EXT-X-MEDIA tags in HLS manifest such as [3].
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-MEDIA:'):
extract_media(line)
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-STREAM-INF:'):
last_stream_inf = parse_m3u8_attributes(line)
elif line.startswith('#') or not line.strip():
continue
else:
tbr = float_or_none(
last_stream_inf.get('AVERAGE-BANDWIDTH')
or last_stream_inf.get('BANDWIDTH'), scale=1000)
manifest_url = format_url(line.strip())
for idx in _extract_m3u8_playlist_indices(manifest_url):
format_id = [m3u8_id, None, idx]
# Bandwidth of live streams may differ over time thus making
# format_id unpredictable. So it's better to keep provided
# format_id intact.
if not live:
stream_name = build_stream_name()
format_id[1] = stream_name or '%d' % (tbr or len(formats))
f = {
'format_id': join_nonempty(*format_id),
'format_index': idx,
'url': manifest_url,
'manifest_url': m3u8_url,
'tbr': tbr,
'ext': ext,
'fps': float_or_none(last_stream_inf.get('FRAME-RATE')),
'protocol': entry_protocol,
'preference': preference,
'quality': quality,
}
resolution = last_stream_inf.get('RESOLUTION')
if resolution:
mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution)
if mobj:
f['width'] = int(mobj.group('width'))
f['height'] = int(mobj.group('height'))
# Unified Streaming Platform
mobj = re.search(
r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
if mobj:
abr, vbr = mobj.groups()
abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
f.update({
'vbr': vbr,
'abr': abr,
})
codecs = parse_codecs(last_stream_inf.get('CODECS'))
f.update(codecs)
audio_group_id = last_stream_inf.get('AUDIO')
# As per [1, 4.3.4.1.1] any EXT-X-STREAM-INF tag which
# references a rendition group MUST have a CODECS attribute.
# However, this is not always respected, for example, [2]
# contains EXT-X-STREAM-INF tag which references AUDIO
# rendition group but does not have CODECS and despite
# referencing an audio group it represents a complete
# (with audio and video) format. So, for such cases we will
# ignore references to rendition groups and treat them
# as complete formats.
if audio_group_id and codecs and f.get('vcodec') != 'none':
audio_group = groups.get(audio_group_id)
if audio_group and audio_group[0].get('URI'):
# TODO: update acodec for audio only formats with
# the same GROUP-ID
f['acodec'] = 'none'
if not f.get('ext'):
f['ext'] = 'm4a' if f.get('vcodec') == 'none' else 'mp4'
formats.append(f)
# for DailyMotion
progressive_uri = last_stream_inf.get('PROGRESSIVE-URI')
if progressive_uri:
http_f = f.copy()
del http_f['manifest_url']
http_f.update({
'format_id': f['format_id'].replace('hls-', 'http-'),
'protocol': 'http',
'url': progressive_uri,
})
formats.append(http_f)
last_stream_inf = {}
return formats, subtitles
def _extract_m3u8_vod_duration(
self, m3u8_vod_url, video_id, note=None, errnote=None, data=None, headers={}, query={}):
m3u8_vod = self._download_webpage(
m3u8_vod_url, video_id,
note='Downloading m3u8 VOD manifest' if note is None else note,
errnote='Failed to download VOD manifest' if errnote is None else errnote,
fatal=False, data=data, headers=headers, query=query)
return self._parse_m3u8_vod_duration(m3u8_vod or '', video_id)
def _parse_m3u8_vod_duration(self, m3u8_vod, video_id):
if '#EXT-X-PLAYLIST-TYPE:VOD' not in m3u8_vod:
return None
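        # The VOD duration is the sum of all #EXTINF segment durations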
return int(sum(
float(line[len('#EXTINF:'):].split(',')[0])
for line in m3u8_vod.splitlines() if line.startswith('#EXTINF:'))) or None
@staticmethod
def _xpath_ns(path, namespace=None):
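        # Qualify each path component with the namespace using ElementTree's
        # '{namespace}tag' syntax; '.' and empty components are left untouched.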
if not namespace:
return path
out = []
for c in path.split('/'):
if not c or c == '.':
out.append(c)
else:
out.append('{%s}%s' % (namespace, c))
return '/'.join(out)
def _extract_smil_formats_and_subtitles(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)
if smil is False:
assert not fatal
return [], {}
namespace = self._parse_smil_namespace(smil)
fmts = self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
subs = self._parse_smil_subtitles(
smil, namespace=namespace)
return fmts, subs
def _extract_smil_formats(self, *args, **kwargs):
fmts, subs = self._extract_smil_formats_and_subtitles(*args, **kwargs)
if subs:
self._report_ignoring_subs('SMIL')
return fmts
def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal)
if smil is False:
return {}
return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
return self._download_xml(
smil_url, video_id, 'Downloading SMIL file',
'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)
def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
namespace = self._parse_smil_namespace(smil)
formats = self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
subtitles = self._parse_smil_subtitles(smil, namespace=namespace)
video_id = os.path.splitext(url_basename(smil_url))[0]
title = None
description = None
upload_date = None
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
name = meta.attrib.get('name')
content = meta.attrib.get('content')
if not name or not content:
continue
if not title and name == 'title':
title = content
elif not description and name in ('description', 'abstract'):
description = content
elif not upload_date and name == 'date':
upload_date = unified_strdate(content)
thumbnails = [{
'id': image.get('type'),
'url': image.get('src'),
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
} for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]
return {
'id': video_id,
'title': title or video_id,
'description': description,
'upload_date': upload_date,
'thumbnails': thumbnails,
'formats': formats,
'subtitles': subtitles,
}
def _parse_smil_namespace(self, smil):
return self._search_regex(
r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
base = smil_url
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
b = meta.get('base') or meta.get('httpBase')
if b:
base = b
break
formats = []
rtmp_count = 0
http_count = 0
m3u8_count = 0
imgs_count = 0
srcs = set()
media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
for medium in media:
src = medium.get('src')
if not src or src in srcs:
continue
srcs.add(src)
bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
width = int_or_none(medium.get('width'))
height = int_or_none(medium.get('height'))
proto = medium.get('proto')
ext = medium.get('ext')
src_ext = determine_ext(src)
streamer = medium.get('streamer') or base
if proto == 'rtmp' or streamer.startswith('rtmp'):
rtmp_count += 1
formats.append({
'url': streamer,
'play_path': src,
'ext': 'flv',
'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
if transform_rtmp_url:
streamer, src = transform_rtmp_url(streamer, src)
formats[-1].update({
'url': streamer,
'play_path': src,
})
continue
src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
src_url = src_url.strip()
if proto == 'm3u8' or src_ext == 'm3u8':
m3u8_formats = self._extract_m3u8_formats(
src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
if len(m3u8_formats) == 1:
m3u8_count += 1
m3u8_formats[0].update({
'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
'tbr': bitrate,
'width': width,
'height': height,
})
formats.extend(m3u8_formats)
elif src_ext == 'f4m':
f4m_url = src_url
if not f4m_params:
f4m_params = {
'hdcore': '3.2.0',
'plugin': 'flowplayer-3.2.0.1',
}
f4m_url += '&' if '?' in f4m_url else '?'
f4m_url += compat_urllib_parse_urlencode(f4m_params)
formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
elif src_ext == 'mpd':
formats.extend(self._extract_mpd_formats(
src_url, video_id, mpd_id='dash', fatal=False))
elif re.search(r'\.ism/[Mm]anifest', src_url):
formats.extend(self._extract_ism_formats(
src_url, video_id, ism_id='mss', fatal=False))
elif src_url.startswith('http') and self._is_valid_url(src, video_id):
http_count += 1
formats.append({
'url': src_url,
'ext': ext or src_ext or 'flv',
'format_id': 'http-%d' % (bitrate or http_count),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
for medium in smil.findall(self._xpath_ns('.//imagestream', namespace)):
src = medium.get('src')
if not src or src in srcs:
continue
srcs.add(src)
imgs_count += 1
formats.append({
'format_id': 'imagestream-%d' % (imgs_count),
'url': src,
'ext': mimetype2ext(medium.get('type')),
'acodec': 'none',
'vcodec': 'none',
'width': int_or_none(medium.get('width')),
'height': int_or_none(medium.get('height')),
'format_note': 'SMIL storyboards',
})
return formats
def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
urls = []
subtitles = {}
for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
src = textstream.get('src')
if not src or src in urls:
continue
urls.append(src)
ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
subtitles.setdefault(lang, []).append({
'url': src,
'ext': ext,
})
return subtitles
def _extract_xspf_playlist(self, xspf_url, playlist_id, fatal=True):
xspf = self._download_xml(
            xspf_url, playlist_id, 'Downloading xspf playlist',
'Unable to download xspf manifest', fatal=fatal)
if xspf is False:
return []
return self._parse_xspf(
xspf, playlist_id, xspf_url=xspf_url,
xspf_base_url=base_url(xspf_url))
def _parse_xspf(self, xspf_doc, playlist_id, xspf_url=None, xspf_base_url=None):
NS_MAP = {
'xspf': 'http://xspf.org/ns/0/',
's1': 'http://static.streamone.nl/player/ns/0',
}
entries = []
for track in xspf_doc.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
title = xpath_text(
track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
description = xpath_text(
track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
thumbnail = xpath_text(
track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
duration = float_or_none(
xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)
formats = []
for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP)):
format_url = urljoin(xspf_base_url, location.text)
if not format_url:
continue
formats.append({
'url': format_url,
'manifest_url': xspf_url,
'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
})
self._sort_formats(formats)
entries.append({
'id': playlist_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
})
return entries
def _extract_mpd_formats(self, *args, **kwargs):
fmts, subs = self._extract_mpd_formats_and_subtitles(*args, **kwargs)
if subs:
self._report_ignoring_subs('DASH')
return fmts
def _extract_mpd_formats_and_subtitles(
self, mpd_url, video_id, mpd_id=None, note=None, errnote=None,
fatal=True, data=None, headers={}, query={}):
res = self._download_xml_handle(
mpd_url, video_id,
note='Downloading MPD manifest' if note is None else note,
errnote='Failed to download MPD manifest' if errnote is None else errnote,
fatal=fatal, data=data, headers=headers, query=query)
if res is False:
return [], {}
mpd_doc, urlh = res
if mpd_doc is None:
return [], {}
mpd_base_url = base_url(urlh.geturl())
return self._parse_mpd_formats_and_subtitles(
mpd_doc, mpd_id, mpd_base_url, mpd_url)
def _parse_mpd_formats(self, *args, **kwargs):
fmts, subs = self._parse_mpd_formats_and_subtitles(*args, **kwargs)
if subs:
self._report_ignoring_subs('DASH')
return fmts
def _parse_mpd_formats_and_subtitles(
self, mpd_doc, mpd_id=None, mpd_base_url='', mpd_url=None):
"""
Parse formats from MPD manifest.
References:
1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
"""
if not self.get_param('dynamic_mpd', True):
if mpd_doc.get('type') == 'dynamic':
return [], {}
namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)
def _add_ns(path):
return self._xpath_ns(path, namespace)
def is_drm_protected(element):
return element.find(_add_ns('ContentProtection')) is not None
def extract_multisegment_info(element, ms_parent_info):
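            # Collect SegmentList/SegmentTemplate/SegmentTimeline info for this
            # element, layered on top of the parent's info
            # (Period -> AdaptationSet -> Representation).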
ms_info = ms_parent_info.copy()
            # As per [1, 5.3.9.2.2] SegmentList and SegmentTemplate share some
            # common attributes and elements. We will only extract those
            # relevant for us.
def extract_common(source):
segment_timeline = source.find(_add_ns('SegmentTimeline'))
if segment_timeline is not None:
s_e = segment_timeline.findall(_add_ns('S'))
if s_e:
ms_info['total_number'] = 0
ms_info['s'] = []
for s in s_e:
r = int(s.get('r', 0))
ms_info['total_number'] += 1 + r
ms_info['s'].append({
't': int(s.get('t', 0)),
# @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
'd': int(s.attrib['d']),
'r': r,
})
start_number = source.get('startNumber')
if start_number:
ms_info['start_number'] = int(start_number)
timescale = source.get('timescale')
if timescale:
ms_info['timescale'] = int(timescale)
segment_duration = source.get('duration')
if segment_duration:
ms_info['segment_duration'] = float(segment_duration)
def extract_Initialization(source):
initialization = source.find(_add_ns('Initialization'))
if initialization is not None:
ms_info['initialization_url'] = initialization.attrib['sourceURL']
segment_list = element.find(_add_ns('SegmentList'))
if segment_list is not None:
extract_common(segment_list)
extract_Initialization(segment_list)
segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
if segment_urls_e:
ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
else:
segment_template = element.find(_add_ns('SegmentTemplate'))
if segment_template is not None:
extract_common(segment_template)
media = segment_template.get('media')
if media:
ms_info['media'] = media
initialization = segment_template.get('initialization')
if initialization:
ms_info['initialization'] = initialization
else:
extract_Initialization(segment_template)
return ms_info
mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
formats, subtitles = [], {}
stream_numbers = collections.defaultdict(int)
for period in mpd_doc.findall(_add_ns('Period')):
period_duration = parse_duration(period.get('duration')) or mpd_duration
period_ms_info = extract_multisegment_info(period, {
'start_number': 1,
'timescale': 1,
})
for adaptation_set in period.findall(_add_ns('AdaptationSet')):
adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
for representation in adaptation_set.findall(_add_ns('Representation')):
representation_attrib = adaptation_set.attrib.copy()
representation_attrib.update(representation.attrib)
# According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
mime_type = representation_attrib['mimeType']
content_type = representation_attrib.get('contentType', mime_type.split('/')[0])
codecs = parse_codecs(representation_attrib.get('codecs', ''))
if content_type not in ('video', 'audio', 'text'):
if mime_type == 'image/jpeg':
content_type = mime_type
elif codecs['vcodec'] != 'none':
content_type = 'video'
elif codecs['acodec'] != 'none':
content_type = 'audio'
elif codecs.get('tcodec', 'none') != 'none':
content_type = 'text'
elif mimetype2ext(mime_type) in ('tt', 'dfxp', 'ttml', 'xml', 'json'):
content_type = 'text'
else:
self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
continue
base_url = ''
for element in (representation, adaptation_set, period, mpd_doc):
base_url_e = element.find(_add_ns('BaseURL'))
if base_url_e is not None:
base_url = base_url_e.text + base_url
if re.match(r'^https?://', base_url):
break
if mpd_base_url and base_url.startswith('/'):
base_url = compat_urlparse.urljoin(mpd_base_url, base_url)
elif mpd_base_url and not re.match(r'^https?://', base_url):
if not mpd_base_url.endswith('/'):
mpd_base_url += '/'
base_url = mpd_base_url + base_url
representation_id = representation_attrib.get('id')
lang = representation_attrib.get('lang')
url_el = representation.find(_add_ns('BaseURL'))
filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
bandwidth = int_or_none(representation_attrib.get('bandwidth'))
if representation_id is not None:
format_id = representation_id
else:
format_id = content_type
if mpd_id:
format_id = mpd_id + '-' + format_id
if content_type in ('video', 'audio'):
f = {
'format_id': format_id,
'manifest_url': mpd_url,
'ext': mimetype2ext(mime_type),
'width': int_or_none(representation_attrib.get('width')),
'height': int_or_none(representation_attrib.get('height')),
'tbr': float_or_none(bandwidth, 1000),
'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
'fps': int_or_none(representation_attrib.get('frameRate')),
'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
'format_note': 'DASH %s' % content_type,
'filesize': filesize,
'container': mimetype2ext(mime_type) + '_dash',
**codecs
}
elif content_type == 'text':
f = {
'ext': mimetype2ext(mime_type),
'manifest_url': mpd_url,
'filesize': filesize,
}
elif content_type == 'image/jpeg':
# See test case in VikiIE
# https://www.viki.com/videos/1175236v-choosing-spouse-by-lottery-episode-1
f = {
'format_id': format_id,
'ext': 'mhtml',
'manifest_url': mpd_url,
'format_note': 'DASH storyboards (jpeg)',
'acodec': 'none',
'vcodec': 'none',
}
if is_drm_protected(adaptation_set) or is_drm_protected(representation):
f['has_drm'] = True
representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
def prepare_template(template_name, identifiers):
tmpl = representation_ms_info[template_name]
                        # First of all, % characters outside $...$ templates
# must be escaped by doubling for proper processing
# by % operator string formatting used further (see
# https://github.com/ytdl-org/youtube-dl/issues/16867).
t = ''
in_template = False
for c in tmpl:
t += c
if c == '$':
in_template = not in_template
elif c == '%' and not in_template:
t += c
# Next, $...$ templates are translated to their
# %(...) counterparts to be used with % operator
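                        # e.g. '$Number%05d$' -> '%(Number)05d', '$Bandwidth$' -> '%(Bandwidth)d'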
if representation_id is not None:
t = t.replace('$RepresentationID$', representation_id)
t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t)
t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
                        t = t.replace('$$', '$')  # '$$' is an escape sequence for a literal '$'
return t
# @initialization is a regular template like @media one
# so it should be handled just the same way (see
# https://github.com/ytdl-org/youtube-dl/issues/11605)
if 'initialization' in representation_ms_info:
initialization_template = prepare_template(
'initialization',
# As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and
# $Time$ shall not be included for @initialization thus
# only $Bandwidth$ remains
('Bandwidth', ))
representation_ms_info['initialization_url'] = initialization_template % {
'Bandwidth': bandwidth,
}
def location_key(location):
return 'url' if re.match(r'^https?://', location) else 'path'
if 'segment_urls' not in representation_ms_info and 'media' in representation_ms_info:
media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
media_location_key = location_key(media_template)
# As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
# can't be used at the same time
if '%(Number' in media_template and 's' not in representation_ms_info:
segment_duration = None
if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
representation_ms_info['total_number'] = int(math.ceil(
float_or_none(period_duration, segment_duration, default=0)))
representation_ms_info['fragments'] = [{
media_location_key: media_template % {
'Number': segment_number,
'Bandwidth': bandwidth,
},
'duration': segment_duration,
} for segment_number in range(
representation_ms_info['start_number'],
representation_ms_info['total_number'] + representation_ms_info['start_number'])]
else:
# $Number*$ or $Time$ in media template with S list available
# Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
# Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
representation_ms_info['fragments'] = []
segment_time = 0
segment_d = None
segment_number = representation_ms_info['start_number']
def add_segment_url():
segment_url = media_template % {
'Time': segment_time,
'Bandwidth': bandwidth,
'Number': segment_number,
}
representation_ms_info['fragments'].append({
media_location_key: segment_url,
'duration': float_or_none(segment_d, representation_ms_info['timescale']),
})
for num, s in enumerate(representation_ms_info['s']):
segment_time = s.get('t') or segment_time
segment_d = s['d']
add_segment_url()
segment_number += 1
for r in range(s.get('r', 0)):
segment_time += segment_d
add_segment_url()
segment_number += 1
segment_time += segment_d
elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info:
# No media template
# Example: https://www.youtube.com/watch?v=iXZV5uAYMJI
# or any YouTube dashsegments video
fragments = []
segment_index = 0
timescale = representation_ms_info['timescale']
for s in representation_ms_info['s']:
duration = float_or_none(s['d'], timescale)
for r in range(s.get('r', 0) + 1):
segment_uri = representation_ms_info['segment_urls'][segment_index]
fragments.append({
location_key(segment_uri): segment_uri,
'duration': duration,
})
segment_index += 1
representation_ms_info['fragments'] = fragments
elif 'segment_urls' in representation_ms_info:
# Segment URLs with no SegmentTimeline
# Example: https://www.seznam.cz/zpravy/clanek/cesko-zasahne-vitr-o-sile-vichrice-muze-byt-i-zivotu-nebezpecny-39091
# https://github.com/ytdl-org/youtube-dl/pull/14844
fragments = []
segment_duration = float_or_none(
representation_ms_info['segment_duration'],
representation_ms_info['timescale']) if 'segment_duration' in representation_ms_info else None
for segment_url in representation_ms_info['segment_urls']:
fragment = {
location_key(segment_url): segment_url,
}
if segment_duration:
fragment['duration'] = segment_duration
fragments.append(fragment)
representation_ms_info['fragments'] = fragments
# If there is a fragments key available then we correctly recognized fragmented media.
# Otherwise we will assume unfragmented media with direct access. Technically, such
# assumption is not necessarily correct since we may simply have no support for
# some forms of fragmented media renditions yet, but for now we'll use this fallback.
if 'fragments' in representation_ms_info:
f.update({
# NB: mpd_url may be empty when MPD manifest is parsed from a string
'url': mpd_url or base_url,
'fragment_base_url': base_url,
'fragments': [],
'protocol': 'http_dash_segments' if mime_type != 'image/jpeg' else 'mhtml',
})
if 'initialization_url' in representation_ms_info:
initialization_url = representation_ms_info['initialization_url']
if not f.get('url'):
f['url'] = initialization_url
f['fragments'].append({location_key(initialization_url): initialization_url})
f['fragments'].extend(representation_ms_info['fragments'])
if not period_duration:
period_duration = try_get(
representation_ms_info,
lambda r: sum(frag['duration'] for frag in r['fragments']), float)
else:
# Assuming direct URL to unfragmented media.
f['url'] = base_url
if content_type in ('video', 'audio', 'image/jpeg'):
f['manifest_stream_number'] = stream_numbers[f['url']]
stream_numbers[f['url']] += 1
formats.append(f)
elif content_type == 'text':
subtitles.setdefault(lang or 'und', []).append(f)
return formats, subtitles
def _extract_ism_formats(self, *args, **kwargs):
fmts, subs = self._extract_ism_formats_and_subtitles(*args, **kwargs)
if subs:
self._report_ignoring_subs('ISM')
return fmts
def _extract_ism_formats_and_subtitles(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
res = self._download_xml_handle(
ism_url, video_id,
note='Downloading ISM manifest' if note is None else note,
errnote='Failed to download ISM manifest' if errnote is None else errnote,
fatal=fatal, data=data, headers=headers, query=query)
if res is False:
return [], {}
ism_doc, urlh = res
if ism_doc is None:
return [], {}
return self._parse_ism_formats_and_subtitles(ism_doc, urlh.geturl(), ism_id)
def _parse_ism_formats_and_subtitles(self, ism_doc, ism_url, ism_id=None):
"""
Parse formats from ISM manifest.
References:
1. [MS-SSTR]: Smooth Streaming Protocol,
https://msdn.microsoft.com/en-us/library/ff469518.aspx
"""
if ism_doc.get('IsLive') == 'TRUE':
return [], {}
duration = int(ism_doc.attrib['Duration'])
timescale = int_or_none(ism_doc.get('TimeScale')) or 10000000
formats = []
subtitles = {}
for stream in ism_doc.findall('StreamIndex'):
stream_type = stream.get('Type')
if stream_type not in ('video', 'audio', 'text'):
continue
url_pattern = stream.attrib['Url']
stream_timescale = int_or_none(stream.get('TimeScale')) or timescale
stream_name = stream.get('Name')
stream_language = stream.get('Language', 'und')
for track in stream.findall('QualityLevel'):
fourcc = track.get('FourCC') or ('AACL' if track.get('AudioTag') == '255' else None)
# TODO: add support for WVC1 and WMAP
if fourcc not in ('H264', 'AVC1', 'AACL', 'TTML'):
self.report_warning('%s is not a supported codec' % fourcc)
continue
tbr = int(track.attrib['Bitrate']) // 1000
# [1] does not mention Width and Height attributes. However,
# they're often present while MaxWidth and MaxHeight are
# missing, so should be used as fallbacks
width = int_or_none(track.get('MaxWidth') or track.get('Width'))
height = int_or_none(track.get('MaxHeight') or track.get('Height'))
sampling_rate = int_or_none(track.get('SamplingRate'))
track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
track_url_pattern = compat_urlparse.urljoin(ism_url, track_url_pattern)
fragments = []
fragment_ctx = {
'time': 0,
}
stream_fragments = stream.findall('c')
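                # Each <c> element describes one or more fragments: @t is the
                # start time and @d the duration (in the stream timescale),
                # while @r repeats the same duration for consecutive fragments.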
for stream_fragment_index, stream_fragment in enumerate(stream_fragments):
fragment_ctx['time'] = int_or_none(stream_fragment.get('t')) or fragment_ctx['time']
fragment_repeat = int_or_none(stream_fragment.get('r')) or 1
fragment_ctx['duration'] = int_or_none(stream_fragment.get('d'))
if not fragment_ctx['duration']:
try:
                            next_fragment_time = int(stream_fragments[stream_fragment_index + 1].attrib['t'])
except IndexError:
next_fragment_time = duration
fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat
for _ in range(fragment_repeat):
fragments.append({
'url': re.sub(r'{start[ _]time}', compat_str(fragment_ctx['time']), track_url_pattern),
'duration': fragment_ctx['duration'] / stream_timescale,
})
fragment_ctx['time'] += fragment_ctx['duration']
if stream_type == 'text':
subtitles.setdefault(stream_language, []).append({
'ext': 'ismt',
'protocol': 'ism',
'url': ism_url,
'manifest_url': ism_url,
'fragments': fragments,
'_download_params': {
'stream_type': stream_type,
'duration': duration,
'timescale': stream_timescale,
'fourcc': fourcc,
'language': stream_language,
'codec_private_data': track.get('CodecPrivateData'),
}
})
elif stream_type in ('video', 'audio'):
formats.append({
'format_id': join_nonempty(ism_id, stream_name, tbr),
'url': ism_url,
'manifest_url': ism_url,
'ext': 'ismv' if stream_type == 'video' else 'isma',
'width': width,
'height': height,
'tbr': tbr,
'asr': sampling_rate,
'vcodec': 'none' if stream_type == 'audio' else fourcc,
'acodec': 'none' if stream_type == 'video' else fourcc,
'protocol': 'ism',
'fragments': fragments,
'has_drm': ism_doc.find('Protection') is not None,
'_download_params': {
'stream_type': stream_type,
'duration': duration,
'timescale': stream_timescale,
'width': width or 0,
'height': height or 0,
'fourcc': fourcc,
'language': stream_language,
'codec_private_data': track.get('CodecPrivateData'),
'sampling_rate': sampling_rate,
'channels': int_or_none(track.get('Channels', 2)),
'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)),
'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)),
},
})
return formats, subtitles
def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8_native', mpd_id=None, preference=None, quality=None):
def absolute_url(item_url):
return urljoin(base_url, item_url)
def parse_content_type(content_type):
if not content_type:
return {}
ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
if ctr:
mimetype, codecs = ctr.groups()
f = parse_codecs(codecs)
f['ext'] = mimetype2ext(mimetype)
return f
return {}
def _media_formats(src, cur_media_type, type_info={}):
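            # Expand a media src into formats: m3u8 and mpd sources are parsed
            # as manifests, anything else is treated as a single direct URL.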
full_url = absolute_url(src)
ext = type_info.get('ext') or determine_ext(full_url)
if ext == 'm3u8':
is_plain_url = False
formats = self._extract_m3u8_formats(
full_url, video_id, ext='mp4',
entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id,
preference=preference, quality=quality, fatal=False)
elif ext == 'mpd':
is_plain_url = False
formats = self._extract_mpd_formats(
full_url, video_id, mpd_id=mpd_id, fatal=False)
else:
is_plain_url = True
formats = [{
'url': full_url,
'vcodec': 'none' if cur_media_type == 'audio' else None,
}]
return is_plain_url, formats
entries = []
# amp-video and amp-audio are very similar to their HTML5 counterparts
        # so we will include them right here (see
# https://www.ampproject.org/docs/reference/components/amp-video)
# For dl8-* tags see https://delight-vr.com/documentation/dl8-video/
_MEDIA_TAG_NAME_RE = r'(?:(?:amp|dl8(?:-live)?)-)?(video|audio)'
media_tags = [(media_tag, media_tag_name, media_type, '')
for media_tag, media_tag_name, media_type
in re.findall(r'(?s)(<(%s)[^>]*/>)' % _MEDIA_TAG_NAME_RE, webpage)]
media_tags.extend(re.findall(
# We only allow video|audio followed by a whitespace or '>'.
            # Allowing more characters may result in a significant slowdown (see
# https://github.com/ytdl-org/youtube-dl/issues/11979, example URL:
# http://www.porntrex.com/maps/videositemap.xml).
r'(?s)(<(?P<tag>%s)(?:\s+[^>]*)?>)(.*?)</(?P=tag)>' % _MEDIA_TAG_NAME_RE, webpage))
for media_tag, _, media_type, media_content in media_tags:
media_info = {
'formats': [],
'subtitles': {},
}
media_attributes = extract_attributes(media_tag)
src = strip_or_none(media_attributes.get('src'))
if src:
_, formats = _media_formats(src, media_type)
media_info['formats'].extend(formats)
media_info['thumbnail'] = absolute_url(media_attributes.get('poster'))
if media_content:
for source_tag in re.findall(r'<source[^>]+>', media_content):
s_attr = extract_attributes(source_tag)
# data-video-src and data-src are non standard but seen
# several times in the wild
src = strip_or_none(dict_get(s_attr, ('src', 'data-video-src', 'data-src')))
if not src:
continue
f = parse_content_type(s_attr.get('type'))
is_plain_url, formats = _media_formats(src, media_type, f)
if is_plain_url:
# width, height, res, label and title attributes are
# all not standard but seen several times in the wild
labels = [
s_attr.get(lbl)
for lbl in ('label', 'title')
if str_or_none(s_attr.get(lbl))
]
width = int_or_none(s_attr.get('width'))
height = (int_or_none(s_attr.get('height'))
or int_or_none(s_attr.get('res')))
if not width or not height:
for lbl in labels:
resolution = parse_resolution(lbl)
if not resolution:
continue
width = width or resolution.get('width')
height = height or resolution.get('height')
for lbl in labels:
tbr = parse_bitrate(lbl)
if tbr:
break
else:
tbr = None
f.update({
'width': width,
'height': height,
'tbr': tbr,
'format_id': s_attr.get('label') or s_attr.get('title'),
})
f.update(formats[0])
media_info['formats'].append(f)
else:
media_info['formats'].extend(formats)
for track_tag in re.findall(r'<track[^>]+>', media_content):
track_attributes = extract_attributes(track_tag)
kind = track_attributes.get('kind')
if not kind or kind in ('subtitles', 'captions'):
src = strip_or_none(track_attributes.get('src'))
if not src:
continue
lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
media_info['subtitles'].setdefault(lang, []).append({
'url': absolute_url(src),
})
for f in media_info['formats']:
f.setdefault('http_headers', {})['Referer'] = base_url
if media_info['formats'] or media_info['subtitles']:
entries.append(media_info)
return entries
def _extract_akamai_formats(self, *args, **kwargs):
fmts, subs = self._extract_akamai_formats_and_subtitles(*args, **kwargs)
if subs:
self._report_ignoring_subs('akamai')
return fmts
def _extract_akamai_formats_and_subtitles(self, manifest_url, video_id, hosts={}):
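        # Akamai streams usually expose parallel HDS (f4m), HLS (m3u8) and
        # progressive HTTP renditions; the variant URLs are derived from the
        # given manifest URL, optionally rewritten with the per-protocol hosts.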
signed = 'hdnea=' in manifest_url
if not signed:
# https://learn.akamai.com/en-us/webhelp/media-services-on-demand/stream-packaging-user-guide/GUID-BE6C0F73-1E06-483B-B0EA-57984B91B7F9.html
manifest_url = re.sub(
r'(?:b=[\d,-]+|(?:__a__|attributes)=off|__b__=\d+)&?',
'', manifest_url).strip('?')
formats = []
subtitles = {}
hdcore_sign = 'hdcore=3.7.0'
f4m_url = re.sub(r'(https?://[^/]+)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
hds_host = hosts.get('hds')
if hds_host:
f4m_url = re.sub(r'(https?://)[^/]+', r'\1' + hds_host, f4m_url)
if 'hdcore=' not in f4m_url:
f4m_url += ('&' if '?' in f4m_url else '?') + hdcore_sign
f4m_formats = self._extract_f4m_formats(
f4m_url, video_id, f4m_id='hds', fatal=False)
for entry in f4m_formats:
entry.update({'extra_param_to_segment_url': hdcore_sign})
formats.extend(f4m_formats)
m3u8_url = re.sub(r'(https?://[^/]+)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8')
hls_host = hosts.get('hls')
if hls_host:
m3u8_url = re.sub(r'(https?://)[^/]+', r'\1' + hls_host, m3u8_url)
m3u8_formats, m3u8_subtitles = self._extract_m3u8_formats_and_subtitles(
m3u8_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False)
formats.extend(m3u8_formats)
subtitles = self._merge_subtitles(subtitles, m3u8_subtitles)
http_host = hosts.get('http')
if http_host and m3u8_formats and not signed:
REPL_REGEX = r'https?://[^/]+/i/([^,]+),([^/]+),([^/]+)\.csmil/.+'
qualities = re.match(REPL_REGEX, m3u8_url).group(2).split(',')
qualities_length = len(qualities)
if len(m3u8_formats) in (qualities_length, qualities_length + 1):
i = 0
for f in m3u8_formats:
if f['vcodec'] != 'none':
for protocol in ('http', 'https'):
http_f = f.copy()
del http_f['manifest_url']
http_url = re.sub(
REPL_REGEX, protocol + r'://%s/\g<1>%s\3' % (http_host, qualities[i]), f['url'])
http_f.update({
'format_id': http_f['format_id'].replace('hls-', protocol + '-'),
'url': http_url,
'protocol': protocol,
})
formats.append(http_f)
i += 1
return formats, subtitles
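    # Probe a Wowza Streaming Engine URL for HLS, HDS, DASH, SMIL/RTMP and RTSP
    # renditions, skipping any protocols listed in skip_protocols.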
def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
query = compat_urlparse.urlparse(url).query
url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
mobj = re.search(
r'(?:(?:http|rtmp|rtsp)(?P<s>s)?:)?(?P<url>//[^?]+)', url)
url_base = mobj.group('url')
http_base_url = '%s%s:%s' % ('http', mobj.group('s') or '', url_base)
formats = []
def manifest_url(manifest):
m_url = '%s/%s' % (http_base_url, manifest)
if query:
m_url += '?%s' % query
return m_url
if 'm3u8' not in skip_protocols:
formats.extend(self._extract_m3u8_formats(
manifest_url('playlist.m3u8'), video_id, 'mp4',
m3u8_entry_protocol, m3u8_id='hls', fatal=False))
if 'f4m' not in skip_protocols:
formats.extend(self._extract_f4m_formats(
manifest_url('manifest.f4m'),
video_id, f4m_id='hds', fatal=False))
if 'dash' not in skip_protocols:
formats.extend(self._extract_mpd_formats(
manifest_url('manifest.mpd'),
video_id, mpd_id='dash', fatal=False))
if re.search(r'(?:/smil:|\.smil)', url_base):
if 'smil' not in skip_protocols:
rtmp_formats = self._extract_smil_formats(
manifest_url('jwplayer.smil'),
video_id, fatal=False)
for rtmp_format in rtmp_formats:
rtsp_format = rtmp_format.copy()
rtsp_format['url'] = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
del rtsp_format['play_path']
del rtsp_format['ext']
rtsp_format.update({
'url': rtsp_format['url'].replace('rtmp://', 'rtsp://'),
'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
'protocol': 'rtsp',
})
formats.extend([rtmp_format, rtsp_format])
else:
for protocol in ('rtmp', 'rtsp'):
if protocol not in skip_protocols:
formats.append({
'url': '%s:%s' % (protocol, url_base),
'format_id': protocol,
'protocol': protocol,
})
return formats
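    # Locate a jwplayer("...").setup({...}) call in the page and return its options
    # object parsed as JSON, or None if it is absent or cannot be parsed.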
def _find_jwplayer_data(self, webpage, video_id=None, transform_source=js_to_json):
mobj = re.search(
r'(?s)jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)(?!</script>).*?\.setup\s*\((?P<options>[^)]+)\)',
webpage)
if mobj:
try:
jwplayer_data = self._parse_json(mobj.group('options'),
video_id=video_id,
transform_source=transform_source)
except ExtractorError:
pass
else:
if isinstance(jwplayer_data, dict):
return jwplayer_data
def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
jwplayer_data = self._find_jwplayer_data(
webpage, video_id, transform_source=js_to_json)
return self._parse_jwplayer_data(
jwplayer_data, video_id, *args, **kwargs)
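    # Turn a JWPlayer setup/playlist structure into extractor entries with formats,
    # subtitles and basic metadata.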
def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True,
m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
# JWPlayer backward compatibility: flattened playlists
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96
if 'playlist' not in jwplayer_data:
jwplayer_data = {'playlist': [jwplayer_data]}
entries = []
# JWPlayer backward compatibility: single playlist item
# https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.js#L10
if not isinstance(jwplayer_data['playlist'], list):
jwplayer_data['playlist'] = [jwplayer_data['playlist']]
for video_data in jwplayer_data['playlist']:
# JWPlayer backward compatibility: flattened sources
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35
if 'sources' not in video_data:
video_data['sources'] = [video_data]
this_video_id = video_id or video_data['mediaid']
formats = self._parse_jwplayer_formats(
video_data['sources'], video_id=this_video_id, m3u8_id=m3u8_id,
mpd_id=mpd_id, rtmp_params=rtmp_params, base_url=base_url)
subtitles = {}
tracks = video_data.get('tracks')
if tracks and isinstance(tracks, list):
for track in tracks:
if not isinstance(track, dict):
continue
track_kind = track.get('kind')
if not track_kind or not isinstance(track_kind, compat_str):
continue
if track_kind.lower() not in ('captions', 'subtitles'):
continue
track_url = urljoin(base_url, track.get('file'))
if not track_url:
continue
subtitles.setdefault(track.get('label') or 'en', []).append({
'url': self._proto_relative_url(track_url)
})
entry = {
'id': this_video_id,
'title': unescapeHTML(video_data['title'] if require_title else video_data.get('title')),
'description': clean_html(video_data.get('description')),
'thumbnail': urljoin(base_url, self._proto_relative_url(video_data.get('image'))),
'timestamp': int_or_none(video_data.get('pubdate')),
'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')),
'subtitles': subtitles,
}
# https://github.com/jwplayer/jwplayer/blob/master/src/js/utils/validator.js#L32
if len(formats) == 1 and re.search(r'^(?:http|//).*(?:youtube\.com|youtu\.be)/.+', formats[0]['url']):
entry.update({
'_type': 'url_transparent',
'url': formats[0]['url'],
})
else:
self._sort_formats(formats)
entry['formats'] = formats
entries.append(entry)
if len(entries) == 1:
return entries[0]
else:
return self.playlist_result(entries)
def _parse_jwplayer_formats(self, jwplayer_sources_data, video_id=None,
m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
urls = []
formats = []
for source in jwplayer_sources_data:
if not isinstance(source, dict):
continue
source_url = urljoin(
base_url, self._proto_relative_url(source.get('file')))
if not source_url or source_url in urls:
continue
urls.append(source_url)
source_type = source.get('type') or ''
ext = mimetype2ext(source_type) or determine_ext(source_url)
if source_type == 'hls' or ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
source_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id=m3u8_id, fatal=False))
elif source_type == 'dash' or ext == 'mpd':
formats.extend(self._extract_mpd_formats(
source_url, video_id, mpd_id=mpd_id, fatal=False))
elif ext == 'smil':
formats.extend(self._extract_smil_formats(
source_url, video_id, fatal=False))
# https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67
elif source_type.startswith('audio') or ext in (
'oga', 'aac', 'mp3', 'mpeg', 'vorbis'):
formats.append({
'url': source_url,
'vcodec': 'none',
'ext': ext,
})
else:
height = int_or_none(source.get('height'))
if height is None:
# Often no height is provided but there is a label in
# format like "1080p", "720p SD", or 1080.
height = int_or_none(self._search_regex(
r'^(\d{3,4})[pP]?(?:\b|$)', compat_str(source.get('label') or ''),
'height', default=None))
a_format = {
'url': source_url,
'width': int_or_none(source.get('width')),
'height': height,
'tbr': int_or_none(source.get('bitrate')),
'ext': ext,
}
if source_url.startswith('rtmp'):
a_format['ext'] = 'flv'
# See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as
# of jwplayer.flash.swf
rtmp_url_parts = re.split(
r'((?:mp4|mp3|flv):)', source_url, 1)
if len(rtmp_url_parts) == 3:
rtmp_url, prefix, play_path = rtmp_url_parts
a_format.update({
'url': rtmp_url,
'play_path': prefix + play_path,
})
if rtmp_params:
a_format.update(rtmp_params)
formats.append(a_format)
return formats
def _live_title(self, name):
self._downloader.deprecation_warning('yt_dlp.InfoExtractor._live_title is deprecated and does not work as expected')
return name
def _int(self, v, name, fatal=False, **kwargs):
res = int_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self.report_warning(msg)
return res
def _float(self, v, name, fatal=False, **kwargs):
res = float_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self.report_warning(msg)
return res
def _set_cookie(self, domain, name, value, expire_time=None, port=None,
path='/', secure=False, discard=False, rest={}, **kwargs):
cookie = compat_cookiejar_Cookie(
0, name, value, port, port is not None, domain, True,
domain.startswith('.'), path, True, secure, expire_time,
discard, None, None, rest)
self._downloader.cookiejar.set_cookie(cookie)
def _get_cookies(self, url):
""" Return a compat_cookies_SimpleCookie with the cookies for the url """
req = sanitized_Request(url)
self._downloader.cookiejar.add_cookie_header(req)
return compat_cookies_SimpleCookie(req.get_header('Cookie'))
def _apply_first_set_cookie_header(self, url_handle, cookie):
"""
Apply first Set-Cookie header instead of the last. Experimental.
Some sites (e.g. [1-3]) may serve two cookies under the same name
in Set-Cookie header and expect the first (old) one to be set rather
        than the second (new) one. However, per RFC 6265 the newer cookie
        should end up in the cookie store, which is what actually happens.
        We work around this issue by manually resetting the cookie to
        the first one.
1. https://new.vk.com/
2. https://github.com/ytdl-org/youtube-dl/issues/9841#issuecomment-227871201
3. https://learning.oreilly.com/
"""
for header, cookies in url_handle.headers.items():
if header.lower() != 'set-cookie':
continue
if sys.version_info[0] >= 3:
cookies = cookies.encode('iso-8859-1')
cookies = cookies.decode('utf-8')
cookie_value = re.search(
r'%s=(.+?);.*?\b[Dd]omain=(.+?)(?:[,;]|$)' % cookie, cookies)
if cookie_value:
value, domain = cookie_value.groups()
self._set_cookie(domain, cookie, value)
break
def get_testcases(self, include_onlymatching=False):
t = getattr(self, '_TEST', None)
if t:
assert not hasattr(self, '_TESTS'), \
'%s has _TEST and _TESTS' % type(self).__name__
tests = [t]
else:
tests = getattr(self, '_TESTS', [])
for t in tests:
if not include_onlymatching and t.get('only_matching', False):
continue
t['name'] = type(self).__name__[:-len('IE')]
yield t
def is_suitable(self, age_limit):
""" Test whether the extractor is generally suitable for the given
age limit (i.e. pornographic sites are not, all others usually are) """
any_restricted = False
for tc in self.get_testcases(include_onlymatching=False):
if tc.get('playlist', []):
tc = tc['playlist'][0]
is_restricted = age_restricted(
tc.get('info_dict', {}).get('age_limit'), age_limit)
if not is_restricted:
return True
any_restricted = any_restricted or is_restricted
return not any_restricted
def extract_subtitles(self, *args, **kwargs):
if (self.get_param('writesubtitles', False)
or self.get_param('listsubtitles')):
return self._get_subtitles(*args, **kwargs)
return {}
def _get_subtitles(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def extract_comments(self, *args, **kwargs):
if not self.get_param('getcomments'):
return None
generator = self._get_comments(*args, **kwargs)
def extractor():
comments = []
interrupted = True
try:
while True:
comments.append(next(generator))
except StopIteration:
interrupted = False
except KeyboardInterrupt:
self.to_screen('Interrupted by user')
except Exception as e:
if self.get_param('ignoreerrors') is not True:
raise
self._downloader.report_error(e)
comment_count = len(comments)
self.to_screen(f'Extracted {comment_count} comments')
return {
'comments': comments,
'comment_count': None if interrupted else comment_count
}
return extractor
def _get_comments(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
""" Merge subtitle items for one language. Items with duplicated URLs
will be dropped. """
list1_urls = set([item['url'] for item in subtitle_list1])
ret = list(subtitle_list1)
ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
return ret
@classmethod
def _merge_subtitles(cls, *dicts, target=None):
""" Merge subtitle dictionaries, language by language. """
if target is None:
target = {}
for d in dicts:
for lang, subs in d.items():
target[lang] = cls._merge_subtitle_items(target.get(lang, []), subs)
return target
def extract_automatic_captions(self, *args, **kwargs):
if (self.get_param('writeautomaticsub', False)
or self.get_param('listsubtitles')):
return self._get_automatic_captions(*args, **kwargs)
return {}
def _get_automatic_captions(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def mark_watched(self, *args, **kwargs):
if not self.get_param('mark_watched', False):
return
if (self._get_login_info()[0] is not None
or self.get_param('cookiefile')
or self.get_param('cookiesfrombrowser')):
self._mark_watched(*args, **kwargs)
def _mark_watched(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def geo_verification_headers(self):
headers = {}
geo_verification_proxy = self.get_param('geo_verification_proxy')
if geo_verification_proxy:
headers['Ytdl-request-proxy'] = geo_verification_proxy
return headers
def _generic_id(self, url):
return compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
def _generic_title(self, url):
return compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0])
@staticmethod
def _availability(is_private=None, needs_premium=None, needs_subscription=None, needs_auth=None, is_unlisted=None):
all_known = all(map(
lambda x: x is not None,
(is_private, needs_premium, needs_subscription, needs_auth, is_unlisted)))
return (
'private' if is_private
else 'premium_only' if needs_premium
else 'subscriber_only' if needs_subscription
else 'needs_auth' if needs_auth
else 'unlisted' if is_unlisted
else 'public' if all_known
else None)
def _configuration_arg(self, key, default=NO_DEFAULT, *, ie_key=None, casesense=False):
'''
@returns A list of values for the extractor argument given by "key"
or "default" if no such key is present
@param default The default value to return when the key is not present (default: [])
@param casesense When false, the values are converted to lower case
'''
val = traverse_obj(
self._downloader.params, ('extractor_args', (ie_key or self.ie_key()).lower(), key))
if val is None:
return [] if default is NO_DEFAULT else default
return list(val) if casesense else [x.lower() for x in val]
def _yes_playlist(self, playlist_id, video_id, smuggled_data=None, *, playlist_label='playlist', video_label='video'):
if not playlist_id or not video_id:
return not video_id
no_playlist = (smuggled_data or {}).get('force_noplaylist')
if no_playlist is not None:
return not no_playlist
video_id = '' if video_id is True else f' {video_id}'
playlist_id = '' if playlist_id is True else f' {playlist_id}'
if self.get_param('noplaylist'):
self.to_screen(f'Downloading just the {video_label}{video_id} because of --no-playlist')
return False
self.to_screen(f'Downloading {playlist_label}{playlist_id} - add --no-playlist to download just the {video_label}{video_id}')
return True
class SearchInfoExtractor(InfoExtractor):
"""
    Base class for paged search query extractors.
They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
Instances should define _SEARCH_KEY and optionally _MAX_RESULTS
"""
_MAX_RESULTS = float('inf')
@classmethod
def _make_valid_url(cls):
return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
def _real_extract(self, query):
prefix, query = self._match_valid_url(query).group('prefix', 'query')
if prefix == '':
return self._get_n_results(query, 1)
elif prefix == 'all':
return self._get_n_results(query, self._MAX_RESULTS)
else:
n = int(prefix)
if n <= 0:
raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
elif n > self._MAX_RESULTS:
self.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
n = self._MAX_RESULTS
return self._get_n_results(query, n)
def _get_n_results(self, query, n):
"""Get a specified number of results for a query.
Either this function or _search_results must be overridden by subclasses """
return self.playlist_result(
itertools.islice(self._search_results(query), 0, None if n == float('inf') else n),
query, query)
def _search_results(self, query):
"""Returns an iterator of search results"""
raise NotImplementedError('This method must be implemented by subclasses')
@property
def SEARCH_KEY(self):
return self._SEARCH_KEY
|
the-stack_106_22492 | #!/usr/bin/env python
# CREATED:2014-01-18 14:09:05 by Brian McFee <[email protected]>
# unit tests for util routines
# Disable cache
import os
try:
os.environ.pop('LIBROSA_CACHE_DIR')
except KeyError:
pass
import platform
import numpy as np
import scipy.sparse
from nose.tools import raises, eq_
import six
import warnings
import librosa
from test_core import srand
warnings.resetwarnings()
warnings.simplefilter('always')
np.set_printoptions(precision=3)
def test_example_audio_file():
assert os.path.exists(librosa.util.example_audio_file())
def test_frame():
# Generate a random time series
def __test(P):
srand()
frame, hop = P
y = np.random.randn(8000)
y_frame = librosa.util.frame(y, frame_length=frame, hop_length=hop)
for i in range(y_frame.shape[1]):
assert np.allclose(y_frame[:, i], y[i * hop:(i * hop + frame)])
for frame in [256, 1024, 2048]:
for hop_length in [64, 256, 512]:
yield (__test, [frame, hop_length])
def test_frame_fail():
__test = raises(librosa.ParameterError)(librosa.util.frame)
# First fail, not an ndarray
yield __test, list(range(10)), 5, 1
# Second fail: wrong ndims
yield __test, np.zeros((10, 10)), 5, 1
# Third fail: too short
yield __test, np.zeros(10), 20, 1
# Fourth fail: bad hop length
yield __test, np.zeros(10), 20, -1
# Fifth fail: discontiguous input
yield __test, np.zeros(20)[::2], 10, 1
def test_pad_center():
def __test(y, n, axis, mode):
y_out = librosa.util.pad_center(y, n, axis=axis, mode=mode)
n_len = y.shape[axis]
n_pad = int((n - n_len) / 2)
eq_slice = [slice(None)] * y.ndim
eq_slice[axis] = slice(n_pad, n_pad + n_len)
assert np.allclose(y, y_out[eq_slice])
@raises(librosa.ParameterError)
def __test_fail(y, n, axis, mode):
librosa.util.pad_center(y, n, axis=axis, mode=mode)
for shape in [(16,), (16, 16)]:
y = np.ones(shape)
for axis in [0, -1]:
for mode in ['constant', 'edge', 'reflect']:
for n in [0, 10]:
yield __test, y, n + y.shape[axis], axis, mode
for n in [0, 10]:
yield __test_fail, y, n, axis, mode
def test_fix_length():
def __test(y, n, axis):
y_out = librosa.util.fix_length(y, n, axis=axis)
eq_slice = [slice(None)] * y.ndim
eq_slice[axis] = slice(y.shape[axis])
if n > y.shape[axis]:
assert np.allclose(y, y_out[eq_slice])
else:
assert np.allclose(y[eq_slice], y)
for shape in [(16,), (16, 16)]:
y = np.ones(shape)
for axis in [0, -1]:
for n in [-5, 0, 5]:
yield __test, y, n + y.shape[axis], axis
def test_fix_frames():
srand()
@raises(librosa.ParameterError)
def __test_fail(frames, x_min, x_max, pad):
librosa.util.fix_frames(frames, x_min, x_max, pad)
def __test_pass(frames, x_min, x_max, pad):
f_fix = librosa.util.fix_frames(frames,
x_min=x_min,
x_max=x_max,
pad=pad)
if x_min is not None:
if pad:
assert f_fix[0] == x_min
assert np.all(f_fix >= x_min)
if x_max is not None:
if pad:
assert f_fix[-1] == x_max
assert np.all(f_fix <= x_max)
for low in [-20, 0, 20]:
for high in [low + 20, low + 50, low + 100]:
frames = np.random.randint(low, high=high, size=15)
for x_min in [None, 0, 20]:
for x_max in [None, 20, 100]:
for pad in [False, True]:
if np.any(frames < 0):
yield __test_fail, frames, x_min, x_max, pad
else:
yield __test_pass, frames, x_min, x_max, pad
def test_normalize():
srand()
def __test_pass(X, norm, axis):
X_norm = librosa.util.normalize(X, norm=norm, axis=axis)
# Shape and dtype checks
assert X_norm.dtype == X.dtype
assert X_norm.shape == X.shape
if norm is None:
assert np.allclose(X, X_norm)
return
X_norm = np.abs(X_norm)
if norm == np.inf:
values = np.max(X_norm, axis=axis)
elif norm == -np.inf:
values = np.min(X_norm, axis=axis)
elif norm == 0:
# XXX: normalization here isn't quite right
values = np.ones(1)
else:
values = np.sum(X_norm**norm, axis=axis)**(1./norm)
assert np.allclose(values, np.ones_like(values))
@raises(librosa.ParameterError)
def __test_fail(X, norm, axis):
librosa.util.normalize(X, norm=norm, axis=axis)
for ndims in [1, 2, 3]:
X = np.random.randn(* ([16] * ndims))
for axis in range(X.ndim):
for norm in [np.inf, -np.inf, 0, 0.5, 1.0, 2.0, None]:
yield __test_pass, X, norm, axis
for norm in ['inf', -0.5, -2]:
yield __test_fail, X, norm, axis
# And test for non-finite failure
X[0] = np.nan
yield __test_fail, X, np.inf, 0
X[0] = np.inf
yield __test_fail, X, np.inf, 0
X[0] = -np.inf
yield __test_fail, X, np.inf, 0
def test_normalize_threshold():
x = np.asarray([[0, 1, 2, 3]])
def __test(threshold, result):
assert np.allclose(librosa.util.normalize(x, threshold=threshold),
result)
yield __test, None, [[0, 1, 1, 1]]
yield __test, 1, [[0, 1, 1, 1]]
yield __test, 2, [[0, 1, 1, 1]]
yield __test, 3, [[0, 1, 2, 1]]
yield __test, 4, [[0, 1, 2, 3]]
yield raises(librosa.ParameterError)(__test), 0, [[0, 1, 1, 1]]
yield raises(librosa.ParameterError)(__test), -1, [[0, 1, 1, 1]]
def test_normalize_fill():
def __test(fill, norm, threshold, axis, x, result):
xn = librosa.util.normalize(x,
axis=axis,
fill=fill,
threshold=threshold,
norm=norm)
assert np.allclose(xn, result), (xn, np.asarray(result))
x = np.asarray([[0, 1, 2, 3]], dtype=np.float32)
axis = 0
norm = np.inf
threshold = 2
# Test with inf norm
yield __test, None, norm, threshold, axis, x, [[0, 1, 1, 1]]
yield __test, False, norm, threshold, axis, x, [[0, 0, 1, 1]]
yield __test, True, norm, threshold, axis, x, [[1, 1, 1, 1]]
# Test with l0 norm
norm = 0
yield __test, None, norm, threshold, axis, x, [[0, 1, 2, 3]]
yield __test, False, norm, threshold, axis, x, [[0, 0, 0, 0]]
yield raises(librosa.ParameterError)(__test), True, norm, threshold, axis, x, [[0, 0, 0, 0]]
# Test with l1 norm
norm = 1
yield __test, None, norm, threshold, axis, x, [[0, 1, 1, 1]]
yield __test, False, norm, threshold, axis, x, [[0, 0, 1, 1]]
yield __test, True, norm, threshold, axis, x, [[1, 1, 1, 1]]
# And with l2 norm
norm = 2
x = np.repeat(x, 2, axis=0)
s = np.sqrt(2)/2
# First two columns are left as is, second two map to sqrt(2)/2
yield __test, None, norm, threshold, axis, x, [[0, 1, s, s], [0, 1, s, s]]
# First two columns are zeroed, second two map to sqrt(2)/2
yield __test, False, norm, threshold, axis, x, [[0, 0, s, s], [0, 0, s, s]]
# All columns map to sqrt(2)/2
yield __test, True, norm, threshold, axis, x, [[s, s, s, s], [s, s, s, s]]
# And test the bad-fill case
yield raises(librosa.ParameterError)(__test), 3, norm, threshold, axis, x, x
# And an all-axes test
axis = None
threshold = None
norm = 2
yield __test, None, norm, threshold, axis, np.asarray([[3, 0], [0, 4]]), np.asarray([[0, 0], [0, 0]])
yield __test, None, norm, threshold, axis, np.asarray([[3., 0], [0, 4]]), np.asarray([[0.6, 0], [0, 0.8]])
def test_axis_sort():
srand()
def __test_pass(data, axis, index, value):
if index:
Xsorted, idx = librosa.util.axis_sort(data,
axis=axis,
index=index,
value=value)
cmp_slice = [slice(None)] * X.ndim
cmp_slice[axis] = idx
assert np.allclose(X[cmp_slice], Xsorted)
else:
Xsorted = librosa.util.axis_sort(data,
axis=axis,
index=index,
value=value)
compare_axis = np.mod(1 - axis, 2)
if value is None:
value = np.argmax
sort_values = value(Xsorted, axis=compare_axis)
assert np.allclose(sort_values, np.sort(sort_values))
@raises(librosa.ParameterError)
def __test_fail(data, axis, index, value):
librosa.util.axis_sort(data, axis=axis, index=index, value=value)
for ndim in [1, 2, 3]:
X = np.random.randn(*([10] * ndim))
for axis in [0, 1, -1]:
for index in [False, True]:
for value in [None, np.min, np.mean, np.max]:
if ndim == 2:
yield __test_pass, X, axis, index, value
else:
yield __test_fail, X, axis, index, value
def test_match_intervals_empty():
@raises(librosa.ParameterError)
def __test(int_from, int_to):
librosa.util.match_intervals(int_from, int_to)
ints = np.asarray([[0, 2],
[0, 4],
[3, 6]])
# true matches for the above
yield __test, ints, ints[:0]
yield __test, ints[:0], ints
def test_match_intervals_strict():
def __test(int_from, int_to, matches):
test_matches = librosa.util.match_intervals(int_from, int_to, strict=True)
assert np.array_equal(matches, test_matches)
int_from = np.asarray([[0, 3],
[2, 4],
[5, 7]])
int_to = np.asarray([[0, 2],
[0, 4],
[3, 6]])
# true matches for the above
matches = np.asarray([1, 1, 2])
yield __test, int_from, int_to, matches
# Without the [3, 6] interval, the source [5, 7] has no match
yield raises(librosa.ParameterError)(__test), int_from, int_to[:-1], matches
def test_match_intervals_nonstrict():
def __test(int_from, int_to, matches):
test_matches = librosa.util.match_intervals(int_from, int_to, strict=False)
assert np.array_equal(matches, test_matches)
int_from = np.asarray([[0, 3],
[2, 4],
[5, 7]])
int_to = np.asarray([[0, 2],
[0, 4],
[3, 6]])
# true matches for the above
matches = np.asarray([1, 1, 2])
yield __test, int_from, int_to, matches
# Without the [3, 6] interval, the source [5, 7] should match [2, 4]
yield __test, int_from, int_to[:-1], np.asarray([1, 1, 1])
def test_match_events():
def __make_events(n):
srand()
return np.abs(np.random.randn(n))
def __is_best(y, ev1, ev2):
for i in range(len(y)):
values = np.asarray([np.abs(ev1[i] - e2) for e2 in ev2])
if np.any(values < values[y[i]]):
return False
return True
def __test(n, m):
ev1 = __make_events(n)
ev2 = __make_events(m)
y_pred = librosa.util.match_events(ev1, ev2)
assert __is_best(y_pred, ev1, ev2)
@raises(librosa.ParameterError)
def __test_fail(n, m):
ev1 = __make_events(n)
ev2 = __make_events(m)
librosa.util.match_events(ev1, ev2)
for n in [0, 1, 5, 20, 100]:
for m in [0, 1, 5, 20, 100]:
if n == 0 or m == 0:
yield __test_fail, n, m
else:
yield __test, n, m
def test_match_events_onesided():
events_from = np.asarray([5, 15, 25])
events_to = np.asarray([0, 10, 20, 30])
def __test(left, right, target):
match = librosa.util.match_events(events_from, events_to,
left=left, right=right)
assert np.allclose(target, events_to[match])
yield __test, False, True, [10, 20, 30]
yield __test, True, False, [0, 10, 20]
# Make a right-sided fail
events_from[0] = 40
yield raises(librosa.ParameterError)(__test), False, True, [10, 20, 30]
# Make a left-sided fail
events_from[0] = -1
yield raises(librosa.ParameterError)(__test), True, False, [10, 20, 30]
# Make a two-sided fail
events_from[0] = -1
yield raises(librosa.ParameterError)(__test), False, False, [10, 20, 30]
# Make a two-sided success
events_to[:-1] = events_from
yield __test, False, False, events_from
def test_localmax():
def __test(ndim, axis):
srand()
data = np.random.randn(*([20] * ndim))
lm = librosa.util.localmax(data, axis=axis)
for hits in np.argwhere(lm):
for offset in [-1, 1]:
compare_idx = hits.copy()
compare_idx[axis] += offset
if compare_idx[axis] < 0:
continue
if compare_idx[axis] >= data.shape[axis]:
continue
if offset < 0:
assert data[tuple(hits)] > data[tuple(compare_idx)]
else:
assert data[tuple(hits)] >= data[tuple(compare_idx)]
for ndim in range(1, 5):
for axis in range(ndim):
yield __test, ndim, axis
def test_peak_pick():
def __test(n, pre_max, post_max, pre_avg, post_avg, delta, wait):
srand()
# Generate a test signal
x = np.random.randn(n)**2
peaks = librosa.util.peak_pick(x,
pre_max, post_max,
pre_avg, post_avg,
delta, wait)
for i in peaks:
# Test 1: is it a peak in this window?
s = i - pre_max
if s < 0:
s = 0
t = i + post_max
diff = x[i] - np.max(x[s:t])
assert diff > 0 or np.isclose(diff, 0, rtol=1e-3, atol=1e-4)
# Test 2: is it a big enough peak to count?
s = i - pre_avg
if s < 0:
s = 0
t = i + post_avg
diff = x[i] - (delta + np.mean(x[s:t]))
assert diff > 0 or np.isclose(diff, 0, rtol=1e-3, atol=1e-4)
# Test 3: peak separation
assert not np.any(np.diff(peaks) <= wait)
@raises(librosa.ParameterError)
def __test_shape_fail():
x = np.eye(10)
librosa.util.peak_pick(x, 1, 1, 1, 1, 0.5, 1)
yield __test_shape_fail
win_range = [-1, 0, 1, 10]
for n in [1, 5, 10, 100]:
for pre_max in win_range:
for post_max in win_range:
for pre_avg in win_range:
for post_avg in win_range:
for wait in win_range:
for delta in [-1, 0.05, 100.0]:
tf = __test
if pre_max < 0:
tf = raises(librosa.ParameterError)(__test)
if pre_avg < 0:
tf = raises(librosa.ParameterError)(__test)
if delta < 0:
tf = raises(librosa.ParameterError)(__test)
if wait < 0:
tf = raises(librosa.ParameterError)(__test)
if post_max <= 0:
tf = raises(librosa.ParameterError)(__test)
if post_avg <= 0:
tf = raises(librosa.ParameterError)(__test)
yield (tf, n, pre_max, post_max,
pre_avg, post_avg, delta, wait)
def test_sparsify_rows():
def __test(n, d, q):
srand()
X = np.random.randn(*([d] * n))**4
X = np.asarray(X)
xs = librosa.util.sparsify_rows(X, quantile=q)
        if n == 1:
X = X.reshape((1, -1))
assert np.allclose(xs.shape, X.shape)
# And make sure that xs matches X on nonzeros
xsd = np.asarray(xs.todense())
for i in range(xs.shape[0]):
assert np.allclose(xsd[i, xs[i].indices], X[i, xs[i].indices])
# Compute row-wise magnitude marginals
v_in = np.sum(np.abs(X), axis=-1)
v_out = np.sum(np.abs(xsd), axis=-1)
# Ensure that v_out retains 1-q fraction of v_in
assert np.all(v_out >= (1.0 - q) * v_in)
for ndim in range(1, 4):
for d in [1, 5, 10, 100]:
for q in [-1, 0.0, 0.01, 0.25, 0.5, 0.99, 1.0, 2.0]:
tf = __test
if ndim not in [1, 2]:
tf = raises(librosa.ParameterError)(__test)
if not 0.0 <= q < 1:
tf = raises(librosa.ParameterError)(__test)
yield tf, ndim, d, q
def test_files():
# Expected output
output = [os.path.join(os.path.abspath(os.path.curdir), 'data', s)
for s in ['test1_22050.wav',
'test1_44100.wav',
'test2_8000.wav']]
def __test(searchdir, ext, recurse, case_sensitive, limit, offset):
files = librosa.util.find_files(searchdir,
ext=ext,
recurse=recurse,
case_sensitive=case_sensitive,
limit=limit,
offset=offset)
s1 = slice(offset, None)
s2 = slice(limit)
assert set(files) == set(output[s1][s2]), (files, output[s1][s2])
if platform.system() == 'Windows':
cases = [False]
else:
cases = [False, True]
for searchdir in [os.path.curdir, os.path.join(os.path.curdir, 'data')]:
for ext in [None, 'wav', 'WAV', ['wav'], ['WAV']]:
for recurse in [False, True]:
for case_sensitive in cases:
for limit in [None, 1, 2]:
for offset in [0, 1, -1]:
tf = __test
if searchdir == os.path.curdir and not recurse:
tf = raises(AssertionError)(__test)
if (ext is not None and case_sensitive and
(ext == 'WAV' or
set(ext) == set(['WAV']))):
tf = raises(AssertionError)(__test)
yield (tf, searchdir, ext, recurse,
case_sensitive, limit, offset)
def test_valid_int():
def __test(x_in, cast):
z = librosa.util.valid_int(x_in, cast)
assert isinstance(z, int)
if cast is None:
assert z == int(np.floor(x_in))
else:
assert z == int(cast(x_in))
__test_fail = raises(librosa.ParameterError)(__test)
for x in np.linspace(-2, 2, num=6):
for cast in [None, np.floor, np.ceil, 7]:
if cast is None or six.callable(cast):
yield __test, x, cast
else:
yield __test_fail, x, cast
def test_valid_intervals():
def __test(intval):
librosa.util.valid_intervals(intval)
for d in range(1, 4):
for n in range(1, 4):
ivals = np.ones(d * [n])
for m in range(1, 3):
slices = [slice(m)] * d
if m == 2 and d == 2 and n > 1:
yield __test, ivals[slices]
else:
yield raises(librosa.ParameterError)(__test), ivals[slices]
# Test for issue #712: intervals must have non-negative duration
yield raises(librosa.ParameterError)(__test), np.asarray([[0, 1], [2, 1]])
def test_warning_deprecated():
@librosa.util.decorators.deprecated('old_version', 'new_version')
def __dummy():
return True
warnings.resetwarnings()
warnings.simplefilter('always')
with warnings.catch_warnings(record=True) as out:
x = __dummy()
# Make sure we still get the right value
assert x is True
# And that the warning triggered
assert len(out) > 0
# And that the category is correct
assert out[0].category is DeprecationWarning
# And that it says the right thing (roughly)
assert 'deprecated' in str(out[0].message).lower()
def test_warning_moved():
@librosa.util.decorators.moved('from', 'old_version', 'new_version')
def __dummy():
return True
warnings.resetwarnings()
warnings.simplefilter('always')
with warnings.catch_warnings(record=True) as out:
x = __dummy()
# Make sure we still get the right value
assert x is True
# And that the warning triggered
assert len(out) > 0
# And that the category is correct
assert out[0].category is DeprecationWarning
# And that it says the right thing (roughly)
assert 'moved' in str(out[0].message).lower()
def test_warning_rename_kw_pass():
warnings.resetwarnings()
warnings.simplefilter('always')
ov = librosa.util.Deprecated()
nv = 23
with warnings.catch_warnings(record=True) as out:
v = librosa.util.rename_kw('old', ov, 'new', nv, '0', '1')
eq_(v, nv)
# Make sure no warning triggered
assert len(out) == 0
def test_warning_rename_kw_fail():
warnings.resetwarnings()
warnings.simplefilter('always')
ov = 27
nv = 23
with warnings.catch_warnings(record=True) as out:
v = librosa.util.rename_kw('old', ov, 'new', nv, '0', '1')
eq_(v, ov)
# Make sure the warning triggered
assert len(out) > 0
# And that the category is correct
assert out[0].category is DeprecationWarning
# And that it says the right thing (roughly)
assert 'renamed' in str(out[0].message).lower()
def test_index_to_slice():
def __test(idx, idx_min, idx_max, step, pad):
slices = librosa.util.index_to_slice(idx,
idx_min=idx_min,
idx_max=idx_max,
step=step,
pad=pad)
if pad:
if idx_min is not None:
eq_(slices[0].start, idx_min)
if idx.min() != idx_min:
slices = slices[1:]
if idx_max is not None:
eq_(slices[-1].stop, idx_max)
if idx.max() != idx_max:
slices = slices[:-1]
if idx_min is not None:
idx = idx[idx >= idx_min]
if idx_max is not None:
idx = idx[idx <= idx_max]
idx = np.unique(idx)
eq_(len(slices), len(idx) - 1)
for sl, start, stop in zip(slices, idx, idx[1:]):
eq_(sl.start, start)
eq_(sl.stop, stop)
eq_(sl.step, step)
for indices in [np.arange(10, 90, 10), np.arange(10, 90, 15)]:
for idx_min in [None, 5, 15]:
for idx_max in [None, 85, 100]:
for step in [None, 2]:
for pad in [False, True]:
yield __test, indices, idx_min, idx_max, step, pad
def test_sync():
def __test_pass(axis, data, idx):
# By default, mean aggregation
dsync = librosa.util.sync(data, idx, axis=axis)
if data.ndim == 1 or axis == -1:
assert np.allclose(dsync, 2 * np.ones_like(dsync))
else:
assert np.allclose(dsync, data)
# Explicit mean aggregation
dsync = librosa.util.sync(data, idx, aggregate=np.mean, axis=axis)
if data.ndim == 1 or axis == -1:
assert np.allclose(dsync, 2 * np.ones_like(dsync))
else:
assert np.allclose(dsync, data)
# Max aggregation
dsync = librosa.util.sync(data, idx, aggregate=np.max, axis=axis)
if data.ndim == 1 or axis == -1:
assert np.allclose(dsync, 4 * np.ones_like(dsync))
else:
assert np.allclose(dsync, data)
# Min aggregation
dsync = librosa.util.sync(data, idx, aggregate=np.min, axis=axis)
if data.ndim == 1 or axis == -1:
assert np.allclose(dsync, np.zeros_like(dsync))
else:
assert np.allclose(dsync, data)
# Test for dtype propagation
assert dsync.dtype == data.dtype
@raises(librosa.ParameterError)
def __test_fail(data, idx):
librosa.util.sync(data, idx)
for ndim in [1, 2, 3]:
shaper = [1] * ndim
shaper[-1] = -1
data = np.mod(np.arange(135), 5)
frames = np.flatnonzero(data[0] == 0)
slices = [slice(start, stop) for (start, stop) in zip(frames, frames[1:])]
data = np.reshape(data, shaper)
for axis in [0, -1]:
# Test with list of indices
yield __test_pass, axis, data, list(frames)
# Test with ndarray of indices
yield __test_pass, axis, data, frames
# Test with list of slices
yield __test_pass, axis, data, slices
for bad_idx in [['foo', 'bar'], [None], [slice(None), None]]:
yield __test_fail, data, bad_idx
def test_roll_sparse():
srand()
def __test(fmt, shift, axis, X):
X_sparse = X.asformat(fmt)
X_dense = X.toarray()
Xs_roll = librosa.util.roll_sparse(X_sparse, shift, axis=axis)
assert scipy.sparse.issparse(Xs_roll)
eq_(Xs_roll.format, X_sparse.format)
Xd_roll = librosa.util.roll_sparse(X_dense, shift, axis=axis)
assert np.allclose(Xs_roll.toarray(), Xd_roll), (X_dense, Xs_roll.toarray(), Xd_roll)
Xd_roll_np = np.roll(X_dense, shift, axis=axis)
assert np.allclose(Xd_roll, Xd_roll_np)
X = scipy.sparse.lil_matrix(np.random.randint(0, high=10, size=(16, 16)))
for fmt in ['csr', 'csc', 'lil', 'dok', 'coo']:
for shift in [0, 8, -8, 20, -20]:
for axis in [0, 1, -1]:
yield __test, fmt, shift, axis, X
@raises(librosa.ParameterError)
def test_roll_sparse_bad_axis():
X = scipy.sparse.eye(5, format='csr')
librosa.util.roll_sparse(X, 3, axis=2)
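# softmask output must lie in [0, 1]; rows that are zero in both inputs map to 0.5
# when split_zeros is set (and power is finite), and to 0 otherwise.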
def test_softmask():
def __test(power, split_zeros):
srand()
X = np.abs(np.random.randn(10, 10))
X_ref = np.abs(np.random.randn(10, 10))
# Zero out some rows
X[3, :] = 0
X_ref[3, :] = 0
M = librosa.util.softmask(X, X_ref, power=power, split_zeros=split_zeros)
assert np.all(0 <= M) and np.all(M <= 1)
if split_zeros and np.isfinite(power):
assert np.allclose(M[3, :], 0.5)
else:
assert not np.any(M[3, :]), M[3]
for power in [1, 2, 50, 100, np.inf]:
for split_zeros in [False, True]:
yield __test, power, split_zeros
def test_softmask_int():
X = 2 * np.ones((3, 3), dtype=np.int32)
X_ref = np.vander(np.arange(3))
M1 = librosa.util.softmask(X, X_ref, power=1)
M2 = librosa.util.softmask(X_ref, X, power=1)
assert np.allclose(M1 + M2, 1)
def test_softmask_fail():
failure = raises(librosa.ParameterError)(librosa.util.softmask)
yield failure, -np.ones(3), np.ones(3), 1, False
yield failure, np.ones(3), -np.ones(3), 1, False
yield failure, np.ones(3), np.ones(4), 1, False
yield failure, np.ones(3), np.ones(3), 0, False
yield failure, np.ones(3), np.ones(3), -1, False
def test_tiny():
def __test(x, value):
eq_(value, librosa.util.tiny(x))
for x, value in [(1, np.finfo(np.float32).tiny),
(np.ones(3, dtype=int), np.finfo(np.float32).tiny),
(np.ones(3, dtype=np.float32), np.finfo(np.float32).tiny),
(1.0, np.finfo(np.float64).tiny),
(np.ones(3, dtype=np.float64), np.finfo(np.float64).tiny),
(1j, np.finfo(np.complex128).tiny),
(np.ones(3, dtype=np.complex64), np.finfo(np.complex64).tiny),
(np.ones(3, dtype=np.complex128), np.finfo(np.complex128).tiny)]:
yield __test, x, value
def test_optional_jit():
@librosa.util.decorators.optional_jit(nopython=True)
def __func1(x):
return x**2
@librosa.util.decorators.optional_jit
def __func2(x):
return x**2
def __test(f):
y = f(2)
eq_(y, 2**2)
yield __test, __func1
yield __test, __func2
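# fill_off_diagonal should zero every entry outside a band around the main
# diagonal whose width is controlled by the radius argument (0.25 here).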
def test_util_fill_off_diagonal_8_8():
# Case 1: Square matrix (N=M)
mut_x = np.ones((8, 8))
librosa.util.fill_off_diagonal(mut_x, 0.25)
gt_x = np.array([[1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1]])
assert np.array_equal(mut_x, gt_x)
assert np.array_equal(mut_x, gt_x.T)
def test_util_fill_off_diagonal_8_12():
# Case 2a: N!=M
mut_x = np.ones((8, 12))
librosa.util.fill_off_diagonal(mut_x, 0.25)
gt_x = np.array([[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
assert np.array_equal(mut_x, gt_x)
# Case 2b: (N!=M).T
mut_x = np.ones((8, 12)).T
librosa.util.fill_off_diagonal(mut_x, 0.25)
assert np.array_equal(mut_x, gt_x.T)
|
the-stack_106_22494 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Modifications for Guinet et al.
# TODO
import io, os, ot, argparse, random
import numpy as np
from ot.gromov import gromov_wasserstein
from utils import *
parser = argparse.ArgumentParser(description=" ")
parser.add_argument("--embdir", default="data/", type=str)
parser.add_argument("--outdir", default="output/", type=str)
parser.add_argument(
"--lglist",
default="en-fr-es-it-pt-de-pl-ru-da-nl-cs",
type=str,
help="list of languages. The first element is the pivot. Example: en-fr-es to align English, French and Spanish with English as the pivot.",
)
parser.add_argument(
"--maxload", default=10000, type=int, help="Max number of loaded vectors"
)
parser.add_argument(
"--uniform",
action="store_true",
help="switch to uniform probability of picking language pairs",
)
# optimization parameters for the square loss
parser.add_argument("--epoch", default=2, type=int, help="nb of epochs for square loss")
parser.add_argument(
"--niter",
default=500,
type=int,
help="max number of iteration per epoch for square loss",
)
parser.add_argument(
"--lr", default=0.1, type=float, help="learning rate for square loss"
)
parser.add_argument("--bsz", default=500, type=int, help="batch size for square loss")
# optimization parameters for the RCSLS loss
parser.add_argument(
"--altepoch", default=100, type=int, help="nb of epochs for RCSLS loss"
)
parser.add_argument(
"--altlr", default=25, type=float, help="learning rate for RCSLS loss"
)
parser.add_argument("--altbsz", type=int, default=1000, help="batch size for RCSLS")
args = parser.parse_args()
###### SPECIFIC FUNCTIONS ######
def getknn(sc, x, y, k=10):
sidx = np.argpartition(sc, -k, axis=1)[:, -k:]
ytopk = y[sidx.flatten(), :]
ytopk = ytopk.reshape(sidx.shape[0], sidx.shape[1], y.shape[1])
f = np.sum(sc[np.arange(sc.shape[0])[:, None], sidx])
df = np.dot(ytopk.sum(1).T, x)
return f / k, df / k
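# RCSLS (relaxed CSLS) loss and gradient for a batch: reward similarity of mapped
# pairs while penalising the k-NN hubness terms from getknn; both outputs are
# negated so the caller can minimise.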
def rcsls(Xi, Xj, Zi, Zj, R, knn=10):
X_trans = np.dot(Xi, R.T)
f = 2 * np.sum(X_trans * Xj)
df = 2 * np.dot(Xj.T, Xi)
fk0, dfk0 = getknn(np.dot(X_trans, Zj.T), Xi, Zj, knn)
fk1, dfk1 = getknn(np.dot(np.dot(Zi, R.T), Xj.T).T, Xj, Zi, knn)
f = f - fk0 - fk1
df = df - dfk0 - dfk1.T
return -f / Xi.shape[0], -df.T / Xi.shape[0]
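# Intra-domain cost matrix for Gromov-Wasserstein: entry (i, j) is half the
# squared Euclidean distance between embeddings i and j.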
def GWmatrix(emb0):
N = np.shape(emb0)[0]
N2 = 0.5 * np.linalg.norm(emb0, axis=1).reshape(1, N)
C2 = np.tile(N2.transpose(), (1, N)) + np.tile(N2, (N, 1))
C2 -= np.dot(emb0, emb0.T)
return C2
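# Solve Gromov-Wasserstein between the two point clouds, then turn the resulting
# coupling into an orthogonal mapping via the Procrustes helper.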
def g_wasserstein(x_src, x_tgt, C2):
N = x_src.shape[0]
C1 = GWmatrix(x_src)
M = gromov_wasserstein(
C1, C2, np.ones(N), np.ones(N), "square_loss"
) # epsilon=0.55,max_iter=100,tol=1e-4
return procrustes(np.dot(M, x_tgt), x_src)
def align(EMB, TRANS, lglist, args):
nmax, l = args.maxload, len(lglist)
# create a list of language pairs to sample from
    # (default == higher probability to pick a language pair containing the pivot)
# if --uniform: uniform probability of picking a language pair
samples = []
for i in range(l):
for j in range(l):
if j == i:
continue
if j > 0 and args.uniform == False:
samples.append((0, j))
if i > 0 and args.uniform == False:
samples.append((i, 0))
samples.append((i, j))
# optimization of the l2 loss
print("start optimizing L2 loss")
lr0, bsz, nepoch, niter = args.lr, args.bsz, args.epoch, args.niter
for epoch in range(nepoch):
print("start epoch %d / %d" % (epoch + 1, nepoch))
ones = np.ones(bsz)
f, fold, nb, lr = 0.0, 0.0, 0.0, lr0
for it in range(niter):
if it > 1 and f > fold + 1e-3:
lr /= 2
if lr < 0.05:
break
fold = f
f, nb = 0.0, 0.0
for k in range(100 * (l - 1)):
(i, j) = random.choice(samples)
embi = EMB[i][np.random.permutation(nmax)[:bsz], :]
embj = EMB[j][np.random.permutation(nmax)[:bsz], :]
perm = ot.sinkhorn(
ones,
ones,
np.linalg.multi_dot([embi, -TRANS[i], TRANS[j].T, embj.T]),
reg=0.025,
stopThr=1e-3,
)
grad = np.linalg.multi_dot([embi.T, perm, embj])
f -= (
np.trace(np.linalg.multi_dot([TRANS[i].T, grad, TRANS[j]]))
/ embi.shape[0]
)
nb += 1
if i > 0:
TRANS[i] = proj_ortho(TRANS[i] + lr * np.dot(grad, TRANS[j]))
if j > 0:
TRANS[j] = proj_ortho(
TRANS[j] + lr * np.dot(grad.transpose(), TRANS[i])
)
print(
"iter %d / %d - epoch %d - loss: %.5f lr: %.4f"
% (it, niter, epoch + 1, f / nb, lr)
)
print(
"end of epoch %d - loss: %.5f - lr: %.4f" % (epoch + 1, f / max(nb, 1), lr)
)
niter, bsz = max(int(niter / 2), 2), min(1000, bsz * 2)
# end for epoch in range(nepoch):
# optimization of the RCSLS loss
print("start optimizing RCSLS loss")
f, fold, nb, lr = 0.0, 0.0, 0.0, args.altlr
for epoch in range(args.altepoch):
if epoch > 1 and f - fold > -1e-4 * abs(fold):
lr /= 2
if lr < 1e-1:
break
fold = f
f, nb = 0.0, 0.0
for k in range(round(nmax / args.altbsz) * 10 * (l - 1)):
(i, j) = random.choice(samples)
sgdidx = np.random.choice(nmax, size=args.altbsz, replace=False)
embi = EMB[i][sgdidx, :]
embj = EMB[j][:nmax, :]
# crude alignment approximation:
T = np.dot(TRANS[i], TRANS[j].T)
scores = np.linalg.multi_dot([embi, T, embj.T])
perm = np.zeros_like(scores)
perm[np.arange(len(scores)), scores.argmax(1)] = 1
embj = np.dot(perm, embj)
# normalization over a subset of embeddings for speed up
fi, grad = rcsls(embi, embj, embi, embj, T.T)
f += fi
nb += 1
if i > 0:
TRANS[i] = proj_ortho(TRANS[i] - lr * np.dot(grad, TRANS[j]))
if j > 0:
TRANS[j] = proj_ortho(
TRANS[j] - lr * np.dot(grad.transpose(), TRANS[i])
)
print("epoch %d - loss: %.5f - lr: %.4f" % (epoch + 1, f / max(nb, 1), lr))
# end for epoch in range(args.altepoch):
return TRANS
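# Convex-relaxation initialisation (Frank-Wolfe steps with Sinkhorn projections);
# kept as an alternative to the Gromov-Wasserstein initialisation used below.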
def convex_init(X, Y, niter=100, reg=0.05, apply_sqrt=False):
n, d = X.shape
K_X, K_Y = np.dot(X, X.T), np.dot(Y, Y.T)
K_Y *= np.linalg.norm(K_X) / np.linalg.norm(K_Y)
K2_X, K2_Y = np.dot(K_X, K_X), np.dot(K_Y, K_Y)
P = np.ones([n, n]) / float(n)
for it in range(1, niter + 1):
G = np.dot(P, K2_X) + np.dot(K2_Y, P) - 2 * np.dot(K_Y, np.dot(P, K_X))
q = ot.sinkhorn(np.ones(n), np.ones(n), G, reg, stopThr=1e-3)
alpha = 2.0 / float(2.0 + it)
P = alpha * q + (1.0 - alpha) * P
return procrustes(np.dot(P, X), Y).T
###### MAIN ######
lglist = args.lglist.split("-")
l = len(lglist)
# embs:
EMB = {}
for i in range(l):
fn = args.embdir + "/wiki." + lglist[i] + ".vec"
_, vecs = load_vectors(fn, maxload=args.maxload)
EMB[i] = vecs
# init
print("Computing initial bilingual mapping with Gromov-Wasserstein...")
TRANS = {}
maxinit = 2000
emb0 = EMB[0][:maxinit, :]
C0 = GWmatrix(emb0)
TRANS[0] = np.eye(300)
for i in range(1, l):
print("init " + lglist[i])
embi = EMB[i][:maxinit, :]
TRANS[i] = g_wasserstein(embi, emb0, C0)
# align
align(EMB, TRANS, lglist, args)
print("saving matrices in " + args.outdir)
languages = "".join(lglist)
for i in range(l):
save_matrix(args.outdir + "/W-" + languages + "-" + lglist[i], TRANS[i])
|
the-stack_106_22495 | import numpy as np
from matplotlib import pyplot as plt
import cv2
img = cv2.imread('wiki.jpg',0)
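# Plot the grayscale histogram alongside its cumulative distribution, scaled to
# the histogram peak so both curves share one set of axes.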
hist,bins = np.histogram(img.flatten(),256,[0,256])
cdf = hist.cumsum()
cdf_normalized = cdf * hist.max()/ cdf.max()
plt.plot(cdf_normalized, color = 'b')
plt.hist(img.flatten(),256,[0,256], color = 'r')
plt.xlim([0,256])
plt.legend(('cdf','histogram'), loc = 'upper left')
plt.show()
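# Manual histogram equalization: mask out zero CDF entries, min-max scale the CDF
# to [0, 255] and use it as a lookup table to remap the pixel values.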
cdf_m = np.ma.masked_equal(cdf,0)
cdf_m = (cdf_m - cdf_m.min())*255/(cdf_m.max()-cdf_m.min())
cdf = np.ma.filled(cdf_m,0).astype('uint8')
img2 = cdf[img]
plt.plot(cdf, color = 'b')
plt.hist(img2.flatten(),256,[0,256], color = 'r')
plt.xlim([0,256])
plt.legend(('cdf','histogram'), loc = 'upper left')
plt.show()
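# The equivalent operation with OpenCV's built-in cv2.equalizeHist; the original
# and equalized images are stacked side by side for comparison.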
img = cv2.imread('wiki.jpg',0)
equ = cv2.equalizeHist(img)
res = np.hstack((img,equ)) #stacking images side-by-side
cv2.imwrite('res.png',res) |
the-stack_106_22497 | #!/usr/bin/python
"""
anybadge
A Python module for generating badges for your projects, with a focus on
simplicity and flexibility.
"""
import os
import re
# Package information
version = __version__ = "0.0.0"
__version_info__ = tuple(re.split('[.-]', __version__))
__title__ = "anybadge"
__summary__ = "A simple, flexible badge generator."
__uri__ = "https://github.com/jongracecox/anybadge"
# Set some defaults
DEFAULT_FONT = 'DejaVu Sans,Verdana,Geneva,sans-serif'
DEFAULT_FONT_SIZE = 11
NUM_PADDING_CHARS = 0
NUM_VALUE_PADDING_CHARS = 0
DEFAULT_COLOR = '#4c1'
DEFAULT_TEXT_COLOR = '#fff'
MASK_ID_PREFIX = 'anybadge_'
# Dictionary for looking up approx pixel widths of
# supported fonts and font sizes.
FONT_WIDTHS = {
'DejaVu Sans,Verdana,Geneva,sans-serif': {
11: 10
}
}
# Create a dictionary of colors to make selections
# easier.
COLORS = {
'white': '#FFFFFF',
'silver': '#C0C0C0',
'gray': '#808080',
'black': '#000000',
'red': '#e05d44',
'brightred': '#FF0000',
'maroon': '#800000',
'olive': '#808000',
'lime': '#00FF00',
'brightyellow': '#FFFF00',
'yellow': '#dfb317',
'green': '#4c1',
'yellowgreen': '#a4a61d',
'aqua': '#00FFFF',
'teal': '#008080',
'blue': '#0000FF',
'navy': '#000080',
'fuchsia': '#FF00FF',
'purple': '#800080',
'orange': '#fe7d37',
'lightgrey': '#9f9f9f',
}
# Template SVG with placeholders for various items that
# will be added during final creation.
TEMPLATE_SVG = """<?xml version="1.0" encoding="UTF-8"?>
<svg xmlns="http://www.w3.org/2000/svg" width="{{ badge width }}" height="20">
<linearGradient id="b" x2="0" y2="100%">
<stop offset="0" stop-color="#bbb" stop-opacity=".1"/>
<stop offset="1" stop-opacity=".1"/>
</linearGradient>
<mask id="{{ mask id }}">
<rect width="{{ badge width }}" height="20" rx="3" fill="#fff"/>
</mask>
<g mask="url(#{{ mask id }})">
<path fill="#555" d="M0 0h{{ color split x }}v20H0z"/>
<path fill="{{ color }}" d="M{{ color split x }} 0h{{ value width }}v20H{{ color split x }}z"/>
<path fill="url(#b)" d="M0 0h{{ badge width }}v20H0z"/>
</g>
<g fill="{{ label text color }}" text-anchor="middle" font-family="{{ font name }}" font-size="{{ font size }}">
<text x="{{ label anchor shadow }}" y="15" fill="#010101" fill-opacity=".3">{{ label }}</text>
<text x="{{ label anchor }}" y="14">{{ label }}</text>
</g>
<g fill="{{ value text color }}" text-anchor="middle" font-family="{{ font name }}" font-size="{{ font size }}">
<text x="{{ value anchor shadow }}" y="15" fill="#010101" fill-opacity=".3">{{ value }}</text>
<text x="{{ value anchor }}" y="14">{{ value }}</text>
</g>
</svg>"""
# Define some templates that can be used for common badge types, saving
# from having to provide thresholds and labels each time.
BADGE_TEMPLATES = {
'pylint': {
'threshold': '2=red 4=orange 8=yellow 10=green',
'label': 'pylint'
},
'coverage': {
'threshold': '50=red 60=orange 80=yellow 100=green',
'label': 'coverage',
'suffix': '%'
}
}
class Badge(object):
"""
Badge class used to generate badges.
Args:
label(str): Badge label text.
value(str): Badge value text.
font_name(str, optional): Name of font to use.
font_size(int, optional): Font size.
num_padding_chars(float, optional): Number of padding characters to use to give extra
space around text.
num_value_padding_chars(float, optional): Number of padding characters to use to give extra
space around value.
template(str, optional): String containing the SVG template. This should be valid SVG
file content with place holders for variables to be populated during rendering.
value_prefix(str, optional): Prefix to be placed before value.
value_suffix(str, optional): Suffix to be placed after value.
thresholds(dict, optional): A dictionary containing thresholds used to select badge
color based on the badge value.
default_color(str, optional): Badge color as a name or as an HTML color code.
use_max_when_value_exceeds(bool, optional): Choose whether to use the maximum threshold
value when the badge value exceeds the top threshold. Default is True.
value_format(str, optional) String with formatting to be used to format the value text.
text_color(str, optional): Text color as a name or as an HTML color code.
Examples:
Create a simple green badge:
>>> badge = Badge('label', 123, default_color='green')
Write a badge to file, overwriting any existing file:
>>> badge = Badge('label', 123, default_color='green')
>>> badge.write_badge('demo.svg', overwrite=True)
Here are a number of examples showing thresholds, since there
are certain situations that may not be obvious:
>>> badge = Badge('pipeline', 'passing', thresholds={'passing': 'green', 'failing': 'red'})
>>> badge.badge_color
'green'
2.32 is not <2
2.32 is < 4, so 2.32 yields orange
>>> badge = Badge('pylint', 2.32, thresholds={2: 'red',
... 4: 'orange',
... 8: 'yellow',
... 10: 'green'})
>>> badge.badge_color
'orange'
8 is not <8
        8 is <10, so 8 yields green
>>> badge = Badge('pylint', 8, thresholds={2: 'red',
... 4: 'orange',
... 8: 'yellow',
... 10: 'green'})
>>> badge.badge_color
'green'
        11 is not <10, but use_max_when_value_exceeds defaults to
        True, so 11 yields green
>>> badge = Badge('pylint', 11, thresholds={2: 'red',
... 4: 'orange',
... 8: 'yellow',
... 10: 'green'})
>>> badge.badge_color
'green'
11 is not <10, and use_max_when_value_exceeds is set to
False, so 11 yields the default color '#4c1'
>>> badge = Badge('pylint', 11, use_max_when_value_exceeds=False,
... thresholds={2: 'red', 4: 'orange', 8: 'yellow',
... 10: 'green'})
>>> badge.badge_color
'#4c1'
"""
def __init__(self, label, value, font_name=None, font_size=None,
num_padding_chars=None, num_value_padding_chars=None, template=None,
value_prefix='', value_suffix='', thresholds=None, default_color=None,
use_max_when_value_exceeds=True, value_format=None, text_color=None):
"""Constructor for Badge class."""
# Set defaults if values were not passed
if not font_name:
font_name = DEFAULT_FONT
if not font_size:
font_size = DEFAULT_FONT_SIZE
if num_padding_chars is None:
num_padding_chars = NUM_PADDING_CHARS
if num_value_padding_chars is None:
num_value_padding_chars = NUM_VALUE_PADDING_CHARS
if not template:
template = TEMPLATE_SVG
if not default_color:
default_color = DEFAULT_COLOR
if not text_color:
text_color = DEFAULT_TEXT_COLOR
self.label = label
self.value = value
self.value_format = value_format
if value_format:
value_text = str(value_format % self.value_type(value))
else:
value_text = str(self.value_type(value))
self.value_prefix = value_prefix
self.value_suffix = value_suffix
self.value_text = value_prefix + value_text + value_suffix
self.font_name = font_name
self.font_size = font_size
self.num_padding_chars = num_padding_chars
self.num_value_padding_chars = num_value_padding_chars
self.template = template
self.thresholds = thresholds
self.default_color = default_color
# text_color can be passed as a single value or a pair of comma delimited values
self.text_color = text_color
text_colors = text_color.split(',')
self.label_text_color = text_colors[0]
self.value_text_color = text_colors[0]
if len(text_colors) > 1:
self.value_text_color = text_colors[1]
self.use_max_when_value_exceeds = use_max_when_value_exceeds
self.mask_id = self.__class__._get_next_mask_id()
def __repr__(self):
"""Return a representation of the Badge object instance.
The output of the __repr__ function could be used to recreate the current object.
Examples:
>>> badge = Badge('example', '123.456')
>>> repr(badge)
"Badge('example', '123.456')"
>>> badge = Badge('example', '123.456', value_suffix='TB')
>>> repr(badge)
"Badge('example', '123.456', value_suffix='TB')"
>>> badge = Badge('example', '123.456', text_color='#111111', value_suffix='TB')
>>> repr(badge)
"Badge('example', '123.456', value_suffix='TB', text_color='#111111')"
"""
optional_args = ""
if self.font_name != DEFAULT_FONT:
optional_args += ", font_name=%s" % repr(self.font_name)
if self.font_size != DEFAULT_FONT_SIZE:
optional_args += ", font_size=%s" % repr(self.font_size)
if self.num_padding_chars != NUM_PADDING_CHARS:
optional_args += ", num_padding_chars=%s" % repr(self.num_padding_chars)
if self.num_value_padding_chars != NUM_VALUE_PADDING_CHARS:
optional_args += ", num_value_padding_chars=%s" % repr(self.num_value_padding_chars)
if self.template != TEMPLATE_SVG:
optional_args += ", template=%s" % repr(self.template)
if self.value_prefix != '':
optional_args += ", value_prefix=%s" % repr(self.value_prefix)
if self.value_suffix != '':
optional_args += ", value_suffix=%s" % repr(self.value_suffix)
if self.thresholds:
optional_args += ", thresholds=%s" % repr(self.thresholds)
if self.default_color != DEFAULT_COLOR:
optional_args += ", default_color=%s" % repr(self.default_color)
if not self.use_max_when_value_exceeds:
optional_args += ", use_max_when_value_exceeds=%s" % repr(self.use_max_when_value_exceeds)
if self.value_format:
optional_args += ", value_format=%s" % repr(self.value_format)
if self.text_color != DEFAULT_TEXT_COLOR:
optional_args += ", text_color=%s" % repr(self.text_color)
return "%s(%s, %s%s)" % (
self.__class__.__name__,
repr(self.label),
repr(self.value),
optional_args
)
@classmethod
def _get_next_mask_id(cls):
"""Return a new mask ID from a singleton sequence maintained on the class.
Returns: str
"""
if not hasattr(cls, 'mask_id'):
cls.mask_id = 0
cls.mask_id += 1
return MASK_ID_PREFIX + str(cls.mask_id)
@property
def value_is_float(self):
"""Identify whether the value text is a float.
Returns: bool
"""
# If the value is an int then it should not be considered a float.
# We need to check this first before we check whether it is a float because the
# float check also returns True for an int string.
if self.value_is_int:
return False
try:
_ = float(self.value)
except ValueError:
return False
else:
return True
@property
def value_is_int(self):
"""Identify whether the value text is an int.
Returns: bool
"""
try:
a = float(self.value)
b = int(self.value)
except ValueError:
return False
else:
            return a == b
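    # Illustrative behaviour of the two properties above (a hedged sketch that
    # follows the logic as written): Badge('x', '5').value_is_int is True,
    # Badge('x', '5.5').value_is_float is True, and a non-numeric value such as
    # 'passing' leaves both False, so the value is treated as a plain string.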
@property
def value_type(self):
"""The Python type associated with the value.
Returns: type
"""
if self.value_is_float:
return float
elif self.value_is_int:
return int
else:
return str
@property
def label_width(self):
"""The SVG width of the label text.
Returns: int
"""
return int(self.get_text_width(self.label) + (2.0 * self.num_padding_chars * self.font_width))
@property
def value_width(self):
"""The SVG width of the value text.
Returns: int
"""
return int(self.get_text_width(str(self.value_text)) + (2.0 * self.font_width * self.num_value_padding_chars) + (self.num_padding_chars * self.font_width))
@property
def font_width(self):
"""Return the width multiplier for a font.
Returns:
            int: Maximum pixel width of the badge's selected font.
Example:
>>> Badge(label='x', value='1').font_width
10
"""
return FONT_WIDTHS[self.font_name][self.font_size]
@property
def color_split_position(self):
"""The SVG x position where the color split should occur.
Returns: int
"""
return int(self.font_width + self.label_width +
float(self.font_width) * float(self.num_padding_chars))
@property
def label_anchor(self):
"""The SVG x position of the middle anchor for the label text.
Returns: float
"""
return self.color_split_position / 2
@property
def value_anchor(self):
"""The SVG x position of the middle anchor for the value text.
Returns: float
"""
return self.color_split_position + ((self.badge_width - self.color_split_position) / 2)
@property
def label_anchor_shadow(self):
"""The SVG x position of the label shadow anchor.
Returns: float
"""
return self.label_anchor + 1
@property
def value_anchor_shadow(self):
"""The SVG x position of the value shadow anchor.
Returns: float
"""
return self.value_anchor + 1
@property
def badge_width(self):
"""The total width of badge.
Returns: int
Examples:
>>> badge = Badge('pylint', '5')
>>> badge.badge_width
53
"""
padding_char_width = self.get_text_width(' ')
padding = int(padding_char_width * (self.num_padding_chars + 3))
return padding + self.label_width + self.value_width
@property
def badge_svg_text(self):
"""The badge SVG text.
Returns: str
"""
# Identify whether template is a file or the actual template text
if len(self.template.split('\n')) == 1:
with open(self.template, mode='r') as file_handle:
badge_text = file_handle.read()
else:
badge_text = self.template
return badge_text.replace('{{ badge width }}', str(self.badge_width)) \
.replace('{{ font name }}', self.font_name) \
.replace('{{ font size }}', str(self.font_size)) \
.replace('{{ label }}', self.label) \
.replace('{{ value }}', self.value_text) \
.replace('{{ label anchor }}', str(self.label_anchor)) \
.replace('{{ label anchor shadow }}', str(self.label_anchor_shadow)) \
.replace('{{ value anchor }}', str(self.value_anchor)) \
.replace('{{ value anchor shadow }}', str(self.value_anchor_shadow)) \
.replace('{{ color }}', self.badge_color_code) \
.replace('{{ label text color }}', self.label_text_color) \
.replace('{{ value text color }}', self.value_text_color) \
.replace('{{ color split x }}', str(self.color_split_position)) \
.replace('{{ value width }}', str(self.badge_width - self.color_split_position))\
.replace('{{ mask id }}', self.mask_id)
def __str__(self):
"""Return string representation of badge.
This will return the badge SVG text.
Returns: str
Examples:
>>> print(Badge('example', '123')) # doctest: +ELLIPSIS
<?xml version="1.0" encoding="UTF-8"?>
...
"""
return self.badge_svg_text
def get_text_width(self, text):
"""Return the width of text.
Args:
text(str): Text to get the pixel width of.
Returns:
            int: Pixel width of the given text based on the badge's selected font.
        The character-width approximation used here is based on the font:
        font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="11"
>>> badge = Badge('x', 1, font_name='DejaVu Sans,Verdana,Geneva,sans-serif', font_size=11)
>>> badge.get_text_width('pylint')
34
"""
return _get_approx_string_width(text, self.font_width)
@property
def badge_color(self):
"""Badge color based on the configured thresholds.
Returns: str"""
# If no thresholds were passed then return the default color
if not self.thresholds:
return self.default_color
if self.value_type == str:
if self.value in self.thresholds:
return self.thresholds[self.value]
else:
return self.default_color
# Convert the threshold dictionary into a sorted list of lists
threshold_list = [[self.value_type(i[0]), i[1]] for i in self.thresholds.items()]
threshold_list.sort(key=lambda x: x[0])
color = None
for threshold, color in threshold_list:
if float(self.value) < float(threshold):
return color
        # If we drop out of the top of the range then return the last (max) color
if color and self.use_max_when_value_exceeds:
return color
else:
return self.default_color
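    # A hedged example of the threshold lookup above (threshold values are
    # illustrative): with thresholds={50: 'red', 60: 'orange', 80: 'yellow',
    # 100: 'green'} and value '65', the sorted scan returns 'yellow', because
    # 80 is the first threshold that 65 falls below.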
@property
def badge_color_code(self):
"""Return the color code for the badge.
Returns: str
"""
color = self.badge_color
if color[0] == '#':
return color
return COLORS[color]
def write_badge(self, file_path, overwrite=False):
"""Write badge to file."""
# Validate path (part 1)
if file_path.endswith('/'):
raise Exception('File location may not be a directory.')
# Get absolute filepath
path = os.path.abspath(file_path)
if not path.lower().endswith('.svg'):
path += '.svg'
# Validate path (part 2)
if not overwrite and os.path.exists(path):
raise Exception('File "{}" already exists.'.format(path))
with open(path, mode='w') as file_handle:
file_handle.write(self.badge_svg_text)
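# A minimal usage sketch for the Badge class above (the file name and the
# threshold values are illustrative, not taken from this module):
#
#     badge = Badge('coverage', 65, value_suffix='%',
#                   thresholds={50: 'red', 60: 'orange', 80: 'yellow', 100: 'green'})
#     badge.write_badge('coverage.svg', overwrite=True)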
# Based on the following SO answer: https://stackoverflow.com/a/16008023/6252525
def _get_approx_string_width(text, font_width, fixed_width=False):
"""
Get the approximate width of a string using a specific average font width.
Args:
text(str): Text string to calculate width of.
font_width(int): Average width of font characters.
fixed_width(bool): Indicates that the font is fixed width.
Returns:
int: Width of string in pixels.
Examples:
Call the function with a string and the maximum character width of the font you are using:
>>> int(_get_approx_string_width('hello', 10))
29
        This example shows the comparison with a simplistic calculation based on a fixed width.
        Given a test string and a fixed font width of 10, we can calculate the width
        by multiplying the string length by the font character width:
>>> test_string = 'GOOGLE|ijkl'
>>> _get_approx_string_width(test_string, 10, fixed_width=True)
110
        Since some characters in the string are thinner than others, we expect the
        approximate text width to be narrower than the fixed-width calculation:
>>> _get_approx_string_width(test_string, 10)
77
"""
if fixed_width:
return len(text) * font_width
size = 0.0
# A dictionary containing percentages that relate to how wide
# each character will be represented in a variable width font.
# These percentages can be calculated using the ``_get_character_percentage_dict`` function.
char_width_percentages = {
"lij|' ": 40.0,
'![]fI.,:;/\\t': 50.0,
'`-(){}r"': 60.0,
'*^zcsJkvxy': 70.0,
'aebdhnopqug#$L+<>=?_~FZT0123456789': 70.0,
'BSPEAKVXY&UwNRCHD': 70.0,
'QGOMm%W@': 100.0
}
for s in text:
percentage = 100.0
for k in char_width_percentages.keys():
if s in k:
percentage = char_width_percentages[k]
break
size += (percentage / 100.0) * float(font_width)
return int(size)
# This is a helper function that can be used to generate alternate dictionaries
# for the _get_approx_string_width function. The function is not needed for
# normal operation of this package, and since it depends on the PIL package,
# which is not included in this package's dependencies, the function remains commented out.
#
# def _get_character_percentage_dict(font_path, font_size):
# """Get the dictionary used to estimate variable width font text lengths.
#
# Args:
# font_path(str): Path to valid font file.
# font_size(int): Font size to use.
#
# Returns: dict
#
# This function can be used to calculate the dictionary used in the
# ``get_approx_string_width`` function.
#
# Examples:
# >>> _get_character_percentage_dict('/Library/Fonts/Verdana.ttf', 9) # doctest: +ELLIPSIS
# {"lij|' ": 40, '![]fI.,:;/\\\\t': 50, '`-(){}r"': 60, '*^zcsJkvxy': 70, ...
# """
# from PIL import ImageFont
#
# # List of groups in size order, smallest to largest
# char_width_groups = [
# "lij|' ",
# '![]fI.,:;/\\t',
# '`-(){}r"',
# '*^zcsJkvxy',
# 'aebdhnopqug#$L+<>=?_~FZT' + digits,
# 'BSPEAKVXY&UwNRCHD',
# 'QGOMm%W@',
# ]
#
# def get_largest_in_group(group):
# """Get the widest character from the group."""
# return max([ImageFont.truetype(font_path, font_size).getsize(c)[0] for c in group])
#
# largest = char_width_groups[-1]
# font_width = get_largest_in_group(largest)
# return {group: int((get_largest_in_group(group) / font_width) * 100)
# for group in char_width_groups}
def parse_args():
"""Parse the command line arguments."""
import argparse
import textwrap
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
Command line utility to generate .svg badges.
This utility can be used to generate .svg badge images, using configurable
thresholds for coloring. Values can be passed as string, integer or floating
point. The type will be detected automatically.
Running the utility with a --file option will result in the .svg image being
written to file. Without the --file option the .svg file content will be
written to stdout, so can be redirected to a file.
Some thresholds have been built in to save time. To use these thresholds you
can simply specify the template name instead of threshold value/color pairs.
examples:
Here are some usage specific examples that may save time on defining
thresholds.
Pylint
anybadge.py --value=2.22 --file=pylint.svg pylint
anybadge.py --label=pylint --value=2.22 --file=pylint.svg \\
2=red 4=orange 8=yellow 10=green
Coverage
anybadge.py --value=65 --file=coverage.svg coverage
anybadge.py --label=coverage --value=65 --suffix='%%' --file=coverage.svg \\
50=red 60=orange 80=yellow 100=green
CI Pipeline
anybadge.py --label=pipeline --value=passing --file=pipeline.svg \\
passing=green failing=red
'''))
parser.add_argument('-l', '--label', type=str, help='The badge label.')
parser.add_argument('-v', '--value', type=str, help='The badge value.', required=True)
parser.add_argument('-m', '--value-format', type=str, default=None,
help='Formatting string for value (e.g. "%%.2f" for 2dp floats)')
parser.add_argument('-c', '--color', type=str, help='For fixed color badges use --color'
'to specify the badge color.',
default=DEFAULT_COLOR)
parser.add_argument('-p', '--prefix', type=str, help='Optional prefix for value.',
default='')
parser.add_argument('-s', '--suffix', type=str, help='Optional suffix for value.',
default='')
parser.add_argument('-d', '--padding', type=int, help='Number of characters to pad on '
'either side of the badge text.',
default=NUM_PADDING_CHARS)
parser.add_argument('--value-padding', type=int, help='Number of characters to pad on '
'either side of the badge value.',
default=NUM_VALUE_PADDING_CHARS)
parser.add_argument('-n', '--font', type=str,
help='Font name. Supported fonts: '
','.join(['"%s"' % x for x in FONT_WIDTHS.keys()]),
default=DEFAULT_FONT)
parser.add_argument('-z', '--font-size', type=int, help='Font size.',
default=DEFAULT_FONT_SIZE)
parser.add_argument('-t', '--template', type=str, help='Location of alternative '
'template .svg file.',
default=TEMPLATE_SVG)
parser.add_argument('-u', '--use-max', action='store_true',
help='Use the maximum threshold color when the value exceeds the '
'maximum threshold.')
parser.add_argument('-f', '--file', type=str, help='Output file location.')
parser.add_argument('-o', '--overwrite', action='store_true',
help='Overwrite output file if it already exists.')
parser.add_argument('-r', '--text-color', type=str, help='Text color. Single value affects both label'
'and value colors. A comma separated pair '
'affects label and value text respectively.',
default=DEFAULT_TEXT_COLOR)
parser.add_argument('args', nargs=argparse.REMAINDER, help='Pairs of <upper>=<color>. '
'For example 2=red 4=orange 6=yellow 8=good. '
'Read this as "Less than 2 = red, less than 4 = orange...".')
return parser.parse_args()
def main():
"""Generate a badge based on command line arguments."""
# Parse command line arguments
args = parse_args()
label = args.label
threshold_text = args.args
suffix = args.suffix
# Check whether thresholds were sent as one word, and is in the
# list of templates. If so, swap in the template.
if len(args.args) == 1 and args.args[0] in BADGE_TEMPLATES:
template_name = args.args[0]
template_dict = BADGE_TEMPLATES[template_name]
threshold_text = template_dict['threshold'].split(' ')
if not args.label:
label = template_dict['label']
if not args.suffix and 'suffix' in template_dict:
suffix = template_dict['suffix']
if not label:
raise ValueError('Label has not been set. Please use --label argument.')
# Create threshold list from args
threshold_list = [x.split('=') for x in threshold_text]
threshold_dict = {x[0]: x[1] for x in threshold_list}
# Create badge object
badge = Badge(label, args.value, value_prefix=args.prefix, value_suffix=suffix,
default_color=args.color, num_padding_chars=args.padding,
num_value_padding_chars=args.value_padding, font_name=args.font,
font_size=args.font_size, template=args.template,
use_max_when_value_exceeds=args.use_max, thresholds=threshold_dict,
value_format=args.value_format, text_color=args.text_color)
if args.file:
# Write badge SVG to file
badge.write_badge(args.file, overwrite=args.overwrite)
else:
print(badge.badge_svg_text)
if __name__ == '__main__':
main()
|
the-stack_106_22499 | import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.common_utils import try_contiguous
def _extract_patches(x, kernel_size, stride, padding):
"""
:param x: The input feature maps. (batch_size, in_c, h, w)
:param kernel_size: the kernel size of the conv filter (tuple of two elements)
:param stride: the stride of conv operation (tuple of two elements)
:param padding: number of paddings. be a tuple of two elements
:return: (batch_size, out_h, out_w, in_c*kh*kw)
"""
if padding[0] + padding[1] > 0:
x = F.pad(x, (padding[1], padding[1], padding[0],
padding[0])).data # Actually check dims
x = x.unfold(2, kernel_size[0], stride[0])
x = x.unfold(3, kernel_size[1], stride[1])
x = x.transpose_(1, 2).transpose_(2, 3).contiguous()
x = x.view(
x.size(0), x.size(1), x.size(2),
x.size(3) * x.size(4) * x.size(5))
return x
def _extract_channel_patches(x, kernel_size, stride, padding):
"""
:param x: The input feature maps. (batch_size, in_c, h, w)
:param kernel_size: the kernel size of the conv filter (tuple of two elements)
:param stride: the stride of conv operation (tuple of two elements)
:param padding: number of paddings. be a tuple of two elements
    :return: (batch_size, out_h, out_w, kh, kw, in_c)
"""
if padding[0] + padding[1] > 0:
x = F.pad(x, (padding[1], padding[1], padding[0],
padding[0])).data # Actually check dims
x = x.unfold(2, kernel_size[0], stride[0])
x = x.unfold(3, kernel_size[1], stride[1]) # b * oh * ow * kh * kw * inc
x = x.transpose_(1, 2).transpose_(2, 3).transpose_(3, 4).transpose(4, 5).contiguous()
x = x.view(x.size(0), x.size(1), x.size(2), x.size(3), x.size(4), x.size(5))
return x
def update_running_stat(aa, m_aa, stat_decay):
# using inplace operation to save memory!
m_aa *= stat_decay / (1 - stat_decay)
m_aa += aa
m_aa *= (1 - stat_decay)
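# The three in-place operations above form an exponential moving average while
# avoiding a temporary tensor; written out, they compute
#
#     m_aa_new = stat_decay * m_aa_old + (1 - stat_decay) * aa
#
# since scaling by stat_decay / (1 - stat_decay), adding aa and then scaling by
# (1 - stat_decay) distributes to exactly that expression.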
def fetch_mat_weights(layer, use_patch=False):
    # -> output_dim * input_dim (kh*kw*in_c + [1 if with bias])
if isinstance(layer, nn.Conv2d):
if use_patch:
weight = layer.weight.transpose(1, 2).transpose(2, 3) # n_out * kh * kw * inc
n_out, k_h, k_w, in_c = weight.size()
weight = try_contiguous(weight)
weight = weight.view(-1, weight.size(-1))
bias = 0
if layer.bias is not None:
copied_bias = torch.cat([layer.bias.unsqueeze(1) for _ in range(k_h*k_w)], 1).view(-1, 1)
weight = torch.cat([weight, copied_bias], 1) # layer.bias.unsqueeze(1)], 1)
bias = 1
weight = weight.view(n_out, k_h*k_w, in_c+bias)
else:
weight = layer.weight # n_filters * in_c * kh * kw
# weight = weight.transpose(1, 2).transpose(2, 3).contiguous()
weight = weight.view(weight.size(0), -1)
if layer.bias is not None:
weight = torch.cat([weight, layer.bias.unsqueeze(1)], 1)
elif isinstance(layer, nn.Linear):
weight = layer.weight
if layer.bias is not None:
weight = torch.cat([weight, layer.bias.unsqueeze(1)], 1)
else:
raise NotImplementedError
return weight
def mat_to_weight_and_bias(mat, layer):
if isinstance(layer, nn.Conv2d):
# mat: n_filters * (in_c * kh * kw)
k_h, k_w = layer.kernel_size
in_c = layer.in_channels
out_c = layer.out_channels
bias = None
if layer.bias is not None:
bias = mat[:, -1]
mat = mat[:, :-1]
weight = mat.view(out_c, in_c, k_h, k_w)
elif isinstance(layer, nn.Linear):
in_c = layer.in_features
out_c = layer.out_features
bias = None
if layer.bias is not None:
bias = mat[:, -1]
mat = mat[:, :-1]
weight = mat
else:
raise NotImplementedError
return weight, bias
class ComputeMatGrad:
@classmethod
def __call__(cls, input, grad_output, layer):
if isinstance(layer, nn.Linear):
grad = cls.linear(input, grad_output, layer)
elif isinstance(layer, nn.Conv2d):
grad = cls.conv2d(input, grad_output, layer)
else:
raise NotImplementedError
return grad
@staticmethod
def linear(input, grad_output, layer):
"""
:param input: batch_size * input_dim
:param grad_output: batch_size * output_dim
:param layer: [nn.module] output_dim * input_dim
:return: batch_size * output_dim * (input_dim + [1 if with bias])
"""
with torch.no_grad():
if layer.bias is not None:
input = torch.cat([input, input.new(input.size(0), 1).fill_(1)], 1)
input = input.unsqueeze(1)
grad_output = grad_output.unsqueeze(2)
grad = torch.bmm(grad_output, input)
return grad
@staticmethod
def conv2d(input, grad_output, layer):
"""
:param input: batch_size * in_c * in_h * in_w
:param grad_output: batch_size * out_c * h * w
        :param layer: [nn.module]
        :return: batch_size * out_c * (in_c*k_h*k_w + [1 if with bias])
"""
with torch.no_grad():
input = _extract_patches(input, layer.kernel_size, layer.stride, layer.padding)
input = input.view(-1, input.size(-1)) # b * hw * in_c*kh*kw
grad_output = grad_output.transpose(1, 2).transpose(2, 3)
grad_output = try_contiguous(grad_output).view(grad_output.size(0), -1, grad_output.size(-1))
# b * hw * out_c
if layer.bias is not None:
input = torch.cat([input, input.new(input.size(0), 1).fill_(1)], 1)
input = input.view(grad_output.size(0), -1, input.size(-1)) # b * hw * in_c*kh*kw
grad = torch.einsum('abm,abn->amn', (grad_output, input))
return grad
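# A hedged shape sketch for ComputeMatGrad above (the layer size and batch size
# are illustrative assumptions, not taken from this module):
#
#     layer = nn.Linear(4, 3)
#     x, g = torch.randn(8, 4), torch.randn(8, 3)
#     ComputeMatGrad()(x, g, layer).shape   # torch.Size([8, 3, 5]); the extra
#                                           # column comes from the bias term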
class ComputeCovA:
@classmethod
def compute_cov_a(cls, a, layer):
return cls.__call__(a, layer)
@classmethod
def __call__(cls, a, layer):
if isinstance(layer, nn.Linear):
cov_a = cls.linear(a, layer)
elif isinstance(layer, nn.Conv2d):
cov_a = cls.conv2d(a, layer)
else:
# raise NotImplementedError
cov_a = None
return cov_a
@staticmethod
def conv2d(a, layer):
batch_size = a.size(0)
a = _extract_patches(a, layer.kernel_size, layer.stride, layer.padding)
spatial_size = a.size(1) * a.size(2)
a = a.view(-1, a.size(-1))
if layer.bias is not None:
a = torch.cat([a, a.new(a.size(0), 1).fill_(1)], 1)
a = a/spatial_size
return a.t() @ (a / batch_size)
@staticmethod
def linear(a, layer):
# a: batch_size * in_dim
batch_size = a.size(0)
if layer.bias is not None:
a = torch.cat([a, a.new(a.size(0), 1).fill_(1)], 1)
return a.t() @ (a / batch_size)
class ComputeCovG:
@classmethod
def compute_cov_g(cls, g, layer, batch_averaged=False):
"""
:param g: gradient
:param layer: the corresponding layer
        :param batch_averaged: whether the gradient has already been averaged over the batch size
:return:
"""
# batch_size = g.size(0)
return cls.__call__(g, layer, batch_averaged)
@classmethod
def __call__(cls, g, layer, batch_averaged):
if isinstance(layer, nn.Conv2d):
cov_g = cls.conv2d(g, layer, batch_averaged)
elif isinstance(layer, nn.Linear):
cov_g = cls.linear(g, layer, batch_averaged)
else:
cov_g = None
return cov_g
@staticmethod
def conv2d(g, layer, batch_averaged):
# g: batch_size * n_filters * out_h * out_w
# n_filters is actually the output dimension (analogous to Linear layer)
spatial_size = g.size(2) * g.size(3)
batch_size = g.shape[0]
g = g.transpose(1, 2).transpose(2, 3)
g = try_contiguous(g)
g = g.view(-1, g.size(-1))
if batch_averaged:
g = g * batch_size
g = g * spatial_size
cov_g = g.t() @ (g / g.size(0))
return cov_g
@staticmethod
def linear(g, layer, batch_averaged):
# g: batch_size * out_dim
batch_size = g.size(0)
if batch_averaged:
cov_g = g.t() @ (g * batch_size)
else:
cov_g = g.t() @ (g / batch_size)
return cov_g
class ComputeCovAPatch(ComputeCovA):
@staticmethod
def conv2d(a, layer):
batch_size = a.size(0)
a = _extract_channel_patches(a, layer.kernel_size, layer.stride, layer.padding)
spatial_size = a.size(1) * a.size(2)
a = a.view(-1, a.size(-1))
patch_size = layer.kernel_size[0] * layer.kernel_size[1]
if layer.bias is not None:
a = torch.cat([a, a.new(a.size(0), 1).fill_(1./patch_size)], 1)
a = a / spatial_size
return a.t() @ (a / batch_size / patch_size)
if __name__ == '__main__':
def test_ComputeCovA():
pass
def test_ComputeCovG():
pass
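    # A minimal, hedged smoke test added below the empty stubs above; the layer
    # and batch sizes are arbitrary and only the covariance shapes are checked.
    _layer = nn.Linear(4, 3)
    _a, _g = torch.randn(8, 4), torch.randn(8, 3)
    assert ComputeCovA()(_a, _layer).shape == (5, 5)  # in_dim + bias column
    assert ComputeCovG()(_g, _layer, batch_averaged=True).shape == (3, 3)
    print('covariance shape checks passed')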
|
the-stack_106_22500 | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""V2 Kubeflow DAG Runner."""
import datetime
import json
import os
from typing import Any, Dict, List, Optional
from kfp.pipeline_spec import pipeline_spec_pb2
from tfx import version
from tfx.dsl.io import fileio
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration import tfx_runner
from tfx.orchestration.config import pipeline_config
from tfx.orchestration.kubeflow.v2 import pipeline_builder
from tfx.utils import telemetry_utils
from tfx.utils import version_utils
from google.protobuf import json_format
_KUBEFLOW_TFX_CMD = (
'python', '-m',
'tfx.orchestration.kubeflow.v2.container.kubeflow_v2_run_executor')
# Current schema version for the API proto.
_SCHEMA_VERSION = '2.0.0'
# Default TFX container image/commands to use in KubeflowV2DagRunner.
_KUBEFLOW_TFX_IMAGE = 'gcr.io/tfx-oss-public/tfx:{}'.format(
version_utils.get_image_version())
def _get_current_time():
"""Gets the current timestamp."""
return datetime.datetime.now()
class KubeflowV2DagRunnerConfig(pipeline_config.PipelineConfig):
"""Runtime configuration specific to execution on Kubeflow V2 pipelines."""
def __init__(self,
display_name: Optional[str] = None,
default_image: Optional[str] = None,
default_commands: Optional[List[str]] = None,
**kwargs):
"""Constructs a Kubeflow V2 runner config.
Args:
display_name: Optional human-readable pipeline name. Defaults to the
pipeline name passed into `KubeflowV2DagRunner.run()`.
      default_image: The default TFX image to be used if not overridden by the
        per-component specification.
default_commands: Optionally specifies the commands of the provided
container image. When not provided, the default `ENTRYPOINT` specified
in the docker image is used. Note: the commands here refers to the K8S
container command, which maps to Docker entrypoint field. If one
supplies command but no args are provided for the container, the
container will be invoked with the provided command, ignoring the
`ENTRYPOINT` and `CMD` defined in the Dockerfile. One can find more
details regarding the difference between K8S and Docker conventions at
https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes
**kwargs: Additional args passed to base PipelineConfig.
"""
super().__init__(**kwargs)
self.display_name = display_name
self.default_image = default_image or _KUBEFLOW_TFX_IMAGE
if default_commands is None:
self.default_commands = _KUBEFLOW_TFX_CMD
else:
self.default_commands = default_commands
class KubeflowV2DagRunner(tfx_runner.TfxRunner):
"""Kubeflow V2 pipeline runner (currently for managed pipelines).
Builds a pipeline job spec in json format based on TFX pipeline DSL object.
"""
def __init__(self,
config: KubeflowV2DagRunnerConfig,
output_dir: Optional[str] = None,
output_filename: Optional[str] = None):
"""Constructs an KubeflowV2DagRunner for compiling pipelines.
Args:
config: An KubeflowV2DagRunnerConfig object to specify runtime
configuration when running the pipeline in Kubeflow.
output_dir: An optional output directory into which to output the pipeline
definition files. Defaults to the current working directory.
output_filename: An optional output file name for the pipeline definition
file. The file output format will be a JSON-serialized PipelineJob pb
message. Defaults to 'pipeline.json'.
"""
if not isinstance(config, KubeflowV2DagRunnerConfig):
raise TypeError('config must be type of KubeflowV2DagRunnerConfig.')
super().__init__()
self._config = config
self._output_dir = output_dir or os.getcwd()
self._output_filename = output_filename or 'pipeline.json'
def run(self,
pipeline: tfx_pipeline.Pipeline,
parameter_values: Optional[Dict[str, Any]] = None,
write_out: Optional[bool] = True) -> Dict[str, Any]:
"""Compiles a pipeline DSL object into pipeline file.
Args:
pipeline: TFX pipeline object.
parameter_values: mapping from runtime parameter names to its values.
write_out: set to True to actually write out the file to the place
designated by output_dir and output_filename. Otherwise return the
JSON-serialized pipeline job spec.
Returns:
Returns the JSON pipeline job spec.
Raises:
RuntimeError: if trying to write out to a place occupied by an existing
file.
"""
# TODO(b/166343606): Support user-provided labels.
# TODO(b/169095387): Deprecate .run() method in favor of the unified API
# client.
display_name = (
self._config.display_name or pipeline.pipeline_info.pipeline_name)
pipeline_spec = pipeline_builder.PipelineBuilder(
tfx_pipeline=pipeline,
default_image=self._config.default_image,
default_commands=self._config.default_commands).build()
pipeline_spec.sdk_version = 'tfx-{}'.format(version.__version__)
pipeline_spec.schema_version = _SCHEMA_VERSION
runtime_config = pipeline_builder.RuntimeConfigBuilder(
pipeline_info=pipeline.pipeline_info,
parameter_values=parameter_values).build()
with telemetry_utils.scoped_labels(
{telemetry_utils.LABEL_TFX_RUNNER: 'kubeflow_v2'}):
result = pipeline_spec_pb2.PipelineJob(
display_name=display_name or pipeline.pipeline_info.pipeline_name,
labels=telemetry_utils.make_labels_dict(),
runtime_config=runtime_config)
result.pipeline_spec.update(json_format.MessageToDict(pipeline_spec))
pipeline_json_dict = json_format.MessageToDict(result)
if write_out:
if fileio.exists(self._output_dir) and not fileio.isdir(self._output_dir):
raise RuntimeError('Output path: %s is pointed to a file.' %
self._output_dir)
if not fileio.exists(self._output_dir):
fileio.makedirs(self._output_dir)
with fileio.open(
os.path.join(self._output_dir, self._output_filename), 'wb') as f:
f.write(json.dumps(pipeline_json_dict, sort_keys=True))
return pipeline_json_dict
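# A hedged usage sketch (the TFX pipeline object is assumed to already exist
# and is not constructed here):
#
#     runner = KubeflowV2DagRunner(
#         config=KubeflowV2DagRunnerConfig(display_name='my-pipeline'),
#         output_dir='/tmp/pipeline_output')
#     runner.run(my_tfx_pipeline, parameter_values={'param': 'value'})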
|
the-stack_106_22501 | # coding: utf-8
"""
Unofficial python library for the SmartRecruiters API
The SmartRecruiters API provides a platform to integrate services or applications, build apps and create fully customizable career sites. It exposes SmartRecruiters functionality and allows to connect and build software enhancing it.
OpenAPI spec version: 1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class JobStatusHistory(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, total_found=None, content=None):
"""
JobStatusHistory - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'total_found': 'int',
'content': 'list[JobStatusHistoryContent]'
}
self.attribute_map = {
'total_found': 'totalFound',
'content': 'content'
}
self._total_found = total_found
self._content = content
@property
def total_found(self):
"""
Gets the total_found of this JobStatusHistory.
:return: The total_found of this JobStatusHistory.
:rtype: int
"""
return self._total_found
@total_found.setter
def total_found(self, total_found):
"""
Sets the total_found of this JobStatusHistory.
:param total_found: The total_found of this JobStatusHistory.
:type: int
"""
if total_found is None:
raise ValueError("Invalid value for `total_found`, must not be `None`")
self._total_found = total_found
@property
def content(self):
"""
Gets the content of this JobStatusHistory.
:return: The content of this JobStatusHistory.
:rtype: list[JobStatusHistoryContent]
"""
return self._content
@content.setter
def content(self, content):
"""
Sets the content of this JobStatusHistory.
:param content: The content of this JobStatusHistory.
:type: list[JobStatusHistoryContent]
"""
if content is None:
raise ValueError("Invalid value for `content`, must not be `None`")
self._content = content
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, JobStatusHistory):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
the-stack_106_22503 | """
File:
JetSegGraph.py
Contents and purpose:
Draws the event graph and progress bar
Copyright (c) 2008 Android Open Source Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import wx
import logging
from JetUtils import *
from JetDefs import *
GRAPH_COLORS = [
'#C0E272',
'#85CF89',
'#CF9683',
'#749EDE',
'#9FB5B1',
'#B095BF',
'#FE546D',
'#B3BB97',
'#FFFFB8',
]
PROGRESS_BAR = '#0000CC'
EOS_BAR = '#095000'
APP_BAR = '#B3BB97'
class Marker():
""" Defines portions of the graph for events """
def __init__(self, sEventType, iEventId, sName, sStartMbt, sEndMbt, iStartMeasure, ppqn):
self.sEventType = sEventType
self.iEventId = iEventId
self.sName = sName
self.StartMbt = ConvertStrTimeToTuple(sStartMbt)
self.EndMbt = ConvertStrTimeToTuple(sEndMbt)
self.iStartMeasure = iStartMeasure
self.iStart = 0
self.iEnd = 0
self.iWidth = 0
self.iHeight = 0
self.iTop = 0
self.iUpdate = False
self.sColor = '#FFFFB8'
self.ppqn = ppqn
self.isDirty = False
def CalcCoord(self, step, height, ColorFct):
""" Calculates the coordinates in pixels for graphing the shaded regions """
#measures
iStartM = self.StartMbt[0] - self.iStartMeasure
iEndM = self.EndMbt[0] - self.iStartMeasure
self.iStart = step * iStartM
self.iEnd = step * iEndM
#beats
self.iStart = self.iStart + ((step / 4.0) * (self.StartMbt[1]-1))
self.iEnd = self.iEnd + ((step / 4.0) * (self.EndMbt[1]-1))
#ticks
pctTickOfBeat = (float(self.StartMbt[2]) / float(self.ppqn))
self.iStart = self.iStart + ((pctTickOfBeat * (step / 4.0)))
pctTickOfBeat = (float(self.EndMbt[2]) / float(self.ppqn))
self.iEnd = self.iEnd + ((pctTickOfBeat * (step / 4.0)))
self.iWidth = self.iEnd - self.iStart
self.iHeight = height
self.sColor = ColorFct()
self.iUpdate = False
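    # Worked sketch of the mapping above (numbers are illustrative): with
    # step=100 pixels per measure, ppqn=480 and iStartMeasure=1, an event at
    # MBT 3:2:240 starts at (3-1)*100 + (2-1)*(100/4) + (240/480)*(100/4)
    # = 237.5 pixels from the left edge of the graph.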
class SegmentGraph(wx.Panel):
""" Draws the player graph bar """
def __init__(self, parent, pos=wx.DefaultPosition, size=wx.DefaultSize, ClickCallbackFct=None, showLabels=True, showClips=True, showAppEvts=True):
wx.Panel.__init__(self, parent, -1, pos=pos, size=size, style=wx.BORDER_STATIC)
self.iLocationInMs = 0
self.iLengthInMs = 0
self.iLengthInMeasures = 0
self.iMarkerTop = 15
self.iScaleTop = 0
self.iEdges = 5
self.iStartMeasure = 0
self.iMidiMode = False
self.ClickCallbackFct = ClickCallbackFct
self.iColor = 0
self.showLabels = showLabels
self.showClips = showClips
self.showAppEvts = showAppEvts
self.font = wx.Font(8, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, 'Courier')
self.Markers = []
self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
#initialize buffer
self.OnSize(None)
def ClearGraph(self):
""" Clears the graph values """
self.iLocationInMs = 0
self.iLengthInMs = 0
self.iLengthInMeasures = 0
self.iMarkerTop = 15
self.iScaleTop = 0
self.iEdges = 5
self.iStartMeasure = 0
self.iMidiMode = False
self.iColor = 0
self.Markers = []
self.iLocationInMs = 0
self.DoDrawing()
def LoadSegment(self, segment, segMarker=None, iMidiMode=False, showLabels=True, showClips=True, showAppEvts=True):
""" Loads up the segment drawing the graph """
if segment is None:
self.ClearGraph()
return None
self.iMidiMode = iMidiMode
self.showLabels = showLabels
self.showClips = showClips
self.showAppEvts = showAppEvts
self.Markers = []
self.iLocationInMs = 0
info = MidiSegInfo(segment)
#disable graph for debugging
#return info
self.iLengthInMs = info.iLengthInMs
self.ppqn = info.ppqn
self.StartMbt = mbtFct(ConvertStrTimeToTuple(segment.start), 1)
self.EndMbt = mbtFct(ConvertStrTimeToTuple(segment.end), 1)
self.LengthMbt = None
self.iStartMeasure = self.StartMbt[0]
self.iLengthInMeasures = self.EndMbt[0] - self.StartMbt[0]
for jet_event in segment.jetevents:
if self.showClips and jet_event.event_type == JetDefs.E_CLIP:
self.AddMarker(JetDefs.E_CLIP, jet_event.event_id, jet_event.event_name, mbtFct(jet_event.event_start,1), mbtFct(jet_event.event_end,1), self.iStartMeasure, self.ppqn)
elif jet_event.event_type == JetDefs.E_EOS:
self.AddMarker(JetDefs.E_EOS, jet_event.event_id, jet_event.event_name, mbtFct(jet_event.event_end,1), mbtFct(jet_event.event_end,1), self.iStartMeasure, self.ppqn)
elif self.showAppEvts and jet_event.event_type == JetDefs.E_APP:
self.AddMarker(JetDefs.E_APP, jet_event.event_id, jet_event.event_name, mbtFct(jet_event.event_start,1), mbtFct(jet_event.event_end,1), self.iStartMeasure, self.ppqn)
if segMarker is not None:
self.AddMarker(JetDefs.E_CLIP, 0, segMarker[0], mbtFct(segMarker[1],1), mbtFct(segMarker[2],1), self.iStartMeasure, self.ppqn)
self.DoDrawing()
return info
def AddMarker(self, sEventType, iEventId, sName, sStartMbt, sEndMbt, iStartMeasure, ppqn):
""" Adds a marker to the list """
if not CompareMbt(sStartMbt, sEndMbt):
sEndMbt = sStartMbt
self.Markers.append(Marker(sEventType, iEventId, sName, sStartMbt, sEndMbt, iStartMeasure, ppqn))
def OnLeftDown(self, event):
""" Calls the function assicated with an event """
pt = event.GetPosition()
for Marker in self.Markers:
if pt[0] >= Marker.iStart and pt[0] <= Marker.iEnd and pt[1] >= Marker.iTop and pt[1] <= Marker.iTop + Marker.iHeight:
if self.ClickCallbackFct != None:
self.ClickCallbackFct(Marker.sName, Marker.iEventId)
def GetAColor(self):
""" Gets a color """
color = GRAPH_COLORS[self.iColor]
self.iColor = self.iColor + 1
if self.iColor >= len(GRAPH_COLORS):
self.iColor = 0
return color
def OnSize(self, event=None):
""" Repaints for resizing of screen """
if OsWindows():
# The Buffer init is done here, to make sure the buffer is always
# the same size as the Window
Size = self.GetClientSizeTuple()
# Make new offscreen bitmap: this bitmap will always have the
# current drawing in it, so it can be used to save the image to
# a file, or whatever.
self._Buffer = wx.EmptyBitmap(*Size)
self.DoDrawing(None)
if event is not None:
event.Skip()
def OnPaint(self, event=None):
""" Painting of windows """
if OsWindows():
dc = wx.BufferedPaintDC(self, self._Buffer)
else:
dc = wx.AutoBufferedPaintDC(self)
dc.Background = wx.Brush(wx.WHITE)
self.DoDrawing(dc)
def DoDrawing(self, dc=None):
""" Does the actual drawing of the control """
if dc is None:
if OsWindows():
dc = wx.BufferedDC(wx.ClientDC(self), self._Buffer)
else:
dc = wx.AutoBufferedPaintDC(self)
dc.Background = wx.Brush(wx.WHITE)
dc.Clear()
self.iColor = 0
gWidth, gHeight = self.GetSize()
gWidth = gWidth - (self.iEdges * 2)
step = int(gWidth / (self.iLengthInMeasures + .01))
for Marker in self.Markers:
Marker.CalcCoord(step, gHeight, self.GetAColor)
""" eliminate overlaps; establish colors """
iClips = 0
iMarkers = 0
for index, Marker in enumerate(self.Markers):
if Marker.sEventType == JetDefs.E_CLIP:
iClips = iClips + 1
iOverlaps = 1
for index1, Marker1 in enumerate(self.Markers):
if Marker.sEventType == JetDefs.E_CLIP:
if index != index1 and not Marker1.iUpdate:
if Marker.iStart <= Marker1.iStart and Marker.iEnd <= Marker1.iEnd and Marker.iEnd >= Marker1.iStart:
iOverlaps = iOverlaps + 1
Marker.iUpdate = True
Marker1.iUpdate = True
if not Marker.iUpdate and Marker.iStart >= Marker1.iStart and Marker.iEnd >= Marker1.iEnd and Marker.iStart <= Marker1.iEnd:
iOverlaps = iOverlaps + 1
Marker.iUpdate = True
Marker1.iUpdate = True
if iOverlaps > 1:
iTop = 0
for index1, Marker1 in enumerate(self.Markers):
if Marker.sEventType == JetDefs.E_CLIP:
if Marker1.iUpdate:
Marker1.iHeight = gHeight / iOverlaps
Marker1.iTop = iTop * Marker1.iHeight
iTop = iTop + 1
elif Marker.sEventType == JetDefs.E_APP:
iMarkers = iMarkers + 1
for Marker in self.Markers:
if Marker.sEventType == JetDefs.E_CLIP:
dc.SetPen(wx.Pen(Marker.sColor))
dc.SetBrush(wx.Brush(Marker.sColor))
dc.DrawRectangle(Marker.iStart + self.iEdges, Marker.iTop, Marker.iWidth, Marker.iHeight)
width, height = dc.GetTextExtent(Marker.sName)
k = ((Marker.iStart + Marker.iEnd) / 2) - (width/2) + self.iEdges
if self.showLabels or self.iMidiMode:
dc.DrawText(Marker.sName, k, ((Marker.iTop+Marker.iHeight/2) - (height*.5)))
if self.iMidiMode:
self.iMidiModeStart = Marker.iStart
elif Marker.sEventType == JetDefs.E_EOS:
dc.SetPen(wx.Pen(EOS_BAR))
dc.SetBrush(wx.Brush(EOS_BAR))
dc.DrawRectangle(Marker.iStart + self.iEdges, Marker.iTop, 1, Marker.iHeight)
width, height = dc.GetTextExtent(Marker.sName)
k = Marker.iStart - (width/2) + self.iEdges
dc.DrawText(Marker.sName, k, ((Marker.iTop+Marker.iHeight/2) - (height*.5)))
elif Marker.sEventType == JetDefs.E_APP:
dc.SetPen(wx.Pen(APP_BAR))
dc.SetBrush(wx.Brush(APP_BAR))
dc.DrawRectangle(Marker.iStart + self.iEdges, Marker.iTop, 1, Marker.iHeight)
width, height = dc.GetTextExtent(Marker.sName)
k = Marker.iStart - (width/2) + self.iEdges
if self.showLabels or self.iMidiMode:
dc.DrawText(Marker.sName, k, ((Marker.iTop+Marker.iHeight/2) - (height*.5)))
""" Draw scale """
if gWidth == 0:
iDiv = 50
else:
iDiv = (gWidth)/18
if iDiv == 0:
iDiv = 50
scale = ((self.iLengthInMeasures / iDiv) + 1)
if scale == 0:
scale = 1
beatStep = step / 4.0
dc.SetFont(self.font)
j = 0
lastEnd = 0
num = range(self.iStartMeasure, self.iStartMeasure + self.iLengthInMeasures + 1, 1)
dc.SetPen(wx.Pen('#5C5142'))
for i in range(0, (self.iLengthInMeasures+1)*step, step):
k = i + self.iEdges
dc.DrawLine(k, self.iScaleTop, k, self.iScaleTop+8)
if i != (self.iLengthInMeasures)*step:
for iBeat in range(1,4):
k = i+(iBeat * beatStep) + self.iEdges
dc.DrawLine(k, self.iScaleTop, k, self.iScaleTop+4)
width, height = dc.GetTextExtent(str(num[j]))
k = i-(width/2) + self.iEdges
if k > lastEnd:
if j == 0 or (j % scale) == 0:
dc.DrawText(str(num[j]), k, self.iScaleTop+8)
lastEnd = k + width
j = j + 1
""" Updates the location bar in case screen moved or resized """
if self.iLocationInMs > 0 and self.iLengthInMs > 0:
iOffset = 0
if self.iMidiMode:
iOffset = self.iMidiModeStart
till = gWidth * (self.iLocationInMs / self.iLengthInMs)
dc.SetPen(wx.Pen(PROGRESS_BAR))
dc.SetBrush(wx.Brush(PROGRESS_BAR))
dc.DrawRectangle(self.iEdges + iOffset, gHeight-6, till, 3)
def UpdateLocation(self, iLocationInMs):
""" Updates the location bar """
#disable graph for debugging
#return info
self.iLocationInMs = iLocationInMs
if self.iLocationInMs > 0 and self.iLengthInMs > 0:
if OsWindows():
dc = wx.BufferedDC(wx.ClientDC(self), self._Buffer)
else:
dc = wx.AutoBufferedPaintDC(self)
dc.Background = wx.Brush(wx.WHITE)
iOffset = 0
if self.iMidiMode:
iOffset = self.iMidiModeStart
gWidth, gHeight = self.GetSize()
gWidth = gWidth - (self.iEdges * 2)
till = gWidth * (self.iLocationInMs / self.iLengthInMs)
dc.SetPen(wx.Pen(PROGRESS_BAR))
dc.SetBrush(wx.Brush(PROGRESS_BAR))
dc.DrawRectangle(self.iEdges + iOffset, gHeight-6, till, 3)
self.isDirty = True
else:
if self.isDirty:
self.DoDrawing()
self.isDirty = False
|
the-stack_106_22504 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016 Michael Gruener <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudflare_dns
author: "Michael Gruener (@mgruener)"
requirements:
- "python >= 2.6"
version_added: "2.1"
short_description: manage Cloudflare DNS records
description:
- "Manages dns records via the Cloudflare API, see the docs: U(https://api.cloudflare.com/)"
options:
account_api_token:
description:
- >
Account API token. You can obtain your API key from the bottom of the Cloudflare 'My Account' page, found here: U(https://www.cloudflare.com/a/account)
required: true
account_email:
description:
- "Account email."
required: true
port:
description: Service port. Required for C(type=SRV)
required: false
default: null
priority:
description: Record priority. Required for C(type=MX) and C(type=SRV)
required: false
default: "1"
proto:
description: Service protocol. Required for C(type=SRV)
required: false
choices: [ 'tcp', 'udp' ]
default: null
proxied:
description: Proxy through cloudflare network or just use DNS
required: false
default: no
version_added: "2.3"
record:
description:
- Record to add. Required if C(state=present). Default is C(@) (e.g. the zone name)
required: false
default: "@"
aliases: [ "name" ]
service:
description: Record service. Required for C(type=SRV)
required: false
default: null
solo:
description:
- Whether the record should be the only one for that record type and record name. Only use with C(state=present)
- This will delete all other records with the same record name and type.
required: false
default: null
state:
description:
- Whether the record(s) should exist or not
required: false
choices: [ 'present', 'absent' ]
default: present
timeout:
description:
- Timeout for Cloudflare API calls
required: false
default: 30
ttl:
description:
- The TTL to give the new record. Must be between 120 and 2,147,483,647 seconds, or 1 for automatic.
required: false
default: 1 (automatic)
type:
description:
- The type of DNS record to create. Required if C(state=present)
required: false
choices: [ 'A', 'AAAA', 'CNAME', 'TXT', 'SRV', 'MX', 'NS', 'SPF' ]
default: null
value:
description:
- The record value. Required for C(state=present)
required: false
default: null
aliases: [ "content" ]
weight:
description: Service weight. Required for C(type=SRV)
required: false
default: "1"
zone:
description:
- The name of the Zone to work with (e.g. "example.com"). The Zone must already exist.
required: true
aliases: ["domain"]
'''
EXAMPLES = '''
# create a test.my.com A record to point to 127.0.0.1
- cloudflare_dns:
zone: my.com
record: test
type: A
value: 127.0.0.1
account_email: [email protected]
account_api_token: dummyapitoken
register: record
# create a my.com CNAME record to example.com
- cloudflare_dns:
zone: my.com
type: CNAME
value: example.com
state: present
account_email: [email protected]
account_api_token: dummyapitoken
# change its TTL
- cloudflare_dns:
zone: my.com
type: CNAME
value: example.com
ttl: 600
state: present
account_email: [email protected]
account_api_token: dummyapitoken
# and delete the record
- cloudflare_dns:
zone: my.com
type: CNAME
value: example.com
state: absent
account_email: [email protected]
account_api_token: dummyapitoken
# create a my.com CNAME record to example.com and proxy through cloudflare's network
- cloudflare_dns:
zone: my.com
type: CNAME
value: example.com
state: present
proxied: yes
account_email: [email protected]
account_api_token: dummyapitoken
# create TXT record "test.my.com" with value "unique value"
# delete all other TXT records named "test.my.com"
- cloudflare_dns:
domain: my.com
record: test
type: TXT
value: unique value
state: present
solo: true
account_email: [email protected]
account_api_token: dummyapitoken
# create a SRV record _foo._tcp.my.com
- cloudflare_dns:
domain: my.com
service: foo
proto: tcp
port: 3500
priority: 10
weight: 20
type: SRV
value: fooserver.my.com
'''
RETURN = '''
record:
description: dictionary containing the record data
returned: success, except on record deletion
type: complex
contains:
content:
description: the record content (details depend on record type)
returned: success
type: string
sample: 192.0.2.91
created_on:
description: the record creation date
returned: success
type: string
sample: 2016-03-25T19:09:42.516553Z
data:
description: additional record data
returned: success, if type is SRV
type: dictionary
sample: {
name: "jabber",
port: 8080,
priority: 10,
proto: "_tcp",
service: "_xmpp",
target: "jabberhost.sample.com",
weight: 5,
}
id:
description: the record id
returned: success
type: string
sample: f9efb0549e96abcb750de63b38c9576e
locked:
description: No documentation available
returned: success
type: boolean
sample: False
meta:
description: No documentation available
returned: success
type: dictionary
sample: { auto_added: false }
modified_on:
description: record modification date
returned: success
type: string
sample: 2016-03-25T19:09:42.516553Z
name:
description: the record name as FQDN (including _service and _proto for SRV)
returned: success
type: string
sample: www.sample.com
priority:
description: priority of the MX record
returned: success, if type is MX
type: int
sample: 10
proxiable:
description: whether this record can be proxied through cloudflare
returned: success
type: boolean
sample: False
proxied:
description: whether the record is proxied through cloudflare
returned: success
type: boolean
sample: False
ttl:
description: the time-to-live for the record
returned: success
type: int
sample: 300
type:
description: the record type
returned: success
type: string
sample: A
zone_id:
description: the id of the zone containing the record
returned: success
type: string
sample: abcede0bf9f0066f94029d2e6b73856a
zone_name:
description: the name of the zone containing the record
returned: success
type: string
sample: sample.com
'''
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.urls import fetch_url
class CloudflareAPI(object):
cf_api_endpoint = 'https://api.cloudflare.com/client/v4'
changed = False
def __init__(self, module):
self.module = module
self.account_api_token = module.params['account_api_token']
self.account_email = module.params['account_email']
self.port = module.params['port']
self.priority = module.params['priority']
self.proto = module.params['proto']
self.proxied = module.params['proxied']
self.record = module.params['record']
self.service = module.params['service']
self.is_solo = module.params['solo']
self.state = module.params['state']
self.timeout = module.params['timeout']
self.ttl = module.params['ttl']
self.type = module.params['type']
self.value = module.params['value']
self.weight = module.params['weight']
self.zone = module.params['zone']
if self.record == '@':
self.record = self.zone
if (self.type in ['CNAME', 'NS', 'MX', 'SRV']) and (self.value is not None):
self.value = self.value.rstrip('.')
if (self.type == 'SRV'):
if (self.proto is not None) and (not self.proto.startswith('_')):
self.proto = '_' + self.proto
if (self.service is not None) and (not self.service.startswith('_')):
self.service = '_' + self.service
if not self.record.endswith(self.zone):
self.record = self.record + '.' + self.zone
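    # Illustrative effect of the normalization above (values are examples):
    # with zone='my.com' and record='test', the record becomes 'test.my.com';
    # for an SRV record with service='foo' and proto='tcp' they become '_foo'
    # and '_tcp', so the owner name searched later is '_foo._tcp.test.my.com'.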
def _cf_simple_api_call(self, api_call, method='GET', payload=None):
headers = {'X-Auth-Email': self.account_email,
'X-Auth-Key': self.account_api_token,
'Content-Type': 'application/json'}
data = None
if payload:
try:
data = json.dumps(payload)
except Exception as e:
self.module.fail_json(msg="Failed to encode payload as JSON: %s " % to_native(e))
resp, info = fetch_url(self.module,
self.cf_api_endpoint + api_call,
headers=headers,
data=data,
method=method,
timeout=self.timeout)
if info['status'] not in [200, 304, 400, 401, 403, 429, 405, 415]:
self.module.fail_json(msg="Failed API call {0}; got unexpected HTTP code {1}".format(api_call, info['status']))
error_msg = ''
if info['status'] == 401:
# Unauthorized
error_msg = "API user does not have permission; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
elif info['status'] == 403:
# Forbidden
error_msg = "API request not authenticated; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
elif info['status'] == 429:
# Too many requests
error_msg = "API client is rate limited; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
elif info['status'] == 405:
# Method not allowed
error_msg = "API incorrect HTTP method provided; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
elif info['status'] == 415:
# Unsupported Media Type
error_msg = "API request is not valid JSON; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
elif info['status'] == 400:
# Bad Request
error_msg = "API bad request; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
result = None
try:
content = resp.read()
except AttributeError:
if info['body']:
content = info['body']
else:
error_msg += "; The API response was empty"
if content:
try:
result = json.loads(to_text(content, errors='surrogate_or_strict'))
except (json.JSONDecodeError, UnicodeError) as e:
error_msg += "; Failed to parse API response with error {0}: {1}".format(to_native(e), content)
# received an error status but no data with details on what failed
if (info['status'] not in [200, 304]) and (result is None):
self.module.fail_json(msg=error_msg)
if not result['success']:
error_msg += "; Error details: "
for error in result['errors']:
error_msg += "code: {0}, error: {1}; ".format(error['code'], error['message'])
if 'error_chain' in error:
for chain_error in error['error_chain']:
error_msg += "code: {0}, error: {1}; ".format(chain_error['code'], chain_error['message'])
self.module.fail_json(msg=error_msg)
return result, info['status']
def _cf_api_call(self, api_call, method='GET', payload=None):
result, status = self._cf_simple_api_call(api_call, method, payload)
data = result['result']
if 'result_info' in result:
pagination = result['result_info']
if pagination['total_pages'] > 1:
                next_page = int(pagination['page']) + 1
                # strip "page" parameter from call parameters (if there are any)
                if '?' in api_call:
                    raw_api_call, query = api_call.split('?', 1)
                    parameters = [param for param in query.split('&') if not param.startswith('page')]
                else:
                    raw_api_call = api_call
                    parameters = []
                while next_page <= pagination['total_pages']:
                    # rebuild the URL for each page so the page parameter is
                    # updated rather than appended to the previous call string
                    page_call = raw_api_call + '?' + '&'.join(['page={0}'.format(next_page)] + parameters)
                    result, status = self._cf_simple_api_call(page_call, method, payload)
                    data += result['result']
                    next_page += 1
return data, status
def _get_zone_id(self, zone=None):
if not zone:
zone = self.zone
zones = self.get_zones(zone)
if len(zones) > 1:
self.module.fail_json(msg="More than one zone matches {0}".format(zone))
if len(zones) < 1:
self.module.fail_json(msg="No zone found with name {0}".format(zone))
return zones[0]['id']
def get_zones(self, name=None):
if not name:
name = self.zone
param = ''
if name:
param = '?' + urlencode({'name': name})
zones, status = self._cf_api_call('/zones' + param)
return zones
def get_dns_records(self, zone_name=None, type=None, record=None, value=''):
if not zone_name:
zone_name = self.zone
if not type:
type = self.type
if not record:
record = self.record
# necessary because None as value means to override user
# set module value
if (not value) and (value is not None):
value = self.value
zone_id = self._get_zone_id()
api_call = '/zones/{0}/dns_records'.format(zone_id)
query = {}
if type:
query['type'] = type
if record:
query['name'] = record
if value:
query['content'] = value
if query:
api_call += '?' + urlencode(query)
records, status = self._cf_api_call(api_call)
return records
def delete_dns_records(self, **kwargs):
params = {}
for param in ['port', 'proto', 'service', 'solo', 'type', 'record', 'value', 'weight', 'zone']:
if param in kwargs:
params[param] = kwargs[param]
else:
params[param] = getattr(self, param)
records = []
content = params['value']
search_record = params['record']
if params['type'] == 'SRV':
if not (params['value'] is None or params['value'] == ''):
content = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
search_record = params['service'] + '.' + params['proto'] + '.' + params['record']
if params['solo']:
search_value = None
else:
search_value = content
records = self.get_dns_records(params['zone'], params['type'], search_record, search_value)
for rr in records:
if params['solo']:
if not ((rr['type'] == params['type']) and (rr['name'] == search_record) and (rr['content'] == content)):
self.changed = True
if not self.module.check_mode:
result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE')
else:
self.changed = True
if not self.module.check_mode:
result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE')
return self.changed
def ensure_dns_record(self, **kwargs):
params = {}
for param in ['port', 'priority', 'proto', 'proxied', 'service', 'ttl', 'type', 'record', 'value', 'weight', 'zone']:
if param in kwargs:
params[param] = kwargs[param]
else:
params[param] = getattr(self, param)
search_value = params['value']
search_record = params['record']
new_record = None
if (params['type'] is None) or (params['record'] is None):
self.module.fail_json(msg="You must provide a type and a record to create a new record")
if (params['type'] in ['A', 'AAAA', 'CNAME', 'TXT', 'MX', 'NS', 'SPF']):
if not params['value']:
self.module.fail_json(msg="You must provide a non-empty value to create this record type")
# there can only be one CNAME per record
# ignoring the value when searching for existing
# CNAME records allows us to update the value if it
# changes
if params['type'] == 'CNAME':
search_value = None
new_record = {
"type": params['type'],
"name": params['record'],
"content": params['value'],
"ttl": params['ttl']
}
if (params['type'] in ['A', 'AAAA', 'CNAME']):
new_record["proxied"] = params["proxied"]
if params['type'] == 'MX':
for attr in [params['priority'], params['value']]:
if (attr is None) or (attr == ''):
self.module.fail_json(msg="You must provide priority and a value to create this record type")
new_record = {
"type": params['type'],
"name": params['record'],
"content": params['value'],
"priority": params['priority'],
"ttl": params['ttl']
}
if params['type'] == 'SRV':
for attr in [params['port'], params['priority'], params['proto'], params['service'], params['weight'], params['value']]:
if (attr is None) or (attr == ''):
self.module.fail_json(msg="You must provide port, priority, proto, service, weight and a value to create this record type")
srv_data = {
"target": params['value'],
"port": params['port'],
"weight": params['weight'],
"priority": params['priority'],
"name": params['record'][:-len('.' + params['zone'])],
"proto": params['proto'],
"service": params['service']
}
new_record = {"type": params['type'], "ttl": params['ttl'], 'data': srv_data}
search_value = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
search_record = params['service'] + '.' + params['proto'] + '.' + params['record']
zone_id = self._get_zone_id(params['zone'])
records = self.get_dns_records(params['zone'], params['type'], search_record, search_value)
# in theory this should be impossible, as Cloudflare does not allow
# the creation of duplicate records, but let's cover it anyway
if len(records) > 1:
self.module.fail_json(msg="More than one record already exists for the given attributes. That should be impossible, please open an issue!")
# record already exists, check if it must be updated
if len(records) == 1:
cur_record = records[0]
do_update = False
if (params['ttl'] is not None) and (cur_record['ttl'] != params['ttl']):
do_update = True
if (params['priority'] is not None) and ('priority' in cur_record) and (cur_record['priority'] != params['priority']):
do_update = True
if ('data' in new_record) and ('data' in cur_record):
# the SRV data payload changed (dict ordering comparisons raise on
# Python 3, so compare with a plain inequality)
if cur_record['data'] != new_record['data']:
do_update = True
if (params['type'] == 'CNAME') and (cur_record['content'] != new_record['content']):
do_update = True
if do_update:
if self.module.check_mode:
result = new_record
else:
result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id, records[0]['id']), 'PUT', new_record)
self.changed = True
return result, self.changed
else:
return records, self.changed
if self.module.check_mode:
result = new_record
else:
result, info = self._cf_api_call('/zones/{0}/dns_records'.format(zone_id), 'POST', new_record)
self.changed = True
return result, self.changed
def main():
module = AnsibleModule(
argument_spec=dict(
account_api_token=dict(required=True, no_log=True, type='str'),
account_email=dict(required=True, type='str'),
port=dict(required=False, default=None, type='int'),
priority=dict(required=False, default=1, type='int'),
proto=dict(required=False, default=None, choices=['tcp', 'udp'], type='str'),
proxied=dict(required=False, default=False, type='bool'),
record=dict(required=False, default='@', aliases=['name'], type='str'),
service=dict(required=False, default=None, type='str'),
solo=dict(required=False, default=None, type='bool'),
state=dict(required=False, default='present', choices=['present', 'absent'], type='str'),
timeout=dict(required=False, default=30, type='int'),
ttl=dict(required=False, default=1, type='int'),
type=dict(required=False, default=None, choices=['A', 'AAAA', 'CNAME', 'TXT', 'SRV', 'MX', 'NS', 'SPF'], type='str'),
value=dict(required=False, default=None, aliases=['content'], type='str'),
weight=dict(required=False, default=1, type='int'),
zone=dict(required=True, default=None, aliases=['domain'], type='str'),
),
supports_check_mode=True,
required_if=([
('state', 'present', ['record', 'type', 'value']),
('state', 'absent', ['record']),
('type', 'SRV', ['proto', 'service']),
]
),
)
if module.params['type'] == 'SRV':
if not ((module.params['weight'] is not None and module.params['port'] is not None
and not (module.params['value'] is None or module.params['value'] == ''))
or (module.params['weight'] is None and module.params['port'] is None
and (module.params['value'] is None or module.params['value'] == ''))):
module.fail_json(msg="For SRV records the params weight, port and value all need to be defined, or not at all.")
changed = False
cf_api = CloudflareAPI(module)
# sanity checks
if cf_api.is_solo and cf_api.state == 'absent':
module.fail_json(msg="solo=true can only be used with state=present")
# perform add, delete or update of one or more records; the update path
# covers changes to TTL, priority, SRV data and CNAME content
if cf_api.state == 'present':
# delete all records matching record name + type
if cf_api.is_solo:
changed = cf_api.delete_dns_records(solo=cf_api.is_solo)
result, changed = cf_api.ensure_dns_record()
if isinstance(result, list):
module.exit_json(changed=changed, result={'record': result[0]})
else:
module.exit_json(changed=changed, result={'record': result})
else:
# force solo to False, just to be sure
changed = cf_api.delete_dns_records(solo=False)
module.exit_json(changed=changed)
if __name__ == '__main__':
main()
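# A minimal example task for this module, assuming it is exposed to playbooks
# as cloudflare_dns (parameter names mirror the argument_spec above; values
# are placeholders):
#
#   - cloudflare_dns:
#       zone: example.com
#       record: www
#       type: A
#       value: 192.0.2.1
#       state: present
#       account_email: user@example.com
#       account_api_token: dummyapitoken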
the-stack_106_22505
"""Email sensor support."""
from collections import deque
import datetime
import email
import imaplib
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_DATE,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
CONF_VALUE_TEMPLATE,
CONTENT_TYPE_TEXT_PLAIN,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
CONF_SERVER = "server"
CONF_SENDERS = "senders"
CONF_FOLDER = "folder"
ATTR_FROM = "from"
ATTR_BODY = "body"
ATTR_SUBJECT = "subject"
DEFAULT_PORT = 993
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_SERVER): cv.string,
vol.Required(CONF_SENDERS): [cv.string],
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_FOLDER, default="INBOX"): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Email sensor platform."""
reader = EmailReader(
config.get(CONF_USERNAME),
config.get(CONF_PASSWORD),
config.get(CONF_SERVER),
config.get(CONF_PORT),
config.get(CONF_FOLDER),
)
value_template = config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
value_template.hass = hass
sensor = EmailContentSensor(
hass,
reader,
config.get(CONF_NAME) or config.get(CONF_USERNAME),
config.get(CONF_SENDERS),
value_template,
)
if sensor.connected:
add_entities([sensor], True)
else:
return False
class EmailReader:
"""A class to read emails from an IMAP server."""
def __init__(self, user, password, server, port, folder):
"""Initialize the Email Reader."""
self._user = user
self._password = password
self._server = server
self._port = port
self._folder = folder
self._last_id = None
self._unread_ids = deque([])
self.connection = None
def connect(self):
"""Login and setup the connection."""
try:
self.connection = imaplib.IMAP4_SSL(self._server, self._port)
self.connection.login(self._user, self._password)
return True
except imaplib.IMAP4.error:
_LOGGER.error("Failed to login to %s", self._server)
return False
def _fetch_message(self, message_uid):
"""Get an email message from a message id."""
_, message_data = self.connection.uid("fetch", message_uid, "(RFC822)")
if message_data is None:
return None
if message_data[0] is None:
return None
raw_email = message_data[0][1]
email_message = email.message_from_bytes(raw_email)
return email_message
def read_next(self):
"""Read the next email from the email server."""
try:
self.connection.select(self._folder, readonly=True)
if not self._unread_ids:
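# First poll: search by SINCE today's date. Once a UID has been seen,
# later polls use "UID <last_id>:*" so only newer messages are returned.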
search = f"SINCE {datetime.date.today():%d-%b-%Y}"
if self._last_id is not None:
search = f"UID {self._last_id}:*"
_, data = self.connection.uid("search", None, search)
self._unread_ids = deque(data[0].split())
while self._unread_ids:
message_uid = self._unread_ids.popleft()
if self._last_id is None or int(message_uid) > self._last_id:
self._last_id = int(message_uid)
return self._fetch_message(message_uid)
return self._fetch_message(str(self._last_id))
except imaplib.IMAP4.error:
_LOGGER.info("Connection to %s lost, attempting to reconnect", self._server)
try:
self.connect()
_LOGGER.info(
"Reconnect to %s succeeded, trying last message", self._server
)
if self._last_id is not None:
return self._fetch_message(str(self._last_id))
except imaplib.IMAP4.error:
_LOGGER.error("Failed to reconnect")
return None
class EmailContentSensor(Entity):
"""Representation of an EMail sensor."""
def __init__(self, hass, email_reader, name, allowed_senders, value_template):
"""Initialize the sensor."""
self.hass = hass
self._email_reader = email_reader
self._name = name
self._allowed_senders = [sender.upper() for sender in allowed_senders]
self._value_template = value_template
self._last_id = None
self._message = None
self._state_attributes = None
self.connected = self._email_reader.connect()
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the current email state."""
return self._message
@property
def device_state_attributes(self):
"""Return other state attributes for the message."""
return self._state_attributes
def render_template(self, email_message):
"""Render the message template."""
variables = {
ATTR_FROM: EmailContentSensor.get_msg_sender(email_message),
ATTR_SUBJECT: EmailContentSensor.get_msg_subject(email_message),
ATTR_DATE: email_message["Date"],
ATTR_BODY: EmailContentSensor.get_msg_text(email_message),
}
return self._value_template.render(variables, parse_result=False)
def sender_allowed(self, email_message):
"""Check if the sender is in the allowed senders list."""
return EmailContentSensor.get_msg_sender(email_message).upper() in (
sender for sender in self._allowed_senders
)
@staticmethod
def get_msg_sender(email_message):
"""Get the parsed message sender from the email."""
return str(email.utils.parseaddr(email_message["From"])[1])
@staticmethod
def get_msg_subject(email_message):
"""Decode the message subject."""
decoded_header = email.header.decode_header(email_message["Subject"])
header = email.header.make_header(decoded_header)
return str(header)
@staticmethod
def get_msg_text(email_message):
"""
Get the message text from the email.
Will look for text/plain or use text/html if not found.
"""
message_text = None
message_html = None
message_untyped_text = None
for part in email_message.walk():
if part.get_content_type() == CONTENT_TYPE_TEXT_PLAIN:
if message_text is None:
message_text = part.get_payload()
elif part.get_content_type() == "text/html":
if message_html is None:
message_html = part.get_payload()
elif part.get_content_type().startswith("text"):
if message_untyped_text is None:
message_untyped_text = part.get_payload()
if message_text is not None:
return message_text
if message_html is not None:
return message_html
if message_untyped_text is not None:
return message_untyped_text
return email_message.get_payload()
def update(self):
"""Read emails and publish state change."""
email_message = self._email_reader.read_next()
if email_message is None:
self._message = None
self._state_attributes = {}
return
if self.sender_allowed(email_message):
message = EmailContentSensor.get_msg_subject(email_message)
if self._value_template is not None:
message = self.render_template(email_message)
self._message = message
self._state_attributes = {
ATTR_FROM: EmailContentSensor.get_msg_sender(email_message),
ATTR_SUBJECT: EmailContentSensor.get_msg_subject(email_message),
ATTR_DATE: email_message["Date"],
ATTR_BODY: EmailContentSensor.get_msg_text(email_message),
}
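# A minimal example configuration for this sensor platform, assuming it is
# registered under the platform name imap_email_content (keys mirror
# PLATFORM_SCHEMA above; values are placeholders):
#
#   sensor:
#     - platform: imap_email_content
#       server: imap.example.com
#       port: 993
#       username: user@example.com
#       password: !secret email_password
#       folder: INBOX
#       senders:
#         - alerts@example.com
#       value_template: "{{ subject }}"
#
# The value_template can reference the variables rendered above:
# from, subject, date and body.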
the-stack_106_22506
# -*- coding: utf-8 -*-
from collections import defaultdict
from datetime import datetime, timedelta
from io import StringIO
import math
import operator
import re
import numpy as np
import pytest
import pandas._config.config as cf
from pandas._libs.tslib import Timestamp
from pandas.compat import PY36, lrange, lzip
from pandas.compat.numpy import np_datetime64_compat
from pandas.core.dtypes.common import is_unsigned_integer_dtype
from pandas.core.dtypes.generic import ABCIndex
import pandas as pd
from pandas import (
CategoricalIndex, DataFrame, DatetimeIndex, Float64Index, Int64Index,
PeriodIndex, RangeIndex, Series, TimedeltaIndex, UInt64Index, date_range,
isna, period_range)
from pandas.core.index import _get_combined_index, ensure_index_from_sequences
from pandas.core.indexes.api import Index, MultiIndex
from pandas.core.sorting import safe_sort
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
class TestIndex(Base):
_holder = Index
def setup_method(self, method):
self.indices = dict(unicodeIndex=tm.makeUnicodeIndex(100),
strIndex=tm.makeStringIndex(100),
dateIndex=tm.makeDateIndex(100),
periodIndex=tm.makePeriodIndex(100),
tdIndex=tm.makeTimedeltaIndex(100),
intIndex=tm.makeIntIndex(100),
uintIndex=tm.makeUIntIndex(100),
rangeIndex=tm.makeRangeIndex(100),
floatIndex=tm.makeFloatIndex(100),
boolIndex=Index([True, False]),
catIndex=tm.makeCategoricalIndex(100),
empty=Index([]),
tuples=MultiIndex.from_tuples(lzip(
['foo', 'bar', 'baz'], [1, 2, 3])),
repeats=Index([0, 0, 1, 1, 2, 2]))
self.setup_indices()
def create_index(self):
return Index(list('abcde'))
def generate_index_types(self, skip_index_keys=[]):
"""
Return a generator of the various index types, leaving
out the ones with a key in skip_index_keys
"""
for key, index in self.indices.items():
if key not in skip_index_keys:
yield key, index
def test_can_hold_identifiers(self):
index = self.create_index()
key = index[0]
assert index._can_hold_identifiers_and_holds_name(key) is True
def test_new_axis(self):
new_index = self.dateIndex[None, :]
assert new_index.ndim == 2
assert isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
new_copy2 = self.intIndex.copy(dtype=int)
assert new_copy2.dtype.kind == 'i'
@pytest.mark.parametrize("attr", ['strIndex', 'dateIndex'])
def test_constructor_regular(self, attr):
# regular instance creation
index = getattr(self, attr)
tm.assert_contains_all(index, index)
def test_constructor_casting(self):
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
tm.assert_index_equal(self.strIndex, index)
def test_constructor_copy(self):
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
assert isinstance(index, Index)
assert index.name == 'name'
tm.assert_numpy_array_equal(arr, index.values)
arr[0] = "SOMEBIGLONGSTRING"
assert index[0] != "SOMEBIGLONGSTRING"
# what to do here?
# arr = np.array(5.)
# pytest.raises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
msg = (r"Index\(\.\.\.\) must be called with a collection of some"
" kind, 0 was passed")
with pytest.raises(TypeError, match=msg):
Index(0)
@pytest.mark.parametrize("index_vals", [
[('A', 1), 'B'], ['B', ('A', 1)]])
def test_construction_list_mixed_tuples(self, index_vals):
# see gh-10697: if we are constructing from a mixed list of tuples,
# make sure that we are independent of the sorting order.
index = Index(index_vals)
assert isinstance(index, Index)
assert not isinstance(index, MultiIndex)
@pytest.mark.parametrize('na_value', [None, np.nan])
@pytest.mark.parametrize('vtype', [list, tuple, iter])
def test_construction_list_tuples_nan(self, na_value, vtype):
# GH 18505 : valid tuples containing NaN
values = [(1, 'two'), (3., na_value)]
result = Index(vtype(values))
expected = MultiIndex.from_tuples(values)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cast_as_obj", [True, False])
@pytest.mark.parametrize("index", [
pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern', name='Green Eggs & Ham'), # DTI with tz
pd.date_range('2015-01-01 10:00', freq='D', periods=3), # DTI no tz
pd.timedelta_range('1 days', freq='D', periods=3), # td
pd.period_range('2015-01-01', freq='D', periods=3) # period
])
def test_constructor_from_index_dtlike(self, cast_as_obj, index):
if cast_as_obj:
result = pd.Index(index.astype(object))
else:
result = pd.Index(index)
tm.assert_index_equal(result, index)
if isinstance(index, pd.DatetimeIndex):
assert result.tz == index.tz
if cast_as_obj:
# GH#23524 check that Index(dti, dtype=object) does not
# incorrectly raise ValueError, and that nanoseconds are not
# dropped
index += pd.Timedelta(nanoseconds=50)
result = pd.Index(index, dtype=object)
assert result.dtype == np.object_
assert list(result) == list(index)
@pytest.mark.parametrize("index,has_tz", [
(pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern'), True), # datetimetz
(pd.timedelta_range('1 days', freq='D', periods=3), False), # td
(pd.period_range('2015-01-01', freq='D', periods=3), False) # period
])
def test_constructor_from_series_dtlike(self, index, has_tz):
result = pd.Index(pd.Series(index))
tm.assert_index_equal(result, index)
if has_tz:
assert result.tz == index.tz
@pytest.mark.parametrize("klass", [Index, DatetimeIndex])
def test_constructor_from_series(self, klass):
expected = DatetimeIndex([Timestamp('20110101'), Timestamp('20120101'),
Timestamp('20130101')])
s = Series([Timestamp('20110101'), Timestamp('20120101'),
Timestamp('20130101')])
result = klass(s)
tm.assert_index_equal(result, expected)
def test_constructor_from_series_freq(self):
# GH 6273
# create from a series, passing a freq
dts = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
expected = DatetimeIndex(dts, freq='MS')
s = Series(pd.to_datetime(dts))
result = DatetimeIndex(s, freq='MS')
tm.assert_index_equal(result, expected)
def test_constructor_from_frame_series_freq(self):
# GH 6273
# create from a series, passing a freq
dts = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
expected = DatetimeIndex(dts, freq='MS')
df = pd.DataFrame(np.random.rand(5, 3))
df['date'] = dts
result = DatetimeIndex(df['date'], freq='MS')
assert df['date'].dtype == object
expected.name = 'date'
tm.assert_index_equal(result, expected)
expected = pd.Series(dts, name='date')
tm.assert_series_equal(df['date'], expected)
# GH 6274
# infer freq of same
freq = pd.infer_freq(df['date'])
assert freq == 'MS'
@pytest.mark.parametrize("array", [
np.arange(5), np.array(['a', 'b', 'c']), date_range(
'2000-01-01', periods=3).values
])
def test_constructor_ndarray_like(self, array):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike:
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('dtype', [
int, 'int64', 'int32', 'int16', 'int8', 'uint64', 'uint32',
'uint16', 'uint8'])
def test_constructor_int_dtype_float(self, dtype):
# GH 18400
if is_unsigned_integer_dtype(dtype):
index_type = UInt64Index
else:
index_type = Int64Index
expected = index_type([0, 1, 2, 3])
result = Index([0., 1., 2., 3.], dtype=dtype)
tm.assert_index_equal(result, expected)
def test_constructor_int_dtype_nan(self):
# see gh-15187
data = [np.nan]
expected = Float64Index(data)
result = Index(data, dtype='float')
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dtype", ['int64', 'uint64'])
def test_constructor_int_dtype_nan_raises(self, dtype):
# see gh-15187
data = [np.nan]
msg = "cannot convert"
with pytest.raises(ValueError, match=msg):
Index(data, dtype=dtype)
def test_constructor_no_pandas_array(self):
ser = pd.Series([1, 2, 3])
result = pd.Index(ser.array)
expected = pd.Index([1, 2, 3])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("klass,dtype,na_val", [
(pd.Float64Index, np.float64, np.nan),
(pd.DatetimeIndex, 'datetime64[ns]', pd.NaT)
])
def test_index_ctor_infer_nan_nat(self, klass, dtype, na_val):
# GH 13467
na_list = [na_val, na_val]
expected = klass(na_list)
assert expected.dtype == dtype
result = Index(na_list)
tm.assert_index_equal(result, expected)
result = Index(np.array(na_list))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("pos", [0, 1])
@pytest.mark.parametrize("klass,dtype,ctor", [
(pd.DatetimeIndex, 'datetime64[ns]', np.datetime64('nat')),
(pd.TimedeltaIndex, 'timedelta64[ns]', np.timedelta64('nat'))
])
def test_index_ctor_infer_nat_dt_like(self, pos, klass, dtype, ctor,
nulls_fixture):
expected = klass([pd.NaT, pd.NaT])
assert expected.dtype == dtype
data = [ctor]
data.insert(pos, nulls_fixture)
result = Index(data)
tm.assert_index_equal(result, expected)
result = Index(np.array(data, dtype=object))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("swap_objs", [True, False])
def test_index_ctor_nat_result(self, swap_objs):
# mixed np.datetime64/timedelta64 nat results in object
data = [np.datetime64('nat'), np.timedelta64('nat')]
if swap_objs:
data = data[::-1]
expected = pd.Index(data, dtype=object)
tm.assert_index_equal(Index(data), expected)
tm.assert_index_equal(Index(np.array(data, dtype=object)), expected)
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
tm.assert_index_equal(rs, xp)
assert isinstance(rs, PeriodIndex)
@pytest.mark.parametrize("vals,dtype", [
([1, 2, 3, 4, 5], 'int'), ([1.1, np.nan, 2.2, 3.0], 'float'),
(['A', 'B', 'C', np.nan], 'obj')
])
def test_constructor_simple_new(self, vals, dtype):
index = Index(vals, name=dtype)
result = index._simple_new(index.values, dtype)
tm.assert_index_equal(result, index)
@pytest.mark.parametrize("vals", [
[1, 2, 3], np.array([1, 2, 3]), np.array([1, 2, 3], dtype=int),
# below should coerce
[1., 2., 3.], np.array([1., 2., 3.], dtype=float)
])
def test_constructor_dtypes_to_int64(self, vals):
index = Index(vals, dtype=int)
assert isinstance(index, Int64Index)
@pytest.mark.parametrize("vals", [
[1, 2, 3], [1., 2., 3.], np.array([1., 2., 3.]),
np.array([1, 2, 3], dtype=int), np.array([1., 2., 3.], dtype=float)
])
def test_constructor_dtypes_to_float64(self, vals):
index = Index(vals, dtype=float)
assert isinstance(index, Float64Index)
@pytest.mark.parametrize("cast_index", [True, False])
@pytest.mark.parametrize("vals", [
[True, False, True], np.array([True, False, True], dtype=bool)
])
def test_constructor_dtypes_to_object(self, cast_index, vals):
if cast_index:
index = Index(vals, dtype=bool)
else:
index = Index(vals)
assert isinstance(index, Index)
assert index.dtype == object
@pytest.mark.parametrize("vals", [
[1, 2, 3], np.array([1, 2, 3], dtype=int),
np.array([np_datetime64_compat('2011-01-01'),
np_datetime64_compat('2011-01-02')]),
[datetime(2011, 1, 1), datetime(2011, 1, 2)]
])
def test_constructor_dtypes_to_categorical(self, vals):
index = Index(vals, dtype='category')
assert isinstance(index, CategoricalIndex)
@pytest.mark.parametrize("cast_index", [True, False])
@pytest.mark.parametrize("vals", [
Index(np.array([np_datetime64_compat('2011-01-01'),
np_datetime64_compat('2011-01-02')])),
Index([datetime(2011, 1, 1), datetime(2011, 1, 2)])
])
def test_constructor_dtypes_to_datetime(self, cast_index, vals):
if cast_index:
index = Index(vals, dtype=object)
assert isinstance(index, Index)
assert index.dtype == object
else:
index = Index(vals)
assert isinstance(index, DatetimeIndex)
@pytest.mark.parametrize("cast_index", [True, False])
@pytest.mark.parametrize("vals", [
np.array([np.timedelta64(1, 'D'), np.timedelta64(1, 'D')]),
[timedelta(1), timedelta(1)]
])
def test_constructor_dtypes_to_timedelta(self, cast_index, vals):
if cast_index:
index = Index(vals, dtype=object)
assert isinstance(index, Index)
assert index.dtype == object
else:
index = Index(vals)
assert isinstance(index, TimedeltaIndex)
@pytest.mark.parametrize("attr, utc", [
['values', False],
['asi8', True]])
@pytest.mark.parametrize("klass", [pd.Index, pd.DatetimeIndex])
def test_constructor_dtypes_datetime(self, tz_naive_fixture, attr, utc,
klass):
# Test constructing with a datetimetz dtype
# .values produces numpy datetimes, so these are considered naive
# .asi8 produces integers, so these are considered epoch timestamps
# ^the above will be true in a later version. Right now we `.view`
# the i8 values as NS_DTYPE, effectively treating them as wall times.
index = pd.date_range('2011-01-01', periods=5)
arg = getattr(index, attr)
index = index.tz_localize(tz_naive_fixture)
dtype = index.dtype
if (tz_naive_fixture and attr == "asi8" and
str(tz_naive_fixture) not in ('UTC', 'tzutc()', 'UTC+00:00')):
ex_warn = FutureWarning
else:
ex_warn = None
# stacklevel is checked elsewhere. We don't do it here since
# Index will have a frame, throwing off the expected value.
with tm.assert_produces_warning(ex_warn, check_stacklevel=False):
result = klass(arg, tz=tz_naive_fixture)
tm.assert_index_equal(result, index)
with tm.assert_produces_warning(ex_warn, check_stacklevel=False):
result = klass(arg, dtype=dtype)
tm.assert_index_equal(result, index)
with tm.assert_produces_warning(ex_warn, check_stacklevel=False):
result = klass(list(arg), tz=tz_naive_fixture)
tm.assert_index_equal(result, index)
with tm.assert_produces_warning(ex_warn, check_stacklevel=False):
result = klass(list(arg), dtype=dtype)
tm.assert_index_equal(result, index)
@pytest.mark.parametrize("attr", ['values', 'asi8'])
@pytest.mark.parametrize("klass", [pd.Index, pd.TimedeltaIndex])
def test_constructor_dtypes_timedelta(self, attr, klass):
index = pd.timedelta_range('1 days', periods=5)
dtype = index.dtype
values = getattr(index, attr)
result = klass(values, dtype=dtype)
tm.assert_index_equal(result, index)
result = klass(list(values), dtype=dtype)
tm.assert_index_equal(result, index)
@pytest.mark.parametrize("value", [[], iter([]), (x for x in [])])
@pytest.mark.parametrize("klass",
[Index, Float64Index, Int64Index, UInt64Index,
CategoricalIndex, DatetimeIndex, TimedeltaIndex])
def test_constructor_empty(self, value, klass):
empty = klass(value)
assert isinstance(empty, klass)
assert not len(empty)
@pytest.mark.parametrize("empty,klass", [
(PeriodIndex([], freq='B'), PeriodIndex),
(PeriodIndex(iter([]), freq='B'), PeriodIndex),
(PeriodIndex((x for x in []), freq='B'), PeriodIndex),
(RangeIndex(step=1), pd.RangeIndex),
(MultiIndex(levels=[[1, 2], ['blue', 'red']],
codes=[[], []]), MultiIndex)
])
def test_constructor_empty_special(self, empty, klass):
assert isinstance(empty, klass)
assert not len(empty)
def test_constructor_overflow_int64(self):
# see gh-15832
msg = ("The elements provided in the data cannot "
"all be casted to the dtype int64")
with pytest.raises(OverflowError, match=msg):
Index([np.iinfo(np.uint64).max - 1], dtype="int64")
@pytest.mark.xfail(reason="see GH#21311: Index "
"doesn't enforce dtype argument")
def test_constructor_cast(self):
msg = "could not convert string to float"
with pytest.raises(ValueError, match=msg):
Index(["a", "b", "c"], dtype=float)
def test_view_with_args(self):
restricted = ['unicodeIndex', 'strIndex', 'catIndex', 'boolIndex',
'empty']
for i in list(set(self.indices.keys()) - set(restricted)):
ind = self.indices[i]
ind.view('i8')
@pytest.mark.parametrize('index_type', [
'unicodeIndex',
'strIndex',
pytest.param('catIndex', marks=pytest.mark.xfail(reason="gh-25464")),
'boolIndex',
'empty'])
def test_view_with_args_object_array_raises(self, index_type):
ind = self.indices[index_type]
msg = "Cannot change data-type for object array"
with pytest.raises(TypeError, match=msg):
ind.view('i8')
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
assert casted.name == 'foobar'
def test_equals_object(self):
# same
assert Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c']))
@pytest.mark.parametrize("comp", [
Index(['a', 'b']), Index(['a', 'b', 'd']), ['a', 'b', 'c']])
def test_not_equals_object(self, comp):
assert not Index(['a', 'b', 'c']).equals(comp)
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
# test 0th element
tm.assert_index_equal(Index(['a', 'b', 'c', 'd']),
result.insert(0, 'a'))
# test Nth element that follows Python list behavior
tm.assert_index_equal(Index(['b', 'c', 'e', 'd']),
result.insert(-1, 'e'))
# test loc +/- neq (0, -1)
tm.assert_index_equal(result.insert(1, 'z'), result.insert(-2, 'z'))
# test empty
null_index = Index([])
tm.assert_index_equal(Index(['a']), null_index.insert(0, 'a'))
def test_insert_missing(self, nulls_fixture):
# GH 22295
# test there is no mangling of NA values
expected = Index(['a', nulls_fixture, 'b', 'c'])
result = Index(list('abc')).insert(1, nulls_fixture)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("pos,expected", [
(0, Index(['b', 'c', 'd'], name='index')),
(-1, Index(['a', 'b', 'c'], name='index'))
])
def test_delete(self, pos, expected):
index = Index(['a', 'b', 'c', 'd'], name='index')
result = index.delete(pos)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
def test_delete_raises(self):
index = Index(['a', 'b', 'c', 'd'], name='index')
msg = "index 5 is out of bounds for axis 0 with size 4"
with pytest.raises(IndexError, match=msg):
index.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
assert i1.identical(i2)
i1 = i1.rename('foo')
assert i1.equals(i2)
assert not i1.identical(i2)
i2 = i2.rename('foo')
assert i1.identical(i2)
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
assert not i3.identical(i4)
def test_is_(self):
ind = Index(range(10))
assert ind.is_(ind)
assert ind.is_(ind.view().view().view().view())
assert not ind.is_(Index(range(10)))
assert not ind.is_(ind.copy())
assert not ind.is_(ind.copy(deep=False))
assert not ind.is_(ind[:])
assert not ind.is_(np.array(range(10)))
# quasi-implementation dependent
assert ind.is_(ind.view())
ind2 = ind.view()
ind2.name = 'bob'
assert ind.is_(ind2)
assert ind2.is_(ind)
# doesn't matter if Indices are *actually* views of underlying data,
assert not ind.is_(Index(ind.values))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
assert not ind1.is_(ind2)
def test_asof(self):
d = self.dateIndex[0]
assert self.dateIndex.asof(d) == d
assert isna(self.dateIndex.asof(d - timedelta(1)))
d = self.dateIndex[-1]
assert self.dateIndex.asof(d + timedelta(1)) == d
d = self.dateIndex[0].to_pydatetime()
assert isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
index = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-02-28')
result = index.asof('2010-02')
assert result == expected
assert not isinstance(result, Index)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
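# .values is datetime64[ns]; viewing it as 'i8' gives integer nanoseconds
# since the epoch, so s + 50 below lands 50ns after 2013-01-01 00:00:00.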
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
# assert first_value == x['2013-01-01 00:00:00.000000050+0000']
expected_ts = np_datetime64_compat('2013-01-01 00:00:00.000000050+'
'0000', 'ns')
assert first_value == x[Timestamp(expected_ts)]
def test_booleanindex(self):
boolIndex = np.repeat(True, len(self.strIndex)).astype(bool)
boolIndex[5:30:2] = False
subIndex = self.strIndex[boolIndex]
for i, val in enumerate(subIndex):
assert subIndex.get_loc(val) == i
subIndex = self.strIndex[list(boolIndex)]
for i, val in enumerate(subIndex):
assert subIndex.get_loc(val) == i
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
assert i == sl[sl.get_loc(i)]
@pytest.mark.parametrize("attr", [
'strIndex', 'intIndex', 'floatIndex'])
@pytest.mark.parametrize("dtype", [np.int_, np.bool_])
def test_empty_fancy(self, attr, dtype):
empty_arr = np.array([], dtype=dtype)
index = getattr(self, attr)
empty_index = index.__class__([])
assert index[[]].identical(empty_index)
assert index[empty_arr].identical(empty_index)
@pytest.mark.parametrize("attr", [
'strIndex', 'intIndex', 'floatIndex'])
def test_empty_fancy_raises(self, attr):
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
empty_farr = np.array([], dtype=np.float_)
index = getattr(self, attr)
empty_index = index.__class__([])
assert index[[]].identical(empty_index)
# np.ndarray only accepts ndarray of int & bool dtypes, so should Index
msg = r"arrays used as indices must be of integer \(or boolean\) type"
with pytest.raises(IndexError, match=msg):
index[empty_farr]
@pytest.mark.parametrize("sort", [None, False])
def test_intersection(self, sort):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second, sort=sort)
if sort is None:
tm.assert_index_equal(intersect, second.sort_values())
assert tm.equalContents(intersect, second)
# Corner cases
inter = first.intersection(first, sort=sort)
assert inter is first
@pytest.mark.parametrize("index2,keeps_name", [
(Index([3, 4, 5, 6, 7], name="index"), True), # preserve same name
(Index([3, 4, 5, 6, 7], name="other"), False), # drop diff names
(Index([3, 4, 5, 6, 7]), False)])
@pytest.mark.parametrize("sort", [None, False])
def test_intersection_name_preservation(self, index2, keeps_name, sort):
index1 = Index([1, 2, 3, 4, 5], name='index')
expected = Index([3, 4, 5])
result = index1.intersection(index2, sort)
if keeps_name:
expected.name = 'index'
assert result.name == expected.name
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("first_name,second_name,expected_name", [
('A', 'A', 'A'), ('A', 'B', None), (None, 'B', None)])
@pytest.mark.parametrize("sort", [None, False])
def test_intersection_name_preservation2(self, first_name, second_name,
expected_name, sort):
first = self.strIndex[5:20]
second = self.strIndex[:10]
first.name = first_name
second.name = second_name
intersect = first.intersection(second, sort=sort)
assert intersect.name == expected_name
@pytest.mark.parametrize("index2,keeps_name", [
(Index([4, 7, 6, 5, 3], name='index'), True),
(Index([4, 7, 6, 5, 3], name='other'), False)])
@pytest.mark.parametrize("sort", [None, False])
def test_intersection_monotonic(self, index2, keeps_name, sort):
index1 = Index([5, 3, 2, 4, 1], name='index')
expected = Index([5, 3, 4])
if keeps_name:
expected.name = "index"
result = index1.intersection(index2, sort=sort)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("index2,expected_arr", [
(Index(['B', 'D']), ['B']),
(Index(['B', 'D', 'A']), ['A', 'B', 'A'])])
@pytest.mark.parametrize("sort", [None, False])
def test_intersection_non_monotonic_non_unique(self, index2, expected_arr,
sort):
# non-monotonic non-unique
index1 = Index(['A', 'B', 'A', 'C'])
expected = Index(expected_arr, dtype='object')
result = index1.intersection(index2, sort=sort)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("sort", [None, False])
def test_intersect_str_dates(self, sort):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
i1 = Index(dt_dates, dtype=object)
i2 = Index(['aa'], dtype=object)
result = i2.intersection(i1, sort=sort)
assert len(result) == 0
def test_intersect_nosort(self):
result = pd.Index(['c', 'b', 'a']).intersection(['b', 'a'])
expected = pd.Index(['b', 'a'])
tm.assert_index_equal(result, expected)
def test_intersection_equal_sort(self):
idx = pd.Index(['c', 'a', 'b'])
tm.assert_index_equal(idx.intersection(idx, sort=False), idx)
tm.assert_index_equal(idx.intersection(idx, sort=None), idx)
@pytest.mark.xfail(reason="Not implemented")
def test_intersection_equal_sort_true(self):
# TODO decide on True behaviour
idx = pd.Index(['c', 'a', 'b'])
sorted_ = pd.Index(['a', 'b', 'c'])
tm.assert_index_equal(idx.intersection(idx, sort=True), sorted_)
@pytest.mark.parametrize("sort", [None, False])
def test_chained_union(self, sort):
# Chained unions handles names correctly
i1 = Index([1, 2], name='i1')
i2 = Index([5, 6], name='i2')
i3 = Index([3, 4], name='i3')
union = i1.union(i2.union(i3, sort=sort), sort=sort)
expected = i1.union(i2, sort=sort).union(i3, sort=sort)
tm.assert_index_equal(union, expected)
j1 = Index([1, 2], name='j1')
j2 = Index([], name='j2')
j3 = Index([], name='j3')
union = j1.union(j2.union(j3, sort=sort), sort=sort)
expected = j1.union(j2, sort=sort).union(j3, sort=sort)
tm.assert_index_equal(union, expected)
@pytest.mark.parametrize("sort", [None, False])
def test_union(self, sort):
# TODO: Replace with fixture result
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second, sort=sort)
if sort is None:
tm.assert_index_equal(union, everything.sort_values())
assert tm.equalContents(union, everything)
@pytest.mark.parametrize('slice_', [slice(None), slice(0)])
def test_union_sort_other_special(self, slice_):
# https://github.com/pandas-dev/pandas/issues/24959
idx = pd.Index([1, 0, 2])
# default, sort=None
other = idx[slice_]
tm.assert_index_equal(idx.union(other), idx)
tm.assert_index_equal(other.union(idx), idx)
# sort=False
tm.assert_index_equal(idx.union(other, sort=False), idx)
@pytest.mark.xfail(reason="Not implemented")
@pytest.mark.parametrize('slice_', [slice(None), slice(0)])
def test_union_sort_special_true(self, slice_):
# TODO decide on True behaviour
# sort=True
idx = pd.Index([1, 0, 2])
# default, sort=None
other = idx[slice_]
result = idx.union(other, sort=True)
expected = pd.Index([0, 1, 2])
tm.assert_index_equal(result, expected)
def test_union_sort_other_incomparable(self):
# https://github.com/pandas-dev/pandas/issues/24959
idx = pd.Index([1, pd.Timestamp('2000')])
# default (sort=None)
with tm.assert_produces_warning(RuntimeWarning):
result = idx.union(idx[:1])
tm.assert_index_equal(result, idx)
# sort=None
with tm.assert_produces_warning(RuntimeWarning):
result = idx.union(idx[:1], sort=None)
tm.assert_index_equal(result, idx)
# sort=False
result = idx.union(idx[:1], sort=False)
tm.assert_index_equal(result, idx)
@pytest.mark.xfail(reason="Not implemented")
def test_union_sort_other_incomparable_true(self):
# TODO decide on True behaviour
# sort=True
idx = pd.Index([1, pd.Timestamp('2000')])
with pytest.raises(TypeError, match='.*'):
idx.union(idx[:1], sort=True)
@pytest.mark.parametrize("klass", [
np.array, Series, list])
@pytest.mark.parametrize("sort", [None, False])
def test_union_from_iterables(self, klass, sort):
# GH 10149
# TODO: Replace with fixture result
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
case = klass(second.values)
result = first.union(case, sort=sort)
if sort is None:
tm.assert_index_equal(result, everything.sort_values())
assert tm.equalContents(result, everything)
@pytest.mark.parametrize("sort", [None, False])
def test_union_identity(self, sort):
# TODO: replace with fixture result
first = self.strIndex[5:20]
union = first.union(first, sort=sort)
# i.e. identity is not preserved when sort is True
assert (union is first) is (not sort)
union = first.union([], sort=sort)
assert (union is first) is (not sort)
union = Index([]).union(first, sort=sort)
assert (union is first) is (not sort)
@pytest.mark.parametrize("first_list", [list('ba'), list()])
@pytest.mark.parametrize("second_list", [list('ab'), list()])
@pytest.mark.parametrize("first_name, second_name, expected_name", [
('A', 'B', None), (None, 'B', None), ('A', None, None)])
@pytest.mark.parametrize("sort", [None, False])
def test_union_name_preservation(self, first_list, second_list, first_name,
second_name, expected_name, sort):
first = Index(first_list, name=first_name)
second = Index(second_list, name=second_name)
union = first.union(second, sort=sort)
vals = set(first_list).union(second_list)
if sort is None and len(first_list) > 0 and len(second_list) > 0:
expected = Index(sorted(vals), name=expected_name)
tm.assert_index_equal(union, expected)
else:
expected = Index(vals, name=expected_name)
assert tm.equalContents(union, expected)
@pytest.mark.parametrize("sort", [None, False])
def test_union_dt_as_obj(self, sort):
# TODO: Replace with fixture result
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
assert tm.equalContents(firstCat, appended)
assert tm.equalContents(secondCat, self.strIndex)
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
@pytest.mark.parametrize("method", ['union', 'intersection', 'difference',
'symmetric_difference'])
def test_setops_disallow_true(self, method):
idx1 = pd.Index(['a', 'b'])
idx2 = pd.Index(['b', 'c'])
with pytest.raises(ValueError, match="The 'sort' keyword only takes"):
getattr(idx1, method)(idx2, sort=True)
def test_map_identity_mapping(self):
# GH 12766
# TODO: replace with fixture
for name, cur_index in self.indices.items():
tm.assert_index_equal(cur_index, cur_index.map(lambda x: x))
def test_map_with_tuples(self):
# GH 12766
# Test that returning a single tuple from an Index
# returns an Index.
index = tm.makeIntIndex(3)
result = tm.makeIntIndex(3).map(lambda x: (x,))
expected = Index([(i,) for i in index])
tm.assert_index_equal(result, expected)
# Test that returning a tuple from a map of a single index
# returns a MultiIndex object.
result = index.map(lambda x: (x, x == 1))
expected = MultiIndex.from_tuples([(i, i == 1) for i in index])
tm.assert_index_equal(result, expected)
def test_map_with_tuples_mi(self):
# Test that returning a single object from a MultiIndex
# returns an Index.
first_level = ['foo', 'bar', 'baz']
multi_index = MultiIndex.from_tuples(lzip(first_level, [1, 2, 3]))
reduced_index = multi_index.map(lambda x: x[0])
tm.assert_index_equal(reduced_index, Index(first_level))
@pytest.mark.parametrize("attr", [
'makeDateIndex', 'makePeriodIndex', 'makeTimedeltaIndex'])
def test_map_tseries_indices_return_index(self, attr):
index = getattr(tm, attr)(10)
expected = Index([1] * 10)
result = index.map(lambda x: 1)
tm.assert_index_equal(expected, result)
def test_map_tseries_indices_accsr_return_index(self):
date_index = tm.makeDateIndex(24, freq='h', name='hourly')
expected = Index(range(24), name='hourly')
tm.assert_index_equal(expected, date_index.map(lambda x: x.hour))
@pytest.mark.parametrize(
"mapper",
[
lambda values, index: {i: e for e, i in zip(values, index)},
lambda values, index: pd.Series(values, index)])
def test_map_dictlike(self, mapper):
# GH 12756
expected = Index(['foo', 'bar', 'baz'])
index = tm.makeIntIndex(3)
result = index.map(mapper(expected.values, index))
tm.assert_index_equal(result, expected)
# TODO: replace with fixture
for name in self.indices.keys():
if name == 'catIndex':
# Tested in test_categorical
continue
elif name == 'repeats':
# Cannot map duplicated index
continue
index = self.indices[name]
expected = Index(np.arange(len(index), 0, -1))
# to match proper result coercion for uints
if name == 'empty':
expected = Index([])
result = index.map(mapper(expected, index))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("mapper", [
Series(['foo', 2., 'baz'], index=[0, 2, -1]),
{0: 'foo', 2: 2.0, -1: 'baz'}])
def test_map_with_non_function_missing_values(self, mapper):
# GH 12756
expected = Index([2., np.nan, 'foo'])
result = Index([2, 1, 0]).map(mapper)
tm.assert_index_equal(expected, result)
def test_map_na_exclusion(self):
index = Index([1.5, np.nan, 3, np.nan, 5])
result = index.map(lambda x: x * 2, na_action='ignore')
expected = index * 2
tm.assert_index_equal(result, expected)
def test_map_defaultdict(self):
index = Index([1, 2, 3])
default_dict = defaultdict(lambda: 'blank')
default_dict[1] = 'stuff'
result = index.map(default_dict)
expected = Index(['stuff', 'blank', 'blank'])
tm.assert_index_equal(result, expected)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
tm.assert_index_equal(result, index)
# empty
result = index.append([])
tm.assert_index_equal(result, index)
@pytest.mark.parametrize("name,expected", [
('foo', 'foo'), ('bar', None)])
def test_append_empty_preserve_name(self, name, expected):
left = Index([], name='foo')
right = Index([1, 2, 3], name=name)
result = left.append(right)
assert result.name == expected
@pytest.mark.parametrize("second_name,expected", [
(None, None), ('name', 'name')])
@pytest.mark.parametrize("sort", [None, False])
def test_difference_name_preservation(self, second_name, expected, sort):
# TODO: replace with fixture result
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
second.name = second_name
result = first.difference(second, sort=sort)
assert tm.equalContents(result, answer)
if expected is None:
assert result.name is None
else:
assert result.name == expected
@pytest.mark.parametrize("sort", [None, False])
def test_difference_empty_arg(self, sort):
first = self.strIndex[5:20]
first.name = 'name'
result = first.difference([], sort)
assert tm.equalContents(result, first)
assert result.name == first.name
@pytest.mark.parametrize("sort", [None, False])
def test_difference_identity(self, sort):
first = self.strIndex[5:20]
first.name = 'name'
result = first.difference(first, sort)
assert len(result) == 0
assert result.name == first.name
@pytest.mark.parametrize("sort", [None, False])
def test_difference_sort(self, sort):
first = self.strIndex[5:20]
second = self.strIndex[:10]
result = first.difference(second, sort)
expected = self.strIndex[10:20]
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("sort", [None, False])
def test_symmetric_difference(self, sort):
# smoke
index1 = Index([5, 2, 3, 4], name='index1')
index2 = Index([2, 3, 4, 1])
result = index1.symmetric_difference(index2, sort=sort)
expected = Index([5, 1])
assert tm.equalContents(result, expected)
assert result.name is None
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
# __xor__ syntax
expected = index1 ^ index2
assert tm.equalContents(result, expected)
assert result.name is None
@pytest.mark.parametrize('opname', ['difference', 'symmetric_difference'])
def test_difference_incomparable(self, opname):
a = pd.Index([3, pd.Timestamp('2000'), 1])
b = pd.Index([2, pd.Timestamp('1999'), 1])
op = operator.methodcaller(opname, b)
# sort=None, the default
result = op(a)
expected = pd.Index([3, pd.Timestamp('2000'), 2, pd.Timestamp('1999')])
if opname == 'difference':
expected = expected[:2]
tm.assert_index_equal(result, expected)
# sort=False
op = operator.methodcaller(opname, b, sort=False)
result = op(a)
tm.assert_index_equal(result, expected)
@pytest.mark.xfail(reason="Not implemented")
@pytest.mark.parametrize('opname', ['difference', 'symmetric_difference'])
def test_difference_incomparable_true(self, opname):
# TODO decide on True behaviour
# # sort=True, raises
a = pd.Index([3, pd.Timestamp('2000'), 1])
b = pd.Index([2, pd.Timestamp('1999'), 1])
op = operator.methodcaller(opname, b, sort=True)
with pytest.raises(TypeError, match='Cannot compare'):
op(a)
@pytest.mark.parametrize("sort", [None, False])
def test_symmetric_difference_mi(self, sort):
index1 = MultiIndex.from_tuples(self.tuples)
index2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = index1.symmetric_difference(index2, sort=sort)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
assert tm.equalContents(result, expected)
@pytest.mark.parametrize("index2,expected", [
(Index([0, 1, np.nan]), Index([2.0, 3.0, 0.0])),
(Index([0, 1]), Index([np.nan, 2.0, 3.0, 0.0]))])
@pytest.mark.parametrize("sort", [None, False])
def test_symmetric_difference_missing(self, index2, expected, sort):
# GH 13514 change: {nan} - {nan} == {}
# (GH 6444, sorting of nans, is no longer an issue)
index1 = Index([1, np.nan, 2, 3])
result = index1.symmetric_difference(index2, sort=sort)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("sort", [None, False])
def test_symmetric_difference_non_index(self, sort):
index1 = Index([1, 2, 3, 4], name='index1')
index2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = index1.symmetric_difference(index2, sort=sort)
assert tm.equalContents(result, expected)
assert result.name == 'index1'
result = index1.symmetric_difference(index2, result_name='new_name',
sort=sort)
assert tm.equalContents(result, expected)
assert result.name == 'new_name'
@pytest.mark.parametrize("sort", [None, False])
def test_difference_type(self, sort):
# GH 20040
# If taking difference of a set and itself, it
# needs to preserve the type of the index
skip_index_keys = ['repeats']
for key, index in self.generate_index_types(skip_index_keys):
result = index.difference(index, sort=sort)
expected = index.drop(index)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("sort", [None, False])
def test_intersection_difference(self, sort):
# GH 20040
# Test that the intersection of an index with an
# empty index produces the same index as the difference
# of an index with itself. Test for all types
skip_index_keys = ['repeats']
for key, index in self.generate_index_types(skip_index_keys):
inter = index.intersection(index.drop(index))
diff = index.difference(index, sort=sort)
tm.assert_index_equal(inter, diff)
@pytest.mark.parametrize("attr,expected", [
('strIndex', False), ('boolIndex', False), ('catIndex', False),
('intIndex', True), ('dateIndex', False), ('floatIndex', True)])
def test_is_numeric(self, attr, expected):
assert getattr(self, attr).is_numeric() == expected
@pytest.mark.parametrize("attr,expected", [
('strIndex', True), ('boolIndex', True), ('catIndex', False),
('intIndex', False), ('dateIndex', False), ('floatIndex', False)])
def test_is_object(self, attr, expected):
assert getattr(self, attr).is_object() == expected
@pytest.mark.parametrize("attr,expected", [
('strIndex', False), ('boolIndex', False), ('catIndex', False),
('intIndex', False), ('dateIndex', True), ('floatIndex', False)])
def test_is_all_dates(self, attr, expected):
assert getattr(self, attr).is_all_dates == expected
def test_summary(self):
self._check_method_works(Index._summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind._summary()
# shouldn't be formatted accidentally.
assert '~:{range}:0' in result
assert '{other}%s' in result
# GH18217
def test_summary_deprecated(self):
ind = Index(['{other}%s', "~:{range}:0"], name='A')
with tm.assert_produces_warning(FutureWarning):
ind.summary()
def test_format(self):
self._check_method_works(Index.format)
# GH 14626
# Windows has different precision on datetime.datetime.now (it doesn't
# include microseconds). Since the default for Timestamp shows these but
# Index formatting does not, we skip that case.
now = datetime.now()
if not str(now).endswith("000"):
index = Index([now])
formatted = index.format()
expected = [str(index[0])]
assert formatted == expected
self.strIndex[:0].format()
@pytest.mark.parametrize("vals", [
[1, 2.0 + 3.0j, 4.], ['a', 'b', 'c']])
def test_format_missing(self, vals, nulls_fixture):
# 2845
vals = list(vals) # Copy for each iteration
vals.append(nulls_fixture)
index = Index(vals)
formatted = index.format()
expected = [str(index[0]), str(index[1]), str(index[2]), 'NaN']
assert formatted == expected
assert index[3] is nulls_fixture
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
assert formatted[0] == 'something'
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
assert len(result) == 2
assert result == expected
@pytest.mark.parametrize("op", ['any', 'all'])
def test_logical_compat(self, op):
index = self.create_index()
assert getattr(index, op)() == getattr(index.values, op)()
def _check_method_works(self, method):
# TODO: make this a dedicated test with parametrized methods
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
method(self.catIndex)
def test_get_indexer(self):
index1 = Index([1, 2, 3, 4, 5])
index2 = Index([2, 4, 6])
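# get_indexer returns, for each label of index2, its position in index1;
# -1 marks labels (here 6) that are not present.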
r1 = index1.get_indexer(index2)
e1 = np.array([1, 3, -1], dtype=np.intp)
assert_almost_equal(r1, e1)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("expected,method", [
(np.array([-1, 0, 0, 1, 1], dtype=np.intp), 'pad'),
(np.array([-1, 0, 0, 1, 1], dtype=np.intp), 'ffill'),
(np.array([0, 0, 1, 1, 2], dtype=np.intp), 'backfill'),
(np.array([0, 0, 1, 1, 2], dtype=np.intp), 'bfill')])
def test_get_indexer_methods(self, reverse, expected, method):
index1 = Index([1, 2, 3, 4, 5])
index2 = Index([2, 4, 6])
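# 'pad'/'ffill' take the previous matching position and 'backfill'/'bfill'
# the next one; -1 means no position satisfies the method.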
if reverse:
index1 = index1[::-1]
expected = expected[::-1]
result = index2.get_indexer(index1, method=method)
assert_almost_equal(result, expected)
def test_get_indexer_invalid(self):
# GH10411
index = Index(np.arange(10))
with pytest.raises(ValueError, match='tolerance argument'):
index.get_indexer([1, 0], tolerance=1)
with pytest.raises(ValueError, match='limit argument'):
index.get_indexer([1, 0], limit=1)
@pytest.mark.parametrize(
'method, tolerance, indexer, expected',
[
('pad', None, [0, 5, 9], [0, 5, 9]),
('backfill', None, [0, 5, 9], [0, 5, 9]),
('nearest', None, [0, 5, 9], [0, 5, 9]),
('pad', 0, [0, 5, 9], [0, 5, 9]),
('backfill', 0, [0, 5, 9], [0, 5, 9]),
('nearest', 0, [0, 5, 9], [0, 5, 9]),
('pad', None, [0.2, 1.8, 8.5], [0, 1, 8]),
('backfill', None, [0.2, 1.8, 8.5], [1, 2, 9]),
('nearest', None, [0.2, 1.8, 8.5], [0, 2, 9]),
('pad', 1, [0.2, 1.8, 8.5], [0, 1, 8]),
('backfill', 1, [0.2, 1.8, 8.5], [1, 2, 9]),
('nearest', 1, [0.2, 1.8, 8.5], [0, 2, 9]),
('pad', 0.2, [0.2, 1.8, 8.5], [0, -1, -1]),
('backfill', 0.2, [0.2, 1.8, 8.5], [-1, 2, -1]),
('nearest', 0.2, [0.2, 1.8, 8.5], [0, 2, -1])])
def test_get_indexer_nearest(self, method, tolerance, indexer, expected):
index = Index(np.arange(10))
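# When a tolerance is given, matches farther from the target than the
# tolerance are reported as -1.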
actual = index.get_indexer(indexer, method=method, tolerance=tolerance)
tm.assert_numpy_array_equal(actual, np.array(expected,
dtype=np.intp))
@pytest.mark.parametrize('listtype', [list, tuple, Series, np.array])
@pytest.mark.parametrize(
'tolerance, expected',
list(zip([[0.3, 0.3, 0.1], [0.2, 0.1, 0.1],
[0.1, 0.5, 0.5]],
[[0, 2, -1], [0, -1, -1],
[-1, 2, 9]])))
def test_get_indexer_nearest_listlike_tolerance(self, tolerance,
expected, listtype):
index = Index(np.arange(10))
actual = index.get_indexer([0.2, 1.8, 8.5], method='nearest',
tolerance=listtype(tolerance))
tm.assert_numpy_array_equal(actual, np.array(expected,
dtype=np.intp))
def test_get_indexer_nearest_error(self):
index = Index(np.arange(10))
with pytest.raises(ValueError, match='limit argument'):
index.get_indexer([1, 0], method='nearest', limit=1)
with pytest.raises(ValueError, match='tolerance size must match'):
index.get_indexer([1, 0], method='nearest',
tolerance=[1, 2, 3])
@pytest.mark.parametrize("method,expected", [
('pad', [8, 7, 0]), ('backfill', [9, 8, 1]), ('nearest', [9, 7, 0])])
def test_get_indexer_nearest_decreasing(self, method, expected):
index = Index(np.arange(10))[::-1]
actual = index.get_indexer([0, 5, 9], method=method)
tm.assert_numpy_array_equal(actual, np.array([9, 4, 0], dtype=np.intp))
actual = index.get_indexer([0.2, 1.8, 8.5], method=method)
tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp))
@pytest.mark.parametrize("method,expected", [
('pad', np.array([-1, 0, 1, 1], dtype=np.intp)),
('backfill', np.array([0, 0, 1, -1], dtype=np.intp))])
def test_get_indexer_strings(self, method, expected):
index = pd.Index(['b', 'c'])
actual = index.get_indexer(['a', 'b', 'c', 'd'], method=method)
tm.assert_numpy_array_equal(actual, expected)
def test_get_indexer_strings_raises(self):
index = pd.Index(['b', 'c'])
msg = r"unsupported operand type\(s\) for -: 'str' and 'str'"
with pytest.raises(TypeError, match=msg):
index.get_indexer(['a', 'b', 'c', 'd'], method='nearest')
with pytest.raises(TypeError, match=msg):
index.get_indexer(['a', 'b', 'c', 'd'], method='pad', tolerance=2)
with pytest.raises(TypeError, match=msg):
index.get_indexer(['a', 'b', 'c', 'd'], method='pad',
tolerance=[2, 2, 2, 2])
def test_get_indexer_numeric_index_boolean_target(self):
# GH 16877
numeric_index = pd.Index(range(4))
result = numeric_index.get_indexer([True, False, True])
expected = np.array([-1, -1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
def test_get_indexer_with_NA_values(self, unique_nulls_fixture,
unique_nulls_fixture2):
# GH 22332
# check pairwise, that no pair of na values
# is mangled
if unique_nulls_fixture is unique_nulls_fixture2:
return # skip it, values are not unique
arr = np.array([unique_nulls_fixture,
unique_nulls_fixture2], dtype=np.object)
index = pd.Index(arr, dtype=np.object)
result = index.get_indexer([unique_nulls_fixture,
unique_nulls_fixture2, 'Unknown'])
expected = np.array([0, 1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("method", [None, 'pad', 'backfill', 'nearest'])
def test_get_loc(self, method):
index = pd.Index([0, 1, 2])
assert index.get_loc(1, method=method) == 1
if method:
assert index.get_loc(1, method=method, tolerance=0) == 1
@pytest.mark.parametrize("method", [None, 'pad', 'backfill', 'nearest'])
def test_get_loc_raises_bad_label(self, method):
index = pd.Index([0, 1, 2])
if method:
# Messages vary across versions
if PY36:
msg = 'not supported between'
else:
msg = 'unorderable types'
else:
msg = 'invalid key'
with pytest.raises(TypeError, match=msg):
index.get_loc([1, 2], method=method)
@pytest.mark.parametrize("method,loc", [
('pad', 1), ('backfill', 2), ('nearest', 1)])
def test_get_loc_tolerance(self, method, loc):
index = pd.Index([0, 1, 2])
assert index.get_loc(1.1, method) == loc
assert index.get_loc(1.1, method, tolerance=1) == loc
@pytest.mark.parametrize("method", ['pad', 'backfill', 'nearest'])
def test_get_loc_outside_tolerance_raises(self, method):
index = pd.Index([0, 1, 2])
with pytest.raises(KeyError, match='1.1'):
index.get_loc(1.1, method, tolerance=0.05)
def test_get_loc_bad_tolerance_raises(self):
index = pd.Index([0, 1, 2])
with pytest.raises(ValueError, match='must be numeric'):
index.get_loc(1.1, 'nearest', tolerance='invalid')
def test_get_loc_tolerance_no_method_raises(self):
index = pd.Index([0, 1, 2])
with pytest.raises(ValueError, match='tolerance .* valid if'):
index.get_loc(1.1, tolerance=1)
def test_get_loc_raises_missized_tolerance(self):
index = pd.Index([0, 1, 2])
with pytest.raises(ValueError, match='tolerance size must match'):
index.get_loc(1.1, 'nearest', tolerance=[1, 1])
def test_get_loc_raises_object_nearest(self):
index = pd.Index(['a', 'c'])
with pytest.raises(TypeError, match='unsupported operand type'):
index.get_loc('a', method='nearest')
def test_get_loc_raises_object_tolerance(self):
index = pd.Index(['a', 'c'])
with pytest.raises(TypeError, match='unsupported operand type'):
index.get_loc('a', method='pad', tolerance='invalid')
@pytest.mark.parametrize("dtype", [int, float])
def test_slice_locs(self, dtype):
index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(index)
assert index.slice_locs(start=2) == (2, n)
assert index.slice_locs(start=3) == (3, n)
assert index.slice_locs(3, 8) == (3, 6)
assert index.slice_locs(5, 10) == (3, n)
assert index.slice_locs(end=8) == (0, 6)
assert index.slice_locs(end=9) == (0, 7)
# reversed
index2 = index[::-1]
assert index2.slice_locs(8, 2) == (2, 6)
assert index2.slice_locs(7, 3) == (2, 5)
@pytest.mark.parametrize("dtype", [int, float])
def test_slice_float_locs(self, dtype):
index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(index)
assert index.slice_locs(5.0, 10.0) == (3, n)
assert index.slice_locs(4.5, 10.5) == (3, 8)
index2 = index[::-1]
assert index2.slice_locs(8.5, 1.5) == (2, 6)
assert index2.slice_locs(10.5, -1) == (0, n)
def test_slice_locs_dup(self):
index = Index(['a', 'a', 'b', 'c', 'd', 'd'])
assert index.slice_locs('a', 'd') == (0, 6)
assert index.slice_locs(end='d') == (0, 6)
assert index.slice_locs('a', 'c') == (0, 4)
assert index.slice_locs('b', 'd') == (2, 6)
index2 = index[::-1]
assert index2.slice_locs('d', 'a') == (0, 6)
assert index2.slice_locs(end='a') == (0, 6)
assert index2.slice_locs('d', 'b') == (0, 4)
assert index2.slice_locs('c', 'a') == (2, 6)
@pytest.mark.parametrize("dtype", [int, float])
def test_slice_locs_dup_numeric(self, dtype):
index = Index(np.array([10, 12, 12, 14], dtype=dtype))
assert index.slice_locs(12, 12) == (1, 3)
assert index.slice_locs(11, 13) == (1, 3)
index2 = index[::-1]
assert index2.slice_locs(12, 12) == (1, 3)
assert index2.slice_locs(13, 11) == (1, 3)
def test_slice_locs_na(self):
index = Index([np.nan, 1, 2])
assert index.slice_locs(1) == (1, 3)
assert index.slice_locs(np.nan) == (0, 3)
index = Index([0, np.nan, np.nan, 1, 2])
assert index.slice_locs(np.nan) == (1, 5)
def test_slice_locs_na_raises(self):
index = Index([np.nan, 1, 2])
with pytest.raises(KeyError, match=''):
index.slice_locs(start=1.5)
with pytest.raises(KeyError, match=''):
index.slice_locs(end=1.5)
@pytest.mark.parametrize("in_slice,expected", [
(pd.IndexSlice[::-1], 'yxdcb'), (pd.IndexSlice['b':'y':-1], ''),
(pd.IndexSlice['b'::-1], 'b'), (pd.IndexSlice[:'b':-1], 'yxdcb'),
(pd.IndexSlice[:'y':-1], 'y'), (pd.IndexSlice['y'::-1], 'yxdcb'),
(pd.IndexSlice['y'::-4], 'yb'),
# absent labels
(pd.IndexSlice[:'a':-1], 'yxdcb'), (pd.IndexSlice[:'a':-2], 'ydb'),
(pd.IndexSlice['z'::-1], 'yxdcb'), (pd.IndexSlice['z'::-3], 'yc'),
(pd.IndexSlice['m'::-1], 'dcb'), (pd.IndexSlice[:'m':-1], 'yx'),
(pd.IndexSlice['a':'a':-1], ''), (pd.IndexSlice['z':'z':-1], ''),
(pd.IndexSlice['m':'m':-1], '')
])
def test_slice_locs_negative_step(self, in_slice, expected):
index = Index(list('bcdxy'))
s_start, s_stop = index.slice_locs(in_slice.start, in_slice.stop,
in_slice.step)
result = index[s_start:s_stop:in_slice.step]
expected = pd.Index(list(expected))
tm.assert_index_equal(result, expected)
def test_drop_by_str_label(self):
# TODO: Parametrize these after replacing self.strIndex with fixture
n = len(self.strIndex)
drop = self.strIndex[lrange(5, 10)]
dropped = self.strIndex.drop(drop)
expected = self.strIndex[lrange(5) + lrange(10, n)]
tm.assert_index_equal(dropped, expected)
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
tm.assert_index_equal(dropped, expected)
@pytest.mark.parametrize("keys", [['foo', 'bar'], ['1', 'bar']])
def test_drop_by_str_label_raises_missing_keys(self, keys):
with pytest.raises(KeyError, match=''):
self.strIndex.drop(keys)
def test_drop_by_str_label_errors_ignore(self):
# TODO: Parametrize these after replacing self.strIndex with fixture
# errors='ignore'
n = len(self.strIndex)
drop = self.strIndex[lrange(5, 10)]
mixed = drop.tolist() + ['foo']
dropped = self.strIndex.drop(mixed, errors='ignore')
expected = self.strIndex[lrange(5) + lrange(10, n)]
tm.assert_index_equal(dropped, expected)
dropped = self.strIndex.drop(['foo', 'bar'], errors='ignore')
expected = self.strIndex[lrange(n)]
tm.assert_index_equal(dropped, expected)
def test_drop_by_numeric_label_loc(self):
# TODO: Parametrize numeric and str tests after self.strIndex fixture
index = Index([1, 2, 3])
dropped = index.drop(1)
expected = Index([2, 3])
tm.assert_index_equal(dropped, expected)
def test_drop_by_numeric_label_raises_missing_keys(self):
index = Index([1, 2, 3])
with pytest.raises(KeyError, match=''):
index.drop([3, 4])
@pytest.mark.parametrize("key,expected", [
(4, Index([1, 2, 3])), ([3, 4, 5], Index([1, 2]))])
def test_drop_by_numeric_label_errors_ignore(self, key, expected):
index = Index([1, 2, 3])
dropped = index.drop(key, errors='ignore')
tm.assert_index_equal(dropped, expected)
@pytest.mark.parametrize("values", [['a', 'b', ('c', 'd')],
['a', ('c', 'd'), 'b'],
[('c', 'd'), 'a', 'b']])
@pytest.mark.parametrize("to_drop", [[('c', 'd'), 'a'], ['a', ('c', 'd')]])
def test_drop_tuple(self, values, to_drop):
# GH 18304
index = pd.Index(values)
expected = pd.Index(['b'])
result = index.drop(to_drop)
tm.assert_index_equal(result, expected)
removed = index.drop(to_drop[0])
for drop_me in to_drop[1], [to_drop[1]]:
result = removed.drop(drop_me)
tm.assert_index_equal(result, expected)
removed = index.drop(to_drop[1])
msg = r"\"\[{}\] not found in axis\"".format(
re.escape(to_drop[1].__repr__()))
for drop_me in to_drop[1], [to_drop[1]]:
with pytest.raises(KeyError, match=msg):
removed.drop(drop_me)
@pytest.mark.parametrize("method,expected,sort", [
('intersection', np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')]),
False),
('intersection', np.array([(1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')]),
None),
('union', np.array([(1, 'A'), (1, 'B'), (1, 'C'), (2, 'A'), (2, 'B'),
(2, 'C')], dtype=[('num', int), ('let', 'a1')]),
None)
])
def test_tuple_union_bug(self, method, expected, sort):
index1 = Index(np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')]))
index2 = Index(np.array([(1, 'A'), (2, 'A'), (1, 'B'),
(2, 'B'), (1, 'C'), (2, 'C')],
dtype=[('num', int), ('let', 'a1')]))
result = getattr(index1, method)(index2, sort=sort)
assert result.ndim == 1
expected = Index(expected)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("attr", [
'is_monotonic_increasing', 'is_monotonic_decreasing',
'_is_strictly_monotonic_increasing',
'_is_strictly_monotonic_decreasing'])
def test_is_monotonic_incomparable(self, attr):
index = Index([5, datetime.now(), 7])
assert not getattr(index, attr)
def test_get_set_value(self):
# TODO: Remove function? GH 19728
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date), values[67])
self.dateIndex.set_value(values, date, 10)
assert values[67] == 10
@pytest.mark.parametrize("values", [
['foo', 'bar', 'quux'], {'foo', 'bar', 'quux'}])
@pytest.mark.parametrize("index,expected", [
(Index(['qux', 'baz', 'foo', 'bar']),
np.array([False, False, True, True])),
(Index([]), np.array([], dtype=bool)) # empty
])
def test_isin(self, values, index, expected):
result = index.isin(values)
tm.assert_numpy_array_equal(result, expected)
def test_isin_nan_common_object(self, nulls_fixture, nulls_fixture2):
# Test cartesian product of null fixtures and ensure that we don't
# mangle the various types (save a corner case with PyPy)
# all nans are the same
if (isinstance(nulls_fixture, float) and
isinstance(nulls_fixture2, float) and
math.isnan(nulls_fixture) and
math.isnan(nulls_fixture2)):
tm.assert_numpy_array_equal(Index(['a', nulls_fixture]).isin(
[nulls_fixture2]), np.array([False, True]))
elif nulls_fixture is nulls_fixture2: # should preserve NA type
tm.assert_numpy_array_equal(Index(['a', nulls_fixture]).isin(
[nulls_fixture2]), np.array([False, True]))
else:
tm.assert_numpy_array_equal(Index(['a', nulls_fixture]).isin(
[nulls_fixture2]), np.array([False, False]))
def test_isin_nan_common_float64(self, nulls_fixture):
if nulls_fixture is pd.NaT:
pytest.skip("pd.NaT not compatible with Float64Index")
# Float64Index overrides isin, so must be checked separately
tm.assert_numpy_array_equal(Float64Index([1.0, nulls_fixture]).isin(
[np.nan]), np.array([False, True]))
# we cannot compare NaT with NaN
tm.assert_numpy_array_equal(Float64Index([1.0, nulls_fixture]).isin(
[pd.NaT]), np.array([False, False]))
@pytest.mark.parametrize("level", [0, -1])
@pytest.mark.parametrize("index", [
Index(['qux', 'baz', 'foo', 'bar']),
# Float64Index overrides isin, so must be checked separately
Float64Index([1.0, 2.0, 3.0, 4.0])])
def test_isin_level_kwarg(self, level, index):
values = index.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(expected, index.isin(values, level=level))
index.name = 'foobar'
tm.assert_numpy_array_equal(expected,
index.isin(values, level='foobar'))
@pytest.mark.parametrize("level", [1, 10, -2])
@pytest.mark.parametrize("index", [
Index(['qux', 'baz', 'foo', 'bar']),
# Float64Index overrides isin, so must be checked separately
Float64Index([1.0, 2.0, 3.0, 4.0])])
def test_isin_level_kwarg_raises_bad_index(self, level, index):
with pytest.raises(IndexError, match='Too many levels'):
index.isin([], level=level)
@pytest.mark.parametrize("level", [1.0, 'foobar', 'xyzzy', np.nan])
@pytest.mark.parametrize("index", [
Index(['qux', 'baz', 'foo', 'bar']),
Float64Index([1.0, 2.0, 3.0, 4.0])])
def test_isin_level_kwarg_raises_key(self, level, index):
with pytest.raises(KeyError, match='must be same as name'):
index.isin([], level=level)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_isin_empty(self, empty):
# see gh-16991
index = Index(["a", "b"])
expected = np.array([False, False])
result = index.isin(empty)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("values", [
[1, 2, 3, 4],
[1., 2., 3., 4.],
[True, True, True, True],
["foo", "bar", "baz", "qux"],
pd.date_range('2018-01-01', freq='D', periods=4)])
def test_boolean_cmp(self, values):
index = Index(values)
result = (index == values)
expected = np.array([True, True, True, True], dtype=bool)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("name,level", [
(None, 0), ('a', 'a')])
def test_get_level_values(self, name, level):
expected = self.strIndex.copy()
if name:
expected.name = name
result = expected.get_level_values(level)
tm.assert_index_equal(result, expected)
def test_slice_keep_name(self):
index = Index(['a', 'b'], name='asdf')
assert index.name == index[1:].name
# instance attributes of the form self.<name>Index
@pytest.mark.parametrize('index_kind',
['unicode', 'str', 'date', 'int', 'float'])
def test_join_self(self, join_type, index_kind):
res = getattr(self, '{0}Index'.format(index_kind))
joined = res.join(res, how=join_type)
assert res is joined
@pytest.mark.parametrize("method", ['strip', 'rstrip', 'lstrip'])
def test_str_attribute(self, method):
# GH9068
index = Index([' jack', 'jill ', ' jesse ', 'frank'])
expected = Index([getattr(str, method)(x) for x in index.values])
result = getattr(index.str, method)()
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("index", [
Index(range(5)), tm.makeDateIndex(10),
MultiIndex.from_tuples([('foo', '1'), ('bar', '3')]),
period_range(start='2000', end='2010', freq='A')])
def test_str_attribute_raises(self, index):
with pytest.raises(AttributeError, match='only use .str accessor'):
index.str.repeat(2)
@pytest.mark.parametrize("expand,expected", [
(None, Index([['a', 'b', 'c'], ['d', 'e'], ['f']])),
(False, Index([['a', 'b', 'c'], ['d', 'e'], ['f']])),
(True, MultiIndex.from_tuples([('a', 'b', 'c'), ('d', 'e', np.nan),
('f', np.nan, np.nan)]))])
def test_str_split(self, expand, expected):
index = Index(['a b c', 'd e', 'f'])
if expand is not None:
result = index.str.split(expand=expand)
else:
result = index.str.split()
tm.assert_index_equal(result, expected)
def test_str_bool_return(self):
# test boolean case, should return np.array instead of boolean Index
index = Index(['a1', 'a2', 'b1', 'b2'])
result = index.str.startswith('a')
expected = np.array([True, True, False, False])
tm.assert_numpy_array_equal(result, expected)
assert isinstance(result, np.ndarray)
def test_str_bool_series_indexing(self):
index = Index(['a1', 'a2', 'b1', 'b2'])
s = Series(range(4), index=index)
result = s[s.index.str.startswith('a')]
expected = Series(range(2), index=['a1', 'a2'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("index,expected", [
(Index(list('abcd')), True), (Index(range(4)), False)])
def test_tab_completion(self, index, expected):
# GH 9910
result = 'str' in dir(index)
assert result == expected
def test_indexing_doesnt_change_class(self):
index = Index([1, 2, 3, 'a', 'b', 'c'])
assert index[1:3].identical(pd.Index([2, 3], dtype=np.object_))
assert index[[0, 1]].identical(pd.Index([1, 2], dtype=np.object_))
def test_outer_join_sort(self):
left_index = Index(np.random.permutation(15))
right_index = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
result = left_index.join(right_index, how='outer')
# right_index in this case because DatetimeIndex has join precedence
# over Int64Index
with tm.assert_produces_warning(RuntimeWarning):
expected = right_index.astype(object).union(
left_index.astype(object))
tm.assert_index_equal(result, expected)
def test_nan_first_take_datetime(self):
index = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
result = index.take([-1, 0, 1])
expected = Index([index[-1], index[0], index[1]])
tm.assert_index_equal(result, expected)
def test_take_fill_value(self):
# GH 12631
index = pd.Index(list('ABC'), name='xxx')
result = index.take(np.array([1, 0, -1]))
expected = pd.Index(list('BAC'), name='xxx')
tm.assert_index_equal(result, expected)
# fill_value
result = index.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.Index(['B', 'A', np.nan], name='xxx')
tm.assert_index_equal(result, expected)
# allow_fill=False
result = index.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.Index(['B', 'A', 'C'], name='xxx')
tm.assert_index_equal(result, expected)
def test_take_fill_value_none_raises(self):
index = pd.Index(list('ABC'), name='xxx')
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with pytest.raises(ValueError, match=msg):
index.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
index.take(np.array([1, 0, -5]), fill_value=True)
def test_take_bad_bounds_raises(self):
index = pd.Index(list('ABC'), name='xxx')
with pytest.raises(IndexError, match='out of bounds'):
index.take(np.array([1, -5]))
@pytest.mark.parametrize("name", [None, 'foobar'])
@pytest.mark.parametrize("labels", [
[], np.array([]), ['A', 'B', 'C'], ['C', 'B', 'A'],
np.array(['A', 'B', 'C']), np.array(['C', 'B', 'A']),
# Must preserve name even if dtype changes
pd.date_range('20130101', periods=3).values,
pd.date_range('20130101', periods=3).tolist()])
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self, name,
labels):
# GH6552
index = pd.Index([0, 1, 2])
index.name = name
assert index.reindex(labels)[0].name == name
@pytest.mark.parametrize("labels", [
[], np.array([]), np.array([], dtype=np.int64)])
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self,
labels):
# GH7774
index = pd.Index(list('abc'))
assert index.reindex(labels)[0].dtype.type == np.object_
@pytest.mark.parametrize("labels,dtype", [
(pd.Int64Index([]), np.int64),
(pd.Float64Index([]), np.float64),
(pd.DatetimeIndex([]), np.datetime64)])
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self,
labels,
dtype):
# GH7774
index = pd.Index(list('abc'))
assert index.reindex(labels)[0].dtype.type == dtype
def test_reindex_no_type_preserve_target_empty_mi(self):
index = pd.Index(list('abc'))
result = index.reindex(pd.MultiIndex(
[pd.Int64Index([]), pd.Float64Index([])], [[], []]))[0]
assert result.levels[0].dtype.type == np.int64
assert result.levels[1].dtype.type == np.float64
def test_groupby(self):
index = Index(range(5))
result = index.groupby(np.array([1, 1, 2, 2, 2]))
expected = {1: pd.Index([0, 1]), 2: pd.Index([2, 3, 4])}
tm.assert_dict_equal(result, expected)
@pytest.mark.parametrize("mi,expected", [
(MultiIndex.from_tuples([(1, 2), (4, 5)]), np.array([True, True])),
(MultiIndex.from_tuples([(1, 2), (4, 6)]), np.array([True, False]))])
def test_equals_op_multiindex(self, mi, expected):
# GH9785
# test comparisons of multiindex
df = pd.read_csv(StringIO('a,b,c\n1,2,3\n4,5,6'), index_col=[0, 1])
result = df.index == mi
tm.assert_numpy_array_equal(result, expected)
def test_equals_op_multiindex_identify(self):
df = pd.read_csv(StringIO('a,b,c\n1,2,3\n4,5,6'), index_col=[0, 1])
result = df.index == df.index
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("index", [
MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)]),
Index(['foo', 'bar', 'baz'])])
def test_equals_op_mismatched_multiindex_raises(self, index):
df = pd.read_csv(StringIO('a,b,c\n1,2,3\n4,5,6'), index_col=[0, 1])
with pytest.raises(ValueError, match="Lengths must match"):
df.index == index
def test_equals_op_index_vs_mi_same_length(self):
mi = MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)])
index = Index(['foo', 'bar', 'baz'])
result = mi == index
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dt_conv", [
pd.to_datetime, pd.to_timedelta])
def test_dt_conversion_preserves_name(self, dt_conv):
# GH 10875
index = pd.Index(['01:02:03', '01:02:04'], name='label')
assert index.name == dt_conv(index).name
@pytest.mark.parametrize("index,expected", [
# ASCII
# short
(pd.Index(['a', 'bb', 'ccc']),
"""Index(['a', 'bb', 'ccc'], dtype='object')"""),
# multiple lines
(pd.Index(['a', 'bb', 'ccc'] * 10),
"""\
Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc',
'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc',
'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
dtype='object')"""),
# truncated
(pd.Index(['a', 'bb', 'ccc'] * 100),
"""\
Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
...
'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
dtype='object', length=300)"""),
# Non-ASCII
# short
(pd.Index(['あ', 'いい', 'ううう']),
"""Index(['あ', 'いい', 'ううう'], dtype='object')"""),
# multiple lines
(pd.Index(['あ', 'いい', 'ううう'] * 10),
("Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
" 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
" 'あ', 'いい', 'ううう', 'あ', 'いい', "
"'ううう'],\n"
" dtype='object')")),
# truncated
(pd.Index(['あ', 'いい', 'ううう'] * 100),
("Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
"'あ', 'いい', 'ううう', 'あ',\n"
" ...\n"
" 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', "
"'ううう', 'あ', 'いい', 'ううう'],\n"
" dtype='object', length=300)"))])
def test_string_index_repr(self, index, expected):
result = repr(index)
assert result == expected
@pytest.mark.parametrize("index,expected", [
# short
(pd.Index(['あ', 'いい', 'ううう']),
("Index(['あ', 'いい', 'ううう'], "
"dtype='object')")),
# multiple lines
(pd.Index(['あ', 'いい', 'ううう'] * 10),
("Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
"'ううう', 'あ', 'いい', 'ううう',\n"
" 'あ', 'いい', 'ううう', 'あ', 'いい', "
"'ううう', 'あ', 'いい', 'ううう',\n"
" 'あ', 'いい', 'ううう', 'あ', 'いい', "
"'ううう', 'あ', 'いい', 'ううう',\n"
" 'あ', 'いい', 'ううう'],\n"
" dtype='object')""")),
# truncated
(pd.Index(['あ', 'いい', 'ううう'] * 100),
("Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
"'ううう', 'あ', 'いい', 'ううう',\n"
" 'あ',\n"
" ...\n"
" 'ううう', 'あ', 'いい', 'ううう', 'あ', "
"'いい', 'ううう', 'あ', 'いい',\n"
" 'ううう'],\n"
" dtype='object', length=300)"))])
def test_string_index_repr_with_unicode_option(self, index, expected):
# Enable Unicode option -----------------------------------------
with cf.option_context('display.unicode.east_asian_width', True):
result = repr(index)
assert result == expected
def test_cached_properties_not_settable(self):
index = pd.Index([1, 2, 3])
with pytest.raises(AttributeError, match="Can't set attribute"):
index.is_unique = False
def test_get_duplicates_deprecated(self):
index = pd.Index([1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
index.get_duplicates()
def test_tab_complete_warning(self, ip):
# https://github.com/pandas-dev/pandas/issues/16409
pytest.importorskip('IPython', minversion="6.0.0")
from IPython.core.completer import provisionalcompleter
code = "import pandas as pd; idx = pd.Index([1, 2])"
ip.run_code(code)
with tm.assert_produces_warning(None):
with provisionalcompleter('ignore'):
list(ip.Completer.completions('idx.', 4))
class TestMixedIntIndex(Base):
# Mostly the tests from common.py for which the results differ
# in py2 and py3 because ints and strings are uncomparable in py3
# (GH 13514)
_holder = Index
def setup_method(self, method):
self.indices = dict(mixedIndex=Index([0, 'a', 1, 'b', 2, 'c']))
self.setup_indices()
def create_index(self):
return self.mixedIndex
def test_argsort(self):
index = self.create_index()
if PY36:
with pytest.raises(TypeError, match="'>|<' not supported"):
index.argsort()
else:
with pytest.raises(TypeError, match="unorderable types"):
index.argsort()
def test_numpy_argsort(self):
index = self.create_index()
if PY36:
with pytest.raises(TypeError, match="'>|<' not supported"):
np.argsort(index)
else:
with pytest.raises(TypeError, match="unorderable types"):
np.argsort(index)
def test_copy_name(self):
# Check that "name" argument passed at initialization is honoured
# GH12309
index = self.create_index()
first = index.__class__(index, copy=True, name='mario')
second = first.__class__(first, copy=False)
# Even though "copy=False", we want a new object.
assert first is not second
tm.assert_index_equal(first, second)
assert first.name == 'mario'
assert second.name == 'mario'
s1 = Series(2, index=first)
s2 = Series(3, index=second[:-1])
s3 = s1 * s2
assert s3.index.name == 'mario'
def test_copy_name2(self):
# Check that adding a "name" parameter to the copy is honored
# GH14302
index = pd.Index([1, 2], name='MyName')
index1 = index.copy()
tm.assert_index_equal(index, index1)
index2 = index.copy(name='NewName')
tm.assert_index_equal(index, index2, check_names=False)
assert index.name == 'MyName'
assert index2.name == 'NewName'
index3 = index.copy(names=['NewName'])
tm.assert_index_equal(index, index3, check_names=False)
assert index.name == 'MyName'
assert index.names == ['MyName']
assert index3.name == 'NewName'
assert index3.names == ['NewName']
def test_union_base(self):
index = self.create_index()
first = index[3:]
second = index[:5]
result = first.union(second)
expected = Index([0, 1, 2, 'a', 'b', 'c'])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("klass", [
np.array, Series, list])
def test_union_different_type_base(self, klass):
# GH 10149
index = self.create_index()
first = index[3:]
second = index[:5]
result = first.union(klass(second.values))
assert tm.equalContents(result, index)
def test_unique_na(self):
idx = pd.Index([2, np.nan, 2, 1], name='my_index')
expected = pd.Index([2, np.nan, 1], name='my_index')
result = idx.unique()
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("sort", [None, False])
def test_intersection_base(self, sort):
# (same results for py2 and py3 but sortedness not tested elsewhere)
index = self.create_index()
first = index[:5]
second = index[:3]
expected = Index([0, 1, 'a']) if sort is None else Index([0, 'a', 1])
result = first.intersection(second, sort=sort)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("klass", [
np.array, Series, list])
@pytest.mark.parametrize("sort", [None, False])
def test_intersection_different_type_base(self, klass, sort):
# GH 10149
index = self.create_index()
first = index[:5]
second = index[:3]
result = first.intersection(klass(second.values), sort=sort)
assert tm.equalContents(result, second)
@pytest.mark.parametrize("sort", [None, False])
def test_difference_base(self, sort):
# (same results for py2 and py3 but sortedness not tested elsewhere)
index = self.create_index()
first = index[:4]
second = index[3:]
result = first.difference(second, sort)
expected = Index([0, 'a', 1])
if sort is None:
expected = Index(safe_sort(expected))
tm.assert_index_equal(result, expected)
def test_symmetric_difference(self):
# (same results for py2 and py3 but sortedness not tested elsewhere)
index = self.create_index()
first = index[:4]
second = index[3:]
result = first.symmetric_difference(second)
expected = Index([0, 1, 2, 'a', 'c'])
tm.assert_index_equal(result, expected)
def test_logical_compat(self):
index = self.create_index()
assert index.all() == index.values.all()
assert index.any() == index.values.any()
@pytest.mark.parametrize("how", ['any', 'all'])
@pytest.mark.parametrize("dtype", [
None, object, 'category'])
@pytest.mark.parametrize("vals,expected", [
([1, 2, 3], [1, 2, 3]), ([1., 2., 3.], [1., 2., 3.]),
([1., 2., np.nan, 3.], [1., 2., 3.]),
(['A', 'B', 'C'], ['A', 'B', 'C']),
(['A', np.nan, 'B', 'C'], ['A', 'B', 'C'])])
def test_dropna(self, how, dtype, vals, expected):
# GH 6194
index = pd.Index(vals, dtype=dtype)
result = index.dropna(how=how)
expected = pd.Index(expected, dtype=dtype)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("how", ['any', 'all'])
@pytest.mark.parametrize("index,expected", [
(pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03']),
pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'])),
(pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03', pd.NaT]),
pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'])),
(pd.TimedeltaIndex(['1 days', '2 days', '3 days']),
pd.TimedeltaIndex(['1 days', '2 days', '3 days'])),
(pd.TimedeltaIndex([pd.NaT, '1 days', '2 days', '3 days', pd.NaT]),
pd.TimedeltaIndex(['1 days', '2 days', '3 days'])),
(pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M'),
pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M')),
(pd.PeriodIndex(['2012-02', '2012-04', 'NaT', '2012-05'], freq='M'),
pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M'))])
def test_dropna_dt_like(self, how, index, expected):
result = index.dropna(how=how)
tm.assert_index_equal(result, expected)
def test_dropna_invalid_how_raises(self):
msg = "invalid how option: xxx"
with pytest.raises(ValueError, match=msg):
pd.Index([1, 2, 3]).dropna(how='xxx')
def test_get_combined_index(self):
result = _get_combined_index([])
expected = Index([])
tm.assert_index_equal(result, expected)
def test_repeat(self):
repeats = 2
index = pd.Index([1, 2, 3])
expected = pd.Index([1, 1, 2, 2, 3, 3])
result = index.repeat(repeats)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("index", [
pd.Index([np.nan]), pd.Index([np.nan, 1]),
pd.Index([1, 2, np.nan]), pd.Index(['a', 'b', np.nan]),
pd.to_datetime(['NaT']), pd.to_datetime(['NaT', '2000-01-01']),
pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']),
pd.to_timedelta(['1 day', 'NaT'])])
def test_is_monotonic_na(self, index):
assert index.is_monotonic_increasing is False
assert index.is_monotonic_decreasing is False
assert index._is_strictly_monotonic_increasing is False
assert index._is_strictly_monotonic_decreasing is False
def test_repr_summary(self):
with cf.option_context('display.max_seq_items', 10):
result = repr(pd.Index(np.arange(1000)))
assert len(result) < 200
assert "..." in result
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_int_name_format(self, klass):
index = Index(['a', 'b', 'c'], name=0)
result = klass(lrange(3), index=index)
assert '0' in repr(result)
def test_print_unicode_columns(self):
df = pd.DataFrame({"\u05d0": [1, 2, 3],
"\u05d1": [4, 5, 6],
"c": [7, 8, 9]})
repr(df.columns) # should not raise UnicodeDecodeError
@pytest.mark.parametrize("func", [str, bytes])
def test_with_unicode(self, func):
index = Index(lrange(1000))
func(index)
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
index1 = Index(dt_dates, dtype=object)
index2 = Index(['aa'], dtype=object)
result = index2.intersection(index1)
expected = Index([], dtype=object)
tm.assert_index_equal(result, expected)
class TestIndexUtils:
@pytest.mark.parametrize('data, names, expected', [
([[1, 2, 3]], None, Index([1, 2, 3])),
([[1, 2, 3]], ['name'], Index([1, 2, 3], name='name')),
([['a', 'a'], ['c', 'd']], None,
MultiIndex([['a'], ['c', 'd']], [[0, 0], [0, 1]])),
([['a', 'a'], ['c', 'd']], ['L1', 'L2'],
MultiIndex([['a'], ['c', 'd']], [[0, 0], [0, 1]],
names=['L1', 'L2'])),
])
def test_ensure_index_from_sequences(self, data, names, expected):
result = ensure_index_from_sequences(data, names)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('opname', ['eq', 'ne', 'le', 'lt', 'ge', 'gt',
'add', 'radd', 'sub', 'rsub',
'mul', 'rmul', 'truediv', 'rtruediv',
'floordiv', 'rfloordiv',
'pow', 'rpow', 'mod', 'divmod'])
def test_generated_op_names(opname, indices):
index = indices
if isinstance(index, ABCIndex) and opname == 'rsub':
# pd.Index.__rsub__ does not exist; though the method does exist
# for subclasses. see GH#19723
return
opname = '__{name}__'.format(name=opname)
method = getattr(index, opname)
assert method.__name__ == opname
@pytest.mark.parametrize('index_maker', tm.index_subclass_makers_generator())
def test_index_subclass_constructor_wrong_kwargs(index_maker):
# GH #19348
with pytest.raises(TypeError, match='unexpected keyword argument'):
index_maker(foo='bar')
def test_deprecated_fastpath():
with tm.assert_produces_warning(FutureWarning):
idx = pd.Index(
np.array(['a', 'b'], dtype=object), name='test', fastpath=True)
expected = pd.Index(['a', 'b'], name='test')
tm.assert_index_equal(idx, expected)
with tm.assert_produces_warning(FutureWarning):
idx = pd.Int64Index(
np.array([1, 2, 3], dtype='int64'), name='test', fastpath=True)
expected = pd.Index([1, 2, 3], name='test', dtype='int64')
tm.assert_index_equal(idx, expected)
with tm.assert_produces_warning(FutureWarning):
idx = pd.RangeIndex(0, 5, 2, name='test', fastpath=True)
expected = pd.RangeIndex(0, 5, 2, name='test')
tm.assert_index_equal(idx, expected)
with tm.assert_produces_warning(FutureWarning):
idx = pd.CategoricalIndex(['a', 'b', 'c'], name='test', fastpath=True)
expected = pd.CategoricalIndex(['a', 'b', 'c'], name='test')
tm.assert_index_equal(idx, expected)
|
the-stack_106_22507 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Run list encoding utilities.
.. versionadded:: 1.1
'''
from builtins import str
from builtins import zip
from builtins import next
from builtins import object
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
class _Run(object):
def __init__(self, value, count):
self.value = value
self.count = count
def __repr__(self):
return 'Run(%r, %d)' % (self.value, self.count)
class RunList(object):
'''List of contiguous runs of values.
A `RunList` is an efficient encoding of a sequence of values. For
example, the sequence ``aaaabbccccc`` is encoded as ``(4, a), (2, b),
(5, c)``. The class provides methods for modifying and querying the
run list without needing to deal with the tricky cases of splitting and
merging the run list entries.
Run lists are used to represent formatted character data in pyglet. A
separate run list is maintained for each style attribute, for example,
bold, italic, font size, and so on. Unless you are overriding the
document interfaces, the only interaction with run lists is via
`RunIterator`.
The length and ranges of a run list always refer to the character
positions in the decoded list. For example, in the above sequence,
``set_run(2, 5, 'x')`` would change the sequence to ``aaxxxbccccc``.
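    A short illustrative example (matching the methods defined below)::

        runs = RunList(11, 'a')      # aaaaaaaaaaa
        runs.set_run(4, 6, 'b')      # aaaabbaaaaa
        runs.set_run(6, 11, 'c')     # aaaabbccccc
        list(runs)                   # [(0, 4, 'a'), (4, 6, 'b'), (6, 11, 'c')]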
'''
def __init__(self, size, initial):
'''Create a run list of the given size and a default value.
:Parameters:
`size` : int
Number of characters to represent initially.
`initial` : object
The value of all characters in the run list.
'''
self.runs = [_Run(initial, size)]
def insert(self, pos, length):
'''Insert characters into the run list.
The inserted characters will take on the value immediately preceding
the insertion point (or the value of the first character, if `pos` is
0).
:Parameters:
`pos` : int
Insertion index
`length` : int
Number of characters to insert.
'''
i = 0
for run in self.runs:
if i <= pos <= i + run.count:
run.count += length
i += run.count
def delete(self, start, end):
'''Remove characters from the run list.
:Parameters:
`start` : int
Starting index to remove from.
`end` : int
End index, exclusive.
'''
i = 0
for run in self.runs:
if end - start == 0:
break
if i <= start <= i + run.count:
trim = min(end - start, i + run.count - start)
run.count -= trim
end -= trim
i += run.count
self.runs = [r for r in self.runs if r.count > 0]
# Don't leave an empty list
if not self.runs:
self.runs = [_Run(run.value, 0)]
def set_run(self, start, end, value):
'''Set the value of a range of characters.
:Parameters:
`start` : int
Start index of range.
`end` : int
End of range, exclusive.
`value` : object
Value to set over the range.
'''
if end - start <= 0:
return
# Find runs that need to be split
i = 0
start_i = None
start_trim = 0
end_i = None
end_trim = 0
for run_i, run in enumerate(self.runs):
count = run.count
if i < start < i + count:
start_i = run_i
start_trim = start - i
if i < end < i + count:
end_i = run_i
end_trim = end - i
i += count
# Split runs
if start_i is not None:
run = self.runs[start_i]
self.runs.insert(start_i, _Run(run.value, start_trim))
run.count -= start_trim
if end_i is not None:
if end_i == start_i:
end_trim -= start_trim
end_i += 1
if end_i is not None:
run = self.runs[end_i]
self.runs.insert(end_i, _Run(run.value, end_trim))
run.count -= end_trim
# Set new value on runs
i = 0
for run in self.runs:
if start <= i and i + run.count <= end:
run.value = value
i += run.count
# Merge adjacent runs
last_run = self.runs[0]
for run in self.runs[1:]:
if run.value == last_run.value:
run.count += last_run.count
last_run.count = 0
last_run = run
# Delete collapsed runs
self.runs = [r for r in self.runs if r.count > 0]
def __iter__(self):
i = 0
for run in self.runs:
yield i, i + run.count, run.value
i += run.count
def get_run_iterator(self):
'''Get an extended iterator over the run list.
:rtype: `RunIterator`
'''
return RunIterator(self)
def __getitem__(self, index):
'''Get the value at a character position.
:Parameters:
`index` : int
Index of character. Must be within range and non-negative.
:rtype: object
'''
i = 0
for run in self.runs:
if i <= index < i + run.count:
return run.value
i += run.count
# Append insertion point
if index == i:
return self.runs[-1].value
assert False, 'Index not in range'
def __repr__(self):
return str(list(self))
class AbstractRunIterator(object):
'''Range iteration over `RunList`.
`AbstractRunIterator` objects allow any monotonically non-decreasing
access of the iteration, including repeated iteration over the same index.
Use the ``[index]`` operator to get the value at a particular index within
the document. For example::
run_iter = iter(run_list)
value = run_iter[0]
value = run_iter[0] # non-decreasing access is OK
value = run_iter[15]
value = run_iter[17]
value = run_iter[16] # this is illegal, the index decreased.
Using `AbstractRunIterator` to access increasing indices of the value runs
is more efficient than calling `RunList.__getitem__` repeatedly.
You can also iterate over monotonically non-decreasing ranges over the
iteration. For example::
run_iter = iter(run_list)
for start, end, value in run_iter.ranges(0, 20):
pass
for start, end, value in run_iter.ranges(25, 30):
pass
for start, end, value in run_iter.ranges(30, 40):
pass
Both start and end indices of the slice are required and must be positive.
'''
def __getitem__(self, index):
'''Get the value at a given index.
See the class documentation for examples of valid usage.
:Parameters:
`index` : int
Document position to query.
:rtype: object
'''
def ranges(self, start, end):
'''Iterate over a subrange of the run list.
See the class documentation for examples of valid usage.
:Parameters:
`start` : int
Start index to iterate from.
`end` : int
End index, exclusive.
:rtype: iterator
:return: Iterator over (start, end, value) tuples.
'''
class RunIterator(AbstractRunIterator):
def __init__(self, run_list):
self._run_list_iter = iter(run_list)
self.start, self.end, self.value = next(self)
def __next__(self):
return next(self._run_list_iter)
def __getitem__(self, index):
while index >= self.end and index > self.start:
# condition has special case for 0-length run (fixes issue 471)
self.start, self.end, self.value = next(self)
return self.value
def ranges(self, start, end):
while start >= self.end:
self.start, self.end, self.value = next(self)
yield start, min(self.end, end), self.value
while end > self.end:
self.start, self.end, self.value = next(self)
yield self.start, min(self.end, end), self.value
class OverriddenRunIterator(AbstractRunIterator):
'''Iterator over a `RunIterator`, with a value temporarily replacing
a given range.
'''
def __init__(self, base_iterator, start, end, value):
'''Create a derived iterator.
:Parameters:
`start` : int
Start of range to override
`end` : int
End of range to override, exclusive
`value` : object
Value to replace over the range
'''
self.iter = base_iterator
self.override_start = start
self.override_end = end
self.override_value = value
def ranges(self, start, end):
if end <= self.override_start or start >= self.override_end:
# No overlap
for r in self.iter.ranges(start, end):
yield r
else:
# Overlap: before, override, after
if start < self.override_start < end:
for r in self.iter.ranges(start, self.override_start):
yield r
yield (max(self.override_start, start),
min(self.override_end, end),
self.override_value)
if start < self.override_end < end:
for r in self.iter.ranges(self.override_end, end):
yield r
def __getitem__(self, index):
if self.override_start <= index < self.override_end:
return self.override_value
else:
return self.iter[index]
class FilteredRunIterator(AbstractRunIterator):
'''Iterate over an `AbstractRunIterator` with filtered values replaced
by a default value.
'''
def __init__(self, base_iterator, filter, default):
'''Create a filtered run iterator.
:Parameters:
`base_iterator` : `AbstractRunIterator`
Source of runs.
`filter` : ``lambda object: bool``
Function taking a value as parameter, and returning ``True``
if the value is acceptable, and ``False`` if the default value
should be substituted.
`default` : object
Default value to replace filtered values.
'''
self.iter = base_iterator
self.filter = filter
self.default = default
def ranges(self, start, end):
for start, end, value in self.iter.ranges(start, end):
if self.filter(value):
yield start, end, value
else:
yield start, end, self.default
def __getitem__(self, index):
value = self.iter[index]
if self.filter(value):
return value
return self.default
class ZipRunIterator(AbstractRunIterator):
'''Iterate over multiple run iterators concurrently.'''
def __init__(self, range_iterators):
self.range_iterators = range_iterators
def ranges(self, start, end):
iterators = [i.ranges(start, end) for i in self.range_iterators]
starts, ends, values = zip(*[next(i) for i in iterators])
starts = list(starts)
ends = list(ends)
values = list(values)
while start < end:
min_end = min(ends)
yield start, min_end, values
start = min_end
for i, iterator in enumerate(iterators):
if ends[i] == min_end:
starts[i], ends[i], values[i] = next(iterator)
def __getitem__(self, index):
return [i[index] for i in self.range_iterators]
class ConstRunIterator(AbstractRunIterator):
'''Iterate over a constant value without creating a RunList.'''
def __init__(self, length, value):
self.length = length
self.value = value
def __next__(self):
yield 0, self.length, self.value
def ranges(self, start, end):
yield start, end, self.value
def __getitem__(self, index):
return self.value
|
the-stack_106_22508 | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2018 Dan Tès <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.api.core import *
class InputBotInlineResultPhoto(Object):
"""Attributes:
ID: ``0xa8d864a7``
Args:
id: ``str``
type: ``str``
photo: Either :obj:`InputPhotoEmpty <pyrogram.api.types.InputPhotoEmpty>` or :obj:`InputPhoto <pyrogram.api.types.InputPhoto>`
send_message: Either :obj:`InputBotInlineMessageMediaAuto <pyrogram.api.types.InputBotInlineMessageMediaAuto>`, :obj:`InputBotInlineMessageText <pyrogram.api.types.InputBotInlineMessageText>`, :obj:`InputBotInlineMessageMediaGeo <pyrogram.api.types.InputBotInlineMessageMediaGeo>`, :obj:`InputBotInlineMessageMediaVenue <pyrogram.api.types.InputBotInlineMessageMediaVenue>`, :obj:`InputBotInlineMessageMediaContact <pyrogram.api.types.InputBotInlineMessageMediaContact>` or :obj:`InputBotInlineMessageGame <pyrogram.api.types.InputBotInlineMessageGame>`
"""
ID = 0xa8d864a7
def __init__(self, id: str, type: str, photo, send_message):
self.id = id # string
self.type = type # string
self.photo = photo # InputPhoto
self.send_message = send_message # InputBotInlineMessage
@staticmethod
def read(b: BytesIO, *args) -> "InputBotInlineResultPhoto":
# No flags
id = String.read(b)
type = String.read(b)
photo = Object.read(b)
send_message = Object.read(b)
return InputBotInlineResultPhoto(id, type, photo, send_message)
def write(self) -> bytes:
b = BytesIO()
b.write(Int(self.ID, False))
# No flags
b.write(String(self.id))
b.write(String(self.type))
b.write(self.photo.write())
b.write(self.send_message.write())
return b.getvalue()
|
the-stack_106_22509 | import math
import torch
from torch import nn
class PositionalEncoding(nn.Module):
"""
Implementation of the positional encoding from Vaswani et al. 2017
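    For position ``pos`` and feature pair index ``i`` the table built below is
    the sinusoidal encoding from the paper:

        PE(pos, 2i)   = sin(pos / 10000**(2i / d_model))
        PE(pos, 2i+1) = cos(pos / 10000**(2i / d_model))

    The constructor precomputes this table for ``max_len`` positions.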
"""
def __init__(self, d_model, dropout=0., max_len=5000, affinity=False, batch_first=True):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
if affinity:
self.affinity = nn.Linear(d_model, d_model)
else:
self.affinity = None
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
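        # div_term[i] == 1 / 10000**(2i / d_model); the exp/log form above is just a convenient way to compute it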
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer('pe', pe)
self.batch_first = batch_first
def forward(self, x):
if self.affinity is not None:
x = self.affinity(x)
        if self.batch_first:
            # x: (batch, seq, d_model); the pe buffer is (max_len, 1, d_model),
            # so move the position axis to dim 1 before broadcasting
            pe = self.pe[:x.size(1), :].transpose(0, 1)
        else:
            # x: (seq, batch, d_model)
            pe = self.pe[:x.size(0), :]
        x = x + pe
return self.dropout(x) |
the-stack_106_22510 | #!/usr/bin/env python3
#coding=utf-8
import numpy as np
import time
from math import cos, sin, sqrt, pi, atan, asin, atan, atan2
from scipy.optimize import least_squares
from scipy.spatial.transform import Rotation
from std_msgs.msg import String
from angles import normalize_angle
import rospy
from origarm_ros.srv import ik
from origarm_ros.msg import *
import traceback
class softArm(object):
    def __init__(self, c1=6 * 400 * 0.09 / 2.3e-3 / 2, lengthD=0.403, length=0.403,
                 length0=0.403, alphaD=.0, betaD=.0, alpha=.0, beta=.0, actuator_type='small'):
# meter radian
self.initialBellowConfigurationAngle = [.0, pi/3, 2*pi/3, pi, -2*pi/3 ,-pi/3]
self.bellowConfigurationAngleCos = np.array([.0]*6)
self.bellowConfigurationAngleSin = np.array([.0]*6)
self.pressureD = [.0]*6
self.pressure = [.0]*6
self.pressureLimit_Upper = 210 #Kpa
self.pressureLimit_Lower = -100
if actuator_type == 'big':
self.c1 = c1
self.radR = 0.09
elif actuator_type == 'small':
self.c1 = c1
self.radR = 0.0615
self.angleULimit = pi
self.angleDLimit = 0
self.lengthULimit = 10
self.lengthDLimit = 0
self.posVector = np.array([.0, .0, .0])
self.posVector_D = np.array([.0, .0, .0])
self.speedVector = np.array([.0, .0, .0])
self.angleVelocity = np.array([.0, .0, .0])
self.lengthD = lengthD
self.length = length
self.length0 = length0
self.alphaD = alphaD
self.betaD = betaD
self.alpha = alpha
self.beta = beta
self.flag = 0
for i in range(6):
self.bellowConfigurationAngleCos[i] = cos(self.initialBellowConfigurationAngle[i])
self.bellowConfigurationAngleSin[i] = sin(self.initialBellowConfigurationAngle[i])
self.ABLD2PD()
def constriant(self, *args):
if self.angleDLimit <= args[0] <= self.angleULimit\
and self.lengthDLimit <= args[1] <= self.lengthULimit:
return 1
else:
return 0
def ABLD2PD(self):
b1 = 2 * self.c1 * (self.lengthD - self.length0) / self.radR / 6
btemp = self.c1 * self.alphaD / 6
b2 = btemp * cos(self.betaD)
b3 = 1.7320508 * btemp * sin(self.betaD)
self.pressureD[0] = b1 + b2 * 2
self.pressureD[1] = b1 + b2 + b3
self.pressureD[2] = b1 - b2 + b3
self.pressureD[3] = b1 - b2 * 2
self.pressureD[4] = b1 - b2 - b3
self.pressureD[5] = b1 + b2 - b3
return self.pressureD
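    # The mapping implemented by ABLD2PD above (and by ABL2P below) reduces to
    #   p_i = c1 * [ (L - L0) / (3 * radR) + (alpha / 3) * cos(beta - theta_i) ]
    # with theta_i the bellow angles in initialBellowConfigurationAngle: the mean
    # pressure sets the segment length and the cos/sin components set the bending
    # plane.  PD2ABLD / P2ABL invert it by projecting the six pressures onto
    # [1, cos(theta_i), sin(theta_i)].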
def PD2ABLD(self):
phycD = np.dot(self.pressureD,self.bellowConfigurationAngleCos)
physD = np.dot(self.pressureD,self.bellowConfigurationAngleSin)
self.alphaD = sqrt(phycD**2+physD**2)/self.c1
self.betaD = atan2(physD,phycD)
self.lengthD = (self.pressureD[0]+self.pressureD[1]+self.pressureD[2]+self.pressureD[3]+
self.pressureD[4]+self.pressureD[5])/self.c1/2*self.radR+self.length0
return self.alphaD, self.betaD, self.lengthD
def ABL2P(self):
b1 = 2 * self.c1 * (self.length - self.length0) / self.radR / 6
btemp = self.c1 * self.alpha / 6
b2 = btemp * cos(self.beta)
b3 = 1.7320508 * btemp * sin(self.beta)
self.pressure[0] = b1 + b2 * 2
self.pressure[1] = b1 + b2 + b3
self.pressure[2] = b1 - b2 + b3
self.pressure[3] = b1 - b2 * 2
self.pressure[4] = b1 - b2 - b3
self.pressure[5] = b1 + b2 - b3
return self.pressure
def P2ABL(self):
phyc = np.dot(self.pressure,self.bellowConfigurationAngleCos)
phys = np.dot(self.pressure,self.bellowConfigurationAngleSin)
self.alpha = sqrt(phyc**2+phys**2)/self.c1
self.beta = atan2(phys,phyc)
self.length = (self.pressure[0]+self.pressure[1]+self.pressure[2]+self.pressure[3]+
self.pressure[4]+self.pressure[5])/self.c1/2*self.radR+self.length0
return self.alpha, self.beta, self.length
def Acturate(self):
self.alpha += self.angleVelocity[0]
self.beta += self.angleVelocity[1]
self.length += self.angleVelocity[2]
if self.alpha >= pi/2:
self.alpha = pi/2
if self.alpha <= 0:
self.alpha = 0
    def SetW(self, alphaW=None, betaW=None, length=None):  # set the alpha/beta angular velocities (and length rate)
if alphaW != None:
self.angleVelocity[0] = alphaW
if betaW != None:
self.angleVelocity[1] = betaW
if length != None:
self.angleVelocity[2] = length
def SetPos(self, X=None, Y=None, Z=None):
if X != None:
self.posVector[0] = X
if Y != None:
self.posVector[1] = Y
if Z != None:
self.posVector[2] = Z
def SetPara(self, A=None, B=None, L=None):
if A != None:
self.alpha = A
if B != None:
self.beta = B
if L != None:
self.length = L
    def GetPara(self, mode=0):  # used by animated3Dplot to fetch (alpha, beta, length)
self.Acturate()
self.ABL2P()
return self.alpha, self.beta, self.length
    def GetPressureD(self):  # return the pressures corresponding to the desired ABL
self.ABLD2PD()
return self.pressureD
    def UpdateD(self, alpha, beta, length):  # update with the desired (alpha, beta, length) values
if self.constriant(alpha, length):
self.alpha = alpha
self.beta = beta
self.length = length
self.ABL2P()
return 1
else:
return 0
def UpdateP(self, pre):
for i in range(6):
self.pressure[i] = pre[i]
self.P2ABL()
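# Minimal usage sketch (values illustrative): command one segment to bend 30 deg
# towards +x at its rest length and read back the corresponding bellow pressures.
#
#   arm = softArm(actuator_type='small')
#   if arm.UpdateD(pi/6, 0.0, arm.length0):   # alpha, beta, length
#       print(arm.pressure)                   # six pressures filled in by ABL2P()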
class SoftObject(object):
def __init__(self, *Arms):
self.num = len(Arms)
self.seg = dict()
self.pts = dict()
self.dst_pos = [0,0,0]
self.dst_dir = [0,0,0]
self.desired = 0
for i in range(self.num):
self.seg[i] = Arms[i]
def inverse_kinematic(self, pts, quat, x0, single = 0):
def test_square(dst, x0, a, n, R, single = 0): # a1 a2 a3 b1 b2 b3 r1 r2 r3
def Pos2ABLD():
x = dst[0]
y = dst[1]
z = dst[2]
mod = x**2 + y**2
alphaD, betaD, lengthD = 0,0,0
if x==.0 and y==.0:
alphaD = 0
betaD = 0
lengthD = z
else:
if mod <= z**2:
alphaD = asin(2*sqrt(mod)*z/(z**2+mod))
#print(self.alphaD)
                        if x == .0:  # choose beta from the signs of x and y, since atan's range is only (-pi/2, pi/2)
if y > .0:
betaD = -pi/2
else:
betaD = pi/2
elif x > 0:
betaD = atan(y/x)
elif x < 0:
betaD = atan(y/x) + pi
lengthD = (z**2+mod)/(2*sqrt(mod))*alphaD
                    elif mod > z**2:
                        # bending angle beyond pi/2: take the supplementary arcsin branch
                        alphaD = pi - asin(2*sqrt(mod)*z/(z**2+mod))
                        if x == .0:
                            betaD = -pi/2 if y > .0 else pi/2
                        elif x > 0:
                            betaD = atan(y/x)
                        else:
                            betaD = atan(y/x) + pi
                        lengthD = (z**2+mod)/(2*sqrt(mod))*alphaD
return [alphaD, betaD, lengthD]
            def single_segment(x):  # forward kinematics of one constant-curvature segment (renamed so it does not shadow the 'single' flag)
a = float(x[0])
b = float(x[1])
l = float(x[2])
if a != 0:
return [
l/a*(1-cos(a))*cos(b),
l/a*(1-cos(a))*sin(b),
l/a*sin(a)
]
else:
return [
0,
0,
l
]
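            # Residual vector minimised by least_squares below (as read from the code):
            #   x = [a1, a2, a3, b1, b2, b3, lm1, lm2, lm3]  (bend angle, bend direction
            #        and a chord-type length parameter for each of the three segments)
            #   r[0:3]   tip position minus the target position dst
            #   r[3:6]   tip z-axis minus the target approach vector a
            #   r[6:9]   tip x-axis minus the target normal vector n
            #   r[9:11]  regularisers keeping the bends near pi/4 and the betas close together
            #   r[11:13] soft constraints pushing the three segment lengths to be equal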
def string_type(x):
a1 = float(x[0])
a2 = float(x[1])
a3 = float(x[2])
b1 = float(x[3])
b2 = float(x[4])
b3 = float(x[5])
lm1 = float(x[6])
lm2 = float(x[7])
lm3 = float(x[8])
if a1 == 0:
l1 = lm1
else:
l1 = 2*lm1*a1/sin(a1/2)
if a2 == 0:
l2 = lm2
else:
l2 = 2*lm2*a2/sin(a2/2)
if a3 == 0:
l3 = lm3
else:
l3 = 2*lm3*a3/sin(a3/2)
result = np.array(
[lm1 * sin(a1 / 2) * cos(b1) - (1 - cos(a1)) * (
lm2 * sin(a2 / 2) * sin(b2) - lm3 * (1 - cos(a2)) * sin(a3 / 2) * sin(b2) * cos(b2) * cos(
b3) + lm3 * (-(1 - cos(a2)) * sin(b2) ** 2 + 1) * sin(a3 / 2) * sin(b3) + lm3 * sin(a2) * sin(
b2) * cos(
a3 / 2)) * sin(b1) * cos(b1) + (-(1 - cos(a1)) * cos(b1) ** 2 + 1) * (
lm2 * sin(a2 / 2) * cos(b2) - lm3 * (1 - cos(a2)) * sin(a3 / 2) * sin(b2) * sin(b3) * cos(
b2) + lm3 * (-(1 - cos(a2)) * cos(b2) ** 2 + 1) * sin(a3 / 2) * cos(b3) + lm3 * sin(a2) * cos(
a3 / 2) * cos(b2)) + (
lm2 * cos(a2 / 2) - lm3 * sin(a2) * sin(a3 / 2) * sin(b2) * sin(b3) - lm3 * sin(a2) * sin(
a3 / 2) * cos(b2) * cos(b3) + lm3 * cos(a2) * cos(a3 / 2)) * sin(a1) * cos(b1)-dst[0],
lm1 * sin(a1 / 2) * sin(b1) - (1 - cos(a1)) * (
lm2 * sin(a2 / 2) * cos(b2) - lm3 * (1 - cos(a2)) * sin(a3 / 2) * sin(b2) * sin(b3) * cos(
b2) + lm3 * (-(1 - cos(a2)) * cos(b2) ** 2 + 1) * sin(a3 / 2) * cos(b3) + lm3 * sin(a2) * cos(
a3 / 2) * cos(b2)) * sin(b1) * cos(b1) + (-(1 - cos(a1)) * sin(b1) ** 2 + 1) * (
lm2 * sin(a2 / 2) * sin(b2) - lm3 * (1 - cos(a2)) * sin(a3 / 2) * sin(b2) * cos(b2) * cos(
b3) + lm3 * (-(1 - cos(a2)) * sin(b2) ** 2 + 1) * sin(a3 / 2) * sin(b3) + lm3 * sin(a2) * sin(
b2) * cos(a3 / 2)) + (
lm2 * cos(a2 / 2) - lm3 * sin(a2) * sin(a3 / 2) * sin(b2) * sin(b3) - lm3 * sin(a2) * sin(
a3 / 2) * cos(b2) * cos(b3) + lm3 * cos(a2) * cos(a3 / 2)) * sin(a1) * sin(b1)-dst[1],
lm1 * cos(a1 / 2) + (
lm2 * cos(a2 / 2) - lm3 * sin(a2) * sin(a3 / 2) * sin(b2) * sin(b3) - lm3 * sin(
a2) * sin(
a3 / 2) * cos(b2) * cos(b3) + lm3 * cos(a2) * cos(a3 / 2)) * cos(a1) - (
lm2 * sin(a2 / 2) * sin(b2) - lm3 * (1 - cos(a2)) * sin(a3 / 2) * sin(b2) * cos(b2) * cos(
b3) + lm3 * (-(1 - cos(a2)) * sin(b2) ** 2 + 1) * sin(a3 / 2) * sin(b3) + lm3 * sin(a2) * sin(
b2) * cos(a3 / 2)) * sin(a1) * sin(b1) - (
lm2 * sin(a2 / 2) * cos(b2) - lm3 * (1 - cos(a2)) * sin(a3 / 2) * sin(b2) * sin(b3) * cos(
b2) + lm3 * (-(1 - cos(a2)) * cos(b2) ** 2 + 1) * sin(a3 / 2) * cos(b3) + lm3 * sin(a2) * cos(
a3 / 2) * cos(b2)) * sin(a1) * cos(b1)-dst[2],
(-(1 - cos(a1)) * (-(1 - cos(a2)) * sin(b2) ** 2 + 1) * sin(b1) * cos(b1) - (1 - cos(a2)) * (
-(1 - cos(a1)) * cos(b1) ** 2 + 1) * sin(b2) * cos(b2) - sin(a1) * sin(a2) * sin(b2) * cos(
b1)) * sin(a3) * sin(b3) + (-(1 - cos(a1)) * sin(a2) * sin(b1) * sin(b2) * cos(b1) + (
-(1 - cos(a1)) * cos(b1) ** 2 + 1) * sin(a2) * cos(b2) + sin(a1) * cos(a2) * cos(
b1)) * cos(
a3) + (
(1 - cos(a1)) * (1 - cos(a2)) * sin(b1) * sin(b2) * cos(b1) * cos(b2) + (
-(1 - cos(a1)) * cos(b1) ** 2 + 1) * (-(1 - cos(a2)) * cos(b2) ** 2 + 1) - sin(a1) * sin(
a2) * cos(b1) * cos(b2)) * sin(a3) * cos(b3) - a[0],
(-(1 - cos(a1)) * (-(1 - cos(a2)) * cos(b2) ** 2 + 1) * sin(b1) * cos(b1) - (1 - cos(a2)) * (
-(1 - cos(a1)) * sin(b1) ** 2 + 1) * sin(b2) * cos(b2) - sin(a1) * sin(a2) * sin(b1) * cos(
b2)) * sin(a3) * cos(b3) + (-(1 - cos(a1)) * sin(a2) * sin(b1) * cos(b1) * cos(b2) + (
-(1 - cos(a1)) * sin(b1) ** 2 + 1) * sin(a2) * sin(b2) + sin(a1) * sin(b1) * cos(
a2)) * cos(
a3) + ((1 - cos(a1)) * (1 - cos(a2)) * sin(b1) * sin(b2) * cos(b1) * cos(b2) + (
-(1 - cos(a1)) * sin(b1) ** 2 + 1) * (-(1 - cos(a2)) * sin(b2) ** 2 + 1) - sin(a1) * sin(
a2) * sin(b1) * sin(b2)) * sin(a3) * sin(b3) - a[1],
(-sin(a1) * sin(a2) * sin(b1) * sin(b2) - sin(a1) * sin(a2) * cos(b1) * cos(b2) + cos(a1) * cos(
a2)) * cos(
a3) + (
(1 - cos(a2)) * sin(a1) * sin(b1) * sin(b2) * cos(b2) - (
-(1 - cos(a2)) * cos(b2) ** 2 + 1) * sin(
a1) * cos(b1) - sin(a2) * cos(a1) * cos(b2)) * sin(a3) * cos(b3) + (
(1 - cos(a2)) * sin(a1) * sin(b2) * cos(b1) * cos(b2) - (
-(1 - cos(a2)) * sin(b2) ** 2 + 1) * sin(
a1) * sin(b1) - sin(a2) * sin(b2) * cos(a1)) * sin(a3) * sin(b3) - a[2],
-(1 - cos(a3)) * (-(1 - cos(a1)) * (-(1 - cos(a2)) * sin(b2) ** 2 + 1) * sin(b1) * cos(b1) - (
1 - cos(a2)) * (
-(1 - cos(a1)) * cos(b1) ** 2 + 1) * sin(b2) * cos(b2) - sin(a1) * sin(
a2) * sin(b2) * cos(
b1)) * sin(b3) * cos(b3) + (-(1 - cos(a3)) * cos(b3) ** 2 + 1) * (
(1 - cos(a1)) * (1 - cos(a2)) * sin(b1) * sin(b2) * cos(b1) * cos(b2) + (
-(1 - cos(a1)) * cos(b1) ** 2 + 1) * (-(1 - cos(a2)) * cos(b2) ** 2 + 1) - sin(a1) * sin(
a2) * cos(b1) * cos(b2)) - (-(1 - cos(a1)) * sin(a2) * sin(b1) * sin(b2) * cos(b1) + (
-(1 - cos(a1)) * cos(b1) ** 2 + 1) * sin(a2) * cos(b2) + sin(a1) * cos(a2) * cos(
b1)) * sin(
a3) * cos(b3) - n[0],
-(1 - cos(a3)) * ((1 - cos(a1)) * (1 - cos(a2)) * sin(b1) * sin(b2) * cos(b1) * cos(b2) + (
-(1 - cos(a1)) * sin(b1) ** 2 + 1) * (-(1 - cos(a2)) * sin(b2) ** 2 + 1) - sin(a1) * sin(
a2) * sin(b1) * sin(b2)) * sin(b3) * cos(b3) + (-(1 - cos(a3)) * cos(b3) ** 2 + 1) * (
-(1 - cos(a1)) * (-(1 - cos(a2)) * cos(b2) ** 2 + 1) * sin(b1) * cos(b1) - (
1 - cos(a2)) * (
-(1 - cos(a1)) * sin(b1) ** 2 + 1) * sin(b2) * cos(b2) - sin(a1) * sin(a2) * sin(
b1) * cos(
b2)) - (-(1 - cos(a1)) * sin(a2) * sin(b1) * cos(b1) * cos(b2) + (
-(1 - cos(a1)) * sin(b1) ** 2 + 1) * sin(a2) * sin(b2) + sin(a1) * sin(b1) * cos(
a2)) * sin(
a3) * cos(b3) - n[1],
-(1 - cos(a3)) * ((1 - cos(a2)) * sin(a1) * sin(b2) * cos(b1) * cos(b2) - (
-(1 - cos(a2)) * sin(b2) ** 2 + 1) * sin(a1) * sin(b1) - sin(a2) * sin(b2) * cos(
a1)) * sin(
b3) * cos(b3) + (-(1 - cos(a3)) * cos(b3) ** 2 + 1) * (
(1 - cos(a2)) * sin(a1) * sin(b1) * sin(b2) * cos(b2) - (
-(1 - cos(a2)) * cos(b2) ** 2 + 1) * sin(
a1) * cos(b1) - sin(a2) * cos(a1) * cos(b2)) - (
-sin(a1) * sin(a2) * sin(b1) * sin(b2) - sin(a1) * sin(a2) * cos(b1) * cos(b2) + cos(
a1) * cos(
a2)) * sin(a3) * cos(b3) - n[2],
1 / 3 * (((2 * a1 - pi * 2 / 4) / (pi * 2 / 4)) ** 2 + (
(2 * a2 - pi * 2 / 4) / (pi * 2 / 4)) ** 2 + (
(2 * a3 - pi * 2 / 4) / (pi * 2 / 4)) ** 2) / 200,
1/3*((b1-b2)**2+(b1-b3)**2+(b3-b2)**2)/200,
(l1-l2),
(l2-l3)
]
)
return result.astype('float64')
result = [0]*9
result[0] = new[0]
result[1] = new[1]
result[2] = new[6]
result[3] = new[2]
result[4] = new[3]
result[5] = new[6]
result[6] = new[4]
result[7] = new[5]
result[8] = new[6]
return result
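    # The repacking above appears to turn the flat solution
    # new = [a1, b1, a2, b2, a3, b3, L] into per-segment triples
    # [a1, b1, L, a2, b2, L, a3, b3, L]; all three segments share the single
    # length estimate new[6].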
def tranformation_string(res):
result = [0]*len(res)
for i in range(int(len(res)/3)):
if 0 > res[3*i]:
result[3*i] = -res[3*i]
result[3*i+1] = res[3*i+1] + pi
elif 0 < res[3*i]:
result[3*i] = res[3*i] # a1
result[3*i+1] = res[3*i+1] # b1
# lm1
result[3*i] = normalize_angle(result[3*i])
result[3*i+1] = normalize_angle(result[3*i+1])
result[3*i+2] = res[3*i+2] * res[3*i] / sin(res[3*i] / 2) / 2
return result
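    # The last line above converts the optimiser's chord-length estimate lm back to
    # an arc length (illustrative note, assuming the usual constant-curvature model):
    # a segment bent by angle a with arc length L has chord lm = 2 * (L / a) * sin(a / 2),
    # hence L = lm * a / (2 * sin(a / 2)).  Worked check: a = pi/2, lm = 0.2 gives
    # L = 0.2 * (pi/2) / (2 * sin(pi/4)) ~= 0.222.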
now = time.time()
x0_rosenbrock = np.array(x0).astype('float64')
# string type
if single:
res = Pos2ABLD()
print(res)
print(single(res))
result = [
res[0], res[1], res[2]
]
else:
res = least_squares(string_type, x0_rosenbrock,
bounds=([-pi, -pi, -pi, -2*pi, -2*pi, -2*pi, 0.05, 0.05, 0.05],
[pi, pi, pi, 2*pi, 2*pi, 2*pi, 1, 1, 1]))
new = np.array([res.x[0], res.x[3], res.x[6],
res.x[1], res.x[4], res.x[7],
res.x[2], res.x[5], res.x[8]
]).astype('float64') # a1 b1 l1 a2 b2 l2 a3 b3 l3
result = tranformation_string(new)
print('time cost:', time.time() - now)
# end
return result
# a1 a2 a3 b1 b2 b3 l1 l2 l3
pts = [pts.x, pts.y, pts.z]
quat = [quat.x, quat.y, quat.z, quat.w]
R = self.quat_transform(quat)
n = [R[0][0],R[1][0],R[2][0]]
a = [R[0][2],R[1][2],R[2][2]]
# print(R)
# normal type
'''pos_now = [self.seg[0].alpha*3, self.seg[0].beta,
self.seg[3].alpha*3, self.seg[3].beta,
self.seg[6].alpha*3, self.seg[6].beta,
self.seg[6].length*3
]'''
# string type
# x0 = [ self.seg[0].alpha * 3, self.seg[0].beta,
# self.seg[3].alpha * 3, self.seg[3].beta,
# self.seg[6].alpha * 3, self.seg[6].beta,
# self.seg[0].length * 2 / self.seg[0].alpha * sin(self.seg[0].alpha / 6),
# self.seg[3].length * 2 / self.seg[3].alpha * sin(self.seg[3].alpha / 6),
# self.seg[6].length * 2 / self.seg[6].alpha * sin(self.seg[6].alpha / 6)
# ]
x0 =1
self.desired = test_square(pts, x0, a, n, R, single)
return self.desired
def quat_transform(self, qua): # alpha beta gamma
R1 = Rotation.from_quat(qua).as_matrix()
return R1
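    # Quick sanity note (illustrative): scipy's Rotation.from_quat expects the
    # scalar-last (x, y, z, w) order, which matches the [quat.x, quat.y, quat.z,
    # quat.w] packing used by the caller; the identity quaternion [0, 0, 0, 1]
    # therefore maps to the 3x3 identity matrix.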
def outputPressure(self):
1
class ik_solver:
def __init__(self):
x = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.2, 0.2, 0.2]
soft1 = softArm(alpha=x[0], beta=x[3], length=x[6], actuator_type='big')
soft2 = softArm(alpha=x[1], beta=x[3], length=x[7], actuator_type='big')
soft3 = softArm(alpha=x[2], beta=x[3], length=x[8], actuator_type='big')
soft4 = softArm(alpha=x[0], beta=x[4], length=x[6])
soft5 = softArm(alpha=x[1], beta=x[4], length=x[7])
soft6 = softArm(alpha=x[2], beta=x[4], length=x[8])
soft7 = softArm(alpha=x[0], beta=x[5], length=x[6])
soft8 = softArm(alpha=x[1], beta=x[5], length=x[7])
soft9 = softArm(alpha=x[2], beta=x[5], length=x[8])
self.softArms = SoftObject(soft1, soft2, soft3, soft4, soft5, soft6, soft7, soft8, soft9)
self.ik_srv_setup()
def handle_ik_srv(self, req):
result = self.softArms.inverse_kinematic(
req.input.pose.position,
req.input.pose.orientation,
req.input.ABL.segment,
1
)
re = Command_ABL()
re.segment[0].A = result[0]
re.segment[0].B = result[1]
re.segment[0].L = result[2]
# print(np.degrees(result[0]))
# print(np.degrees(result[1]))
# print(np.degrees(result[3]))
# print(np.degrees(result[4]))
# print(np.degrees(result[6]))
# print(np.degrees(result[7]))
# print(result[8])
return re
def ik_srv_setup(self):
rospy.init_node('ik_srv')
s = rospy.Service('ik', ik, self.handle_ik_srv)
print('ready for ik service')
rospy.spin()
if __name__ == '__main__':
try:
IK = ik_solver()
print('IK is finished')
except rospy.ServiceException as exc:
print("IK call failed:"+str(exc))
|
the-stack_106_22512 | import cv2
def check_area(area, image_area):
return area / image_area < 0.95
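# check_area() filters out bounding boxes that cover (almost) the whole frame,
# which usually correspond to the page/image border rather than the content to
# crop. Illustrative numbers: on a 640x480 image (image_area = 307200) a
# 600x450 box (ratio ~0.88) passes, while a 635x475 box (ratio ~0.98) is rejected.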
def crop_image(image_path):
img = cv2.imread(image_path)
height, width, channels = img.shape
image_area = height * width
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
retval, thresh_gray = cv2.threshold(gray, thresh=100, maxval=255,
type=cv2.THRESH_BINARY_INV)
contours, hierarchy = cv2.findContours(thresh_gray, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
mx = (0, 0, 0, 0)
mx_area = 0
for cont in contours:
x, y, w, h = cv2.boundingRect(cont)
area = w * h
if area > mx_area and check_area(area, image_area):
mx = x, y, w, h
mx_area = area
x, y, w, h = mx
roi = img[y:y + h, x:x + w]
if roi.any():
cv2.imwrite(image_path, roi) |
the-stack_106_22513 | import random
import math
import unittest
from unittest.mock import patch
from parameterized import parameterized
import pyEpiabm as pe
from pyEpiabm.routine import ToyPopulationFactory
numReps = 1
class TestPopConfig(unittest.TestCase):
"""Test the 'ToyPopConfig' class.
"""
@parameterized.expand([(random.randint(1000, 10000),
random.randint(1, 10),
random.randint(1, 10))
for _ in range(numReps)])
def test_make_pop(self, pop_size, cell_number, microcell_number):
"""Tests for when the population is implemented by default with
no households. Parameters are assigned at random.
"""
# Population is initialised with no households
pop_params = {"population_size": pop_size, "cell_number": cell_number,
"microcell_number": microcell_number}
test_pop = ToyPopulationFactory.make_pop(pop_params)
total_people = 0
count_non_empty_cells = 0
for cell in test_pop.cells:
for microcell in cell.microcells:
total_people += len(microcell.persons)
if len(cell.persons) > 0:
count_non_empty_cells += 1
# Test there are at least one non-empty cell
self.assertTrue(count_non_empty_cells >= 1)
# Test that everyone in the population has been assigned a microcell
self.assertEqual(total_people, pop_size)
# Test a population class object is returned
self.assertIsInstance(test_pop, pe.Population)
@patch("numpy.random.multinomial")
@patch('logging.exception')
def test_make_pop_exception(self, patch_log, patch_random):
"""Tests for when the population is implemented with errors
"""
patch_random.side_effect = ValueError
# Population is initialised with no households
pop_params = {"population_size": 10, "cell_number": 1,
"microcell_number": 1}
ToyPopulationFactory.make_pop(pop_params)
patch_log.assert_called_once_with("ValueError in ToyPopulation"
+ "Factory.make_pop()")
def summarise_pop(self, pop):
# Returns lists of cell and microcell wise populations
# Not a testing function, but used in test below
pop_cells = [] # List of populations in each cell of population
pop_microcells = [] # List of populations in each microcell
for cell in pop.cells:
pop_cells.append(len(cell.persons))
for microcell in cell.microcells:
pop_microcells.append(len(microcell.persons))
return pop_cells, pop_microcells
@parameterized.expand([(random.randint(1000, 10000),
random.randint(5, 10),
random.randint(2, 10),
random.randint(1, 100))
for _ in range(numReps)])
def test_pop_seed(self, pop_size, cell_number, microcell_number, seed):
"""Tests for when the population is implemented by default with
no households. Parameters are assigned at random.
"""
# Define parameters for population generation
pop_params = {"population_size": pop_size, "cell_number": cell_number,
"microcell_number": microcell_number,
"population_seed": seed}
# Create two identical populations with the same seed
seed_pop = ToyPopulationFactory.make_pop(pop_params)
comp_pop = ToyPopulationFactory.make_pop(pop_params)
self.assertEqual(str(seed_pop), str(comp_pop))
seed_cells, seed_microcells = self.summarise_pop(seed_pop)
comp_cells, comp_microcells = self.summarise_pop(comp_pop)
self.assertEqual(seed_cells, comp_cells)
self.assertEqual(seed_microcells, comp_microcells)
# Also compare to a population with a different seed
pop_params["population_seed"] = seed + 1 # Change seed of population
diff_pop = ToyPopulationFactory().make_pop(pop_params)
diff_cells, diff_microcells = self.summarise_pop(diff_pop)
self.assertNotEqual(seed_cells, diff_cells)
self.assertNotEqual(seed_microcells, diff_microcells)
@parameterized.expand([(random.randint(1000, 10000) * numReps,
random.randint(1, 10) * numReps,
random.randint(1, 10) * numReps,
random.randint(1, 10) * numReps)
for _ in range(numReps)])
def test_if_households(self, pop_size, cell_number, microcell_number,
household_number):
"""Tests when households are implemented.
"""
# Initialises population with households
pop_params = {"population_size": pop_size, "cell_number": cell_number,
"microcell_number": microcell_number,
"household_number": household_number}
toy_pop = ToyPopulationFactory.make_pop(pop_params)
total_people = 0
households = []
num_empty_households = 0
for cell in toy_pop.cells:
for microcell in cell.microcells:
for person in microcell.persons:
if person.household not in households:
households.append(person.household)
if len(person.household.persons) == 0:
num_empty_households += 1
total_people += len(person.household.persons)
# Some households may be empty so won't be included
total_households = cell_number * microcell_number \
* household_number
self.assertTrue(len(households) <= total_households)
self.assertTrue(num_empty_households < total_households)
@parameterized.expand([(random.randint(1000, 10000) * numReps,
random.randint(1, 10) * numReps,
random.randint(1, 10) * numReps,
random.randint(1, 10) * numReps)
for _ in range(numReps)])
def test_if_places(self, pop_size, cell_number, microcell_number,
place_number):
"""Tests when places are implemented.
"""
# Initialises population with places
pop_params = {"population_size": pop_size, "cell_number": cell_number,
"microcell_number": microcell_number,
"place_number": place_number}
toy_pop = ToyPopulationFactory.make_pop(pop_params)
places = []
for cell in toy_pop.cells:
for microcell in cell.microcells:
places += microcell.places
        # Test that the correct number of places have been added to each microcell
self.assertEqual(place_number,
len(toy_pop.cells[0].microcells[0].places))
@patch('logging.exception')
def test_assign_cell_locations_rand(self, mock_log):
pop_params = {"population_size": 10, "cell_number": 2,
"microcell_number": 2}
test_pop = ToyPopulationFactory.make_pop(pop_params)
for cell in test_pop.cells:
self.assertEqual(cell.location[0], 0)
self.assertEqual(cell.location[1], 0)
ToyPopulationFactory.assign_cell_locations(test_pop)
for cell in test_pop.cells:
self.assertTrue((0 < cell.location[0]) & (1 > cell.location[0]))
self.assertTrue((0 < cell.location[1]) & (1 > cell.location[1]))
mock_log.assert_not_called()
ToyPopulationFactory.assign_cell_locations(test_pop, method='other')
mock_log.assert_called_once_with("ValueError in ToyPopulationFactory"
+ ".assign_cell_locations()")
@parameterized.expand([(random.randint(2, 20) * numReps,
random.randint(2, 20) * numReps)
for _ in range(numReps)])
def test_assign_cell_locations_unix(self, cell_num, mcell_num):
pop_params = {"population_size": 100, "cell_number": cell_num,
"microcell_number": mcell_num}
test_pop = ToyPopulationFactory.make_pop(pop_params)
ToyPopulationFactory.assign_cell_locations(test_pop, "uniform_x")
for i, cell in enumerate(test_pop.cells):
self.assertAlmostEqual(cell.location[0], i / (cell_num - 1))
self.assertAlmostEqual(cell.location[1], 0)
for j, mcell in enumerate(cell.microcells):
self.assertAlmostEqual(mcell.location[0], cell.location[0])
self.assertAlmostEqual(mcell.location[1], j / (mcell_num - 1))
@parameterized.expand([(random.randint(2, 20) * numReps,
random.randint(2, 20) * numReps)
for _ in range(numReps)])
def test_assign_cell_locations_grid(self, cell_num, mcell_num):
pop_params = {"population_size": 100, "cell_number": cell_num,
"microcell_number": mcell_num}
test_pop = ToyPopulationFactory.make_pop(pop_params)
ToyPopulationFactory.assign_cell_locations(test_pop, "grid")
grid_len = math.ceil(math.sqrt(cell_num))
for i, cell in enumerate(test_pop.cells):
self.assertAlmostEqual(cell.location[0],
(i % grid_len) / (grid_len - 1))
self.assertAlmostEqual(cell.location[1],
(i // grid_len) / (grid_len - 1))
mcell_len = math.ceil(math.sqrt(len(cell.microcells)))
for j, mcell in enumerate(cell.microcells):
test_x = (cell.location[0] +
(j % mcell_len - .5 * (mcell_len - 1)) /
(grid_len * (mcell_len - 1)))
test_y = (cell.location[1] +
(j // mcell_len - .5 * (mcell_len - 1)) /
(grid_len * (mcell_len - 1)))
self.assertAlmostEqual(mcell.location[0], test_x)
self.assertAlmostEqual(mcell.location[1], test_y)
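        # Worked example of the offset formula above (it matches the hard-coded
        # values in test_assign_cell_locations_known_grid below): with
        # cell_num = mcell_num = 4, grid_len = mcell_len = 2, so for cell 0 at
        # (0, 0) the first microcell (j = 0) sits at
        # ((0 - 0.5) / 2, (0 - 0.5) / 2) = (-0.25, -0.25).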
def test_assign_cell_locations_known_grid(self):
pop_params = {"population_size": 100, "cell_number": 4,
"microcell_number": 4}
test_pop = ToyPopulationFactory.make_pop(pop_params)
ToyPopulationFactory.assign_cell_locations(test_pop, "grid")
x_pos = [0, 1, 0, 1]
y_pos = [0, 0, 1, 1]
mx_pos0 = [-.25, .25, -.25, .25]
my_pos0 = [-.25, -.25, .25, .25]
mx_pos1 = [.75, 1.25, .75, 1.25]
my_pos1 = [.75, .75, 1.25, 1.25]
for i, cell in enumerate(test_pop.cells):
self.assertAlmostEqual(cell.location[0], x_pos[i])
self.assertAlmostEqual(cell.location[1], y_pos[i])
for j, mcell in enumerate(test_pop.cells[0].microcells):
self.assertAlmostEqual(mcell.location[0], mx_pos0[j])
self.assertAlmostEqual(mcell.location[1], my_pos0[j])
for j, mcell in enumerate(test_pop.cells[3].microcells):
self.assertAlmostEqual(mcell.location[0], mx_pos1[j])
self.assertAlmostEqual(mcell.location[1], my_pos1[j])
if __name__ == '__main__':
unittest.main()
|
the-stack_106_22515 | # --------------------------------------------
# Main part of the plugin
#
# JL Diaz (c) 2019
# MIT License
# --------------------------------------------
from collections import defaultdict
from pathlib import Path
import os
import yaml
import jinja2
from jinja2.ext import Extension
from mkdocs.structure.files import File
from mkdocs.structure.nav import Section
from mkdocs.plugins import BasePlugin
from mkdocs.config.config_options import Type
try:
from pymdownx.slugs import uslugify_cased_encoded as slugify
except ImportError:
from markdown.extensions.toc import slugify
def slugify_this(text):
return slugify(text, '-')
class SlugifyExtension(Extension):
def __init__(self, environment):
super(SlugifyExtension, self).__init__(environment)
environment.filters['slugify'] = slugify_this
class TagsPlugin(BasePlugin):
"""
Creates "tags.md" file containing a list of the pages grouped by tags
It uses the info in the YAML metadata of each page, for the pages which
provide a "tags" keyword (whose value is a list of strings)
"""
config_scheme = (
('tags_filename', Type(str, default='tags.md')),
('tags_folder', Type(str, default='aux')),
('tags_template', Type(str)),
)
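    # Illustrative mkdocs.yml registration (assuming the plugin is exposed under
    # the entry-point name "tags"; the option keys come from config_scheme above):
    #
    #   plugins:
    #     - tags:
    #         tags_filename: tags.md
    #         tags_folder: aux
    #         tags_template: templates/custom_tags.md.template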
def __init__(self):
self.metadata = []
self.tags_filename = "tags.md"
self.tags_folder = "aux"
self.tags_template = None
def on_nav(self, nav, config, files):
# nav.items.insert(1, nav.items.pop(-1))
pass
def on_config(self, config):
# Re assign the options
self.tags_filename = Path(self.config.get(
"tags_filename") or self.tags_filename)
self.tags_folder = Path(self.config.get(
"tags_folder") or self.tags_folder)
# Make sure that the tags folder is absolute, and exists
if not self.tags_folder.is_absolute():
self.tags_folder = Path(
config["docs_dir"]) / ".." / self.tags_folder
if not self.tags_folder.exists():
self.tags_folder.mkdir(parents=True)
if self.config.get("tags_template"):
self.tags_template = Path(self.config.get("tags_template"))
def on_files(self, files, config):
# Scan the list of files to extract tags from meta
for f in files:
if not f.src_path.endswith(".md"):
continue
self.metadata.append(get_metadata(f.src_path, config["docs_dir"]))
# Create new file with tags
self.generate_tags_file()
# New file to add to the build
newfile = File(
path=str(self.tags_filename),
src_dir=str(self.tags_folder),
dest_dir=config["site_dir"],
use_directory_urls=False
)
files.append(newfile)
def generate_tags_page(self, data):
if self.tags_template is None:
templ_path = Path(__file__).parent / Path("templates")
environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(str(templ_path)),
extensions=[SlugifyExtension]
)
templ = environment.get_template("tags.md.template")
else:
environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(
searchpath=str(self.tags_template.parent)),
extensions=[SlugifyExtension]
)
templ = environment.get_template(str(self.tags_template.name))
stags = sorted(data.items(), key=lambda t: t[0].lower())
dtags = {}
for stag in stags:
try:
tagletter = stag[0][0].upper()
if tagletter not in dtags:
dtags[tagletter] = [stag]
else:
dtags[tagletter].append(stag)
            except Exception:
pass
ldtags = sorted(dtags.items())
output_text = templ.render(
tags=ldtags,
)
return output_text
def generate_tags_file(self):
if self.metadata:
sorted_meta = sorted(
self.metadata, key=lambda e: e.get("year", 5000) if e else 0)
else:
sorted_meta = {}
tag_dict = defaultdict(list)
for e in sorted_meta:
if not e:
continue
if "title" not in e:
e["title"] = "Untitled"
tags = e.get("topic-tags", e.get("topic-auto", e.get("tags", [])))
if tags is not None:
for tag in tags:
tag_dict[tag].append(e)
t = self.generate_tags_page(tag_dict)
with open(str(self.tags_folder / self.tags_filename), "w") as f:
f.write(t)
# Helper functions
def get_metadata(name, path):
# Extract metadata from the yaml at the beginning of the file
def extract_yaml(f):
result = []
c = 0
for line in f:
if line.strip() == "---":
c += 1
continue
if c == 2:
break
if c == 1:
result.append(line)
return "".join(result)
filename = Path(path) / Path(name)
with filename.open() as f:
meta = []
metadata = extract_yaml(f)
if metadata:
try:
meta = yaml.load(metadata, Loader=yaml.FullLoader)
meta.update(filename=name)
            except Exception:
pass
return meta
|
the-stack_106_22516 | class ClockWidget():
def __init__(self):
self.name = "Clock"
self.template = "widget_clock.html"
class PingWidget():
def __init__(self, addrs: list):
self.name = "Ping"
self.template = "widget_ping.html"
self.addrs = addrs
self.targets = self.addrs |
the-stack_106_22517 |
import PyPluMA
import sys
def quote(s):
return '\"' + s + '\"'
def unquote(s):
return s[1:len(s)-1]
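# Illustrative round trip for the two helpers above (doctest style; the OTU name
# is just an example):
#   >>> quote('OTU_1')
#   '"OTU_1"'
#   >>> unquote('"OTU_1"')
#   'OTU_1'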
class CSV2PLSDAPlugin:
def input(self, filename):
# Parameter file
self.parameters = dict()
paramfile = open(filename, 'r')
for line in paramfile:
contents = line.split('\t')
self.parameters[contents[0]] = contents[1].strip()
normabund = open(PyPluMA.prefix()+"/"+self.parameters["normabund"], "r")
metadata = open(PyPluMA.prefix()+"/"+self.parameters["metadata"], "r")
metadata.readline()
self.categories = dict()
# Assuming two-column
self.diffcat = set()
for line in metadata:
line = line.strip()
contents = line.split(',')
self.categories[contents[0]] = contents[1]
self.diffcat.add(contents[1])
microbes = normabund.readline().strip()
self.contents = microbes.split(',')
self.contents = self.contents[1:]
self.lines = []
for line in normabund:
self.lines.append(line.strip())
def run(self):
pass
def output(self, filename):
obs_names = open(filename+"/"+self.parameters["categories"], 'w') # Categories, tabbed
var_ID = open(filename+"/"+self.parameters["observables"], 'w') # Microbes, one column
testsets = open(filename+"/"+self.parameters["targets"], 'w') # Different categories
normabundmod = open(filename+"/"+self.parameters["samples"], 'w')
present = {}
for microbe in self.contents:
if (microbe not in present):
present[microbe] = 1
else:
present[microbe] += 1
microbe = quote(unquote(microbe) + "_" + str(present[microbe]))
var_ID.write(microbe+"\n")
for entry in self.diffcat:
testsets.write(entry+"\n")
print(self.categories)
for j in range(0, len(self.lines)):
contents = self.lines[j].split(',')
sample = contents[0]
contents = contents[1:]
obs_names.write(self.categories[sample])
if (j != len(self.lines)-1):
obs_names.write('\t')
# Transpose
my_mat = []
for i in range(0, len(self.lines)):
my_mat.append(self.lines[i].strip().split(','))
for j in range(1, len(my_mat[0])):
for i in range(0, len(my_mat)):
normabundmod.write(my_mat[i][j])
if (i != len(my_mat)-1):
normabundmod.write(',')
else:
normabundmod.write('\n')
#abundmat = []
#meta = []
|
the-stack_106_22519 | import parser_with_time
import queue
import uuid
import json
inter_communication_time = 0.1
node_cnt = 10
node_ip = ['', '', '', '', '', '', '', '', '', '']
def init_graph(workflow, group_set):
in_degree_vec = dict()
q = queue.Queue()
q.put(workflow.start)
group_set.append({workflow.start.name})
while q.empty() is False:
node = q.get()
for next_node in node.next:
if next_node.name not in in_degree_vec:
in_degree_vec[next_node.name] = 1
q.put(next_node)
group_set.append({next_node.name})
else:
in_degree_vec[next_node.name] += 1
return in_degree_vec
def find_set(node, group_set):
for node_set in group_set:
if node in node_set:
return node_set
return None
def topo_search(workflow, in_degree_vec, group_set, no_net_latency):
dist_vec = dict() # { name: [dist, max_length] }
prev_vec = dict() # { name: [prev_name, length] }
q = queue.Queue()
q.put(workflow.start)
dist_vec[workflow.start.name] = [workflow.start.runtime, 0]
prev_vec[workflow.start.name] = []
while q.empty() is False:
node = q.get()
pre_dist = dist_vec[node.name]
prev_name = node.name
for index in range(len(node.next)):
next_node = node.next[index]
w = node.nextDis[index]
next_node_name = next_node.name
if no_net_latency is True:
w = 0
elif next_node_name in find_set(prev_name, group_set):
w = inter_communication_time
if next_node.name not in dist_vec:
dist_vec[next_node_name] = [pre_dist[0] + w + next_node.runtime, max(pre_dist[1], w)]
prev_vec[next_node_name] = [prev_name, w]
elif dist_vec[next_node_name][0] < pre_dist[0] + w + next_node.runtime:
dist_vec[next_node_name] = [pre_dist[0] + w + next_node.runtime, max(pre_dist[1], w)]
prev_vec[next_node_name] = [prev_name, w]
elif dist_vec[next_node_name][0] == pre_dist[0] + w + next_node.runtime and max(pre_dist[1], w) > \
dist_vec[next_node_name][1]:
dist_vec[next_node_name][1] = max(pre_dist[1], w)
prev_vec[next_node_name] = [prev_name, w]
in_degree_vec[next_node_name] -= 1
if in_degree_vec[next_node_name] == 0:
q.put(next_node)
return dist_vec, prev_vec
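# topo_search() is a longest-path (critical-path) pass over the workflow DAG in
# topological order: dist_vec[name] = [length of the longest start->name path
# including node runtimes and edge latencies, largest single edge latency on that
# path], while prev_vec stores the predecessor and edge weight used, so the
# critical path can be walked back from workflow.end. An edge whose endpoints are
# already in the same group is charged inter_communication_time instead of its
# network latency, and no_net_latency=True gives the latency-free baseline.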
def mergeable(node1, node2, group_set, group_size):
node_set1 = find_set(node1, group_set)
if node2 in node_set1:
return False
node_set2 = find_set(node2, group_set)
if len(node_set1) + len(node_set2) > group_size:
return False
group_set.remove(node_set1)
group_set.remove(node_set2)
group_set.append(node_set1 | node_set2)
return True
penalty_rate = 1.5
def merge_node(crit_vec, group_set, group_size):
merge_flag = False
for edge in crit_vec:
merge_flag = merge_flag | mergeable(edge[0], edge[1][0], group_set, group_size)
if merge_flag:
break
return merge_flag
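# Overall heuristic used by grouping() below: recompute the critical path with
# latencies; while it is still longer than penalty_rate (1.5x) times the
# latency-free critical path, merge the endpoints of the highest-latency mergeable
# edge on that path into one group (raising group_size by one when nothing can be
# merged), so that edge is charged inter_communication_time instead. Each final
# group is then pinned to a worker via hash(group_id) % node_cnt; note that str
# hashes differ between interpreter runs unless PYTHONHASHSEED is fixed.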
def get_prerequisite(workflow):
result = []
for node in workflow.nodes:
prerequisite = []
for pre_node in node.prev:
prerequisite.append(pre_node.name)
result.append({'name': node.name, 'prerequisite': prerequisite})
return result
def grouping(req_id, workflow):
topo_search_cnt = 0
group_set = list()
in_degree_vec = init_graph(workflow, group_set)
group_size = 2
total_node_cnt = len(workflow.nodes)
no_latency_dist_vec, _ = topo_search(workflow, in_degree_vec.copy(), group_set, True)
no_latency_crit_length = no_latency_dist_vec[workflow.end.name][0]
while True:
dist_vec, prev_vec = topo_search(workflow, in_degree_vec.copy(), group_set, False)
topo_search_cnt = topo_search_cnt + 1
crit_length = dist_vec[workflow.end.name][0]
if crit_length < no_latency_crit_length * penalty_rate:
break
elif group_size == total_node_cnt:
break
crit_vec = dict()
tmp_node_name = workflow.end.name
while tmp_node_name != workflow.start.name:
crit_vec[tmp_node_name] = prev_vec[tmp_node_name]
tmp_node_name = prev_vec[tmp_node_name][0]
crit_vec = sorted(crit_vec.items(), key=lambda c: c[1][1], reverse=True)
if not merge_node(crit_vec, group_set, group_size):
group_size = group_size + 1
merge_node(crit_vec, group_set, group_size)
group_detail = []
for node_set in group_set:
group_id = str(uuid.uuid4())
for node in node_set:
group_detail.append({'name': node, 'group_id': group_id, 'node_id': hash(group_id) % node_cnt})
prerequisite = get_prerequisite(workflow)
group_size = 1 if topo_search_cnt == 1 else group_size
ratio = crit_length / no_latency_crit_length
return json.dumps(group_detail), json.dumps(prerequisite)
detail, requisite = grouping('0', parser_with_time.mainObject)
print(detail)
print(requisite)
|
the-stack_106_22520 | """
Pyramid views (controllers in the classical meaning) for traversal resources.
Each class of :class:`spree.rest.traversal.APIResource`
and :class:`spree.rest.traversal.APIAction` defines its own view.
"""
from marshmallow import ValidationError, MarshalResult
from . import events
from .endpoints import (
APICollection,
APIEntity,
APIAction
)
from pyramid import httpexceptions
def validation_error_view(error, request):
"""
Return Marshmallow :class:`ValidationError`
as a JSON response with a proper response code.
:param error: Marshmallow error instance
:type error: ValidationError
:param request: Pyramid request
:type request: pyramid.request.Request
:return: Error messages
:rtype: dict[str,list]
"""
request.response.status_int = 400
return error.messages
def get_serialized_data(serialized):
if isinstance(serialized, MarshalResult):
return serialized.data
return serialized
def process_get_request(context, request):
"""
Process GET requests on either :class:`APICollectionEndpoint`
or :class:`APIEntityEndpoint`
:param context: API endpoint context
:type context: APIEntity|APICollection
:param request: Pyramid request
:type request: pyramid.request.Request
:return: Serialization result, most likely a :class:`dict`
:rtype: dict
"""
retrieved = context.retrieve(request)
if retrieved is None:
raise httpexceptions.HTTPNotFound()
serialized = context.serialize(retrieved)
return get_serialized_data(serialized)
class TraversalResourceView(object):
def __init__(self, context, request):
"""
Create a Traversal REST view.
:param context: API endpoint context
:type context: APIEntity|APICollection
:param request: Pyramid request
:type request: pyramid.request.Request
"""
self.request = request
self.context = context
def collection_get(self):
"""
Process GET requests on :class:`APICollectionEndpoint`
:rtype: dict
"""
return process_get_request(self.context, self.request)
def collection_post(self):
"""
Process POST requests on :class:`APICollectionEndpoint`
:returns: POST action result
:rtype: dict
"""
deserialized = self.context.deserialize(self.request, self.context.create_schema)
# ========== BEFORE EVENTS =========
self.context.before_create(
request=self.request,
deserialized=deserialized
)
# noinspection PyCallByClass,PyTypeChecker
self.request.registry.notify(events.BeforeCreate(
view=self,
deserialized=deserialized
))
# ========== CREATE CALL ==========
created = self.context.create(self.request, deserialized)
# ========== AFTER EVENTS ==========
self.context.after_create(
request=self.request,
created=created,
deserialized=deserialized
)
# noinspection PyCallByClass,PyTypeChecker
self.request.registry.notify(events.AfterCreate(
view=self,
created=created,
deserialized=deserialized
))
# ========== SERIALIZE ==========
serialized = self.context.serialize(created)
return get_serialized_data(serialized)
def entity_get(self):
"""
Process GET requests on :class:`APIEntityEndpoint`
:rtype: dict
"""
return process_get_request(self.context, self.request)
def entity_put(self):
"""
Process PUT requests on :class:`APIEntityEndpoint`
:rtype: dict
"""
deserialized = self.context.deserialize(self.request, self.context.update_schema)
# ========== BEFORE EVENTS =========
self.context.before_update(
request=self.request,
deserialized=deserialized
)
# noinspection PyCallByClass,PyTypeChecker
self.request.registry.notify(events.BeforeUpdate(
view=self,
deserialized=deserialized
))
# ========== UPDATE CALL ==========
updated = self.context.update(self.request, deserialized)
# ========== AFTER EVENTS ==========
self.context.after_update(
request=self.request,
updated=updated,
deserialized=deserialized
)
# noinspection PyCallByClass,PyTypeChecker
self.request.registry.notify(events.AfterUpdate(
view=self,
updated=updated,
deserialized=deserialized
))
# ========== SERIALIZE ==========
serialized = self.context.serialize(updated)
return get_serialized_data(serialized)
def entity_delete(self):
"""
Process DELETE requests on :class:`APIEntityEndpoint`
:rtype: dict
"""
raise NotImplementedError
class TraversalActionView(object):
def __init__(self, context, request):
"""
:type context: APIAction
:type request: pyramid.request.Request
"""
self.context = context
self.request = request
def _method(self, method):
deserialized = self.context.deserialize(self.request)
result = getattr(self.context, method)(self.request, deserialized)
return get_serialized_data(result)
def get(self):
result = self.context.get(self.request)
return get_serialized_data(result)
def post(self):
return self._method('post')
def put(self):
return self._method('put')
def delete(self):
return self._method('delete')
def includeme(config):
"""
Include traversal view configuration
    :param config: Pyramid configurator used to register the views
    :type config: pyramid.config.Configurator
"""
# # # # # # # # # # # #
# APIResource #
# # # # # # # # # # # #
config.add_view(
TraversalResourceView, attr='collection_get',
request_method='GET', context=APICollection,
renderer='json', permission='view'
)
config.add_view(
TraversalResourceView, attr='collection_post',
request_method='POST', context=APICollection,
renderer='json', permission='create'
)
config.add_view(
TraversalResourceView, attr='entity_get',
request_method='GET', context=APIEntity,
renderer='json', permission='view'
)
config.add_view(
TraversalResourceView, attr='entity_put',
request_method='PUT', context=APIEntity,
renderer='json', permission='update'
)
# # # # # # # # # # # #
# APIAction #
# # # # # # # # # # # #
config.add_view(
TraversalActionView, attr='get',
request_method='GET', context=APIAction,
renderer='json', permission='view'
)
config.add_view(
TraversalActionView, attr='post',
request_method='POST', context=APIAction,
renderer='json', permission='create'
)
config.add_view(
TraversalActionView, attr='put',
request_method='PUT', context=APIAction,
renderer='json', permission='update'
)
config.add_view(
TraversalActionView, attr='delete',
request_method='DELETE', context=APIAction,
renderer='json', permission='delete'
)
# # # # # # # # # # # #
# ValidationError #
# # # # # # # # # # # #
config.add_view(
validation_error_view, context=ValidationError,
renderer='json'
)
|
the-stack_106_22521 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import os
import pickle as pk
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, LSTM, Dense, Embedding
from tensorflow.keras.optimizers import Nadam
from tensorflow.keras.backend import categorical_crossentropy
from tensorflow.keras.preprocessing import sequence
from nltk.translate.bleu_score import sentence_bleu
# In[2]:
# Conversational Model Metric
def perplexity(y_true, y_pred):
    return pow(2, categorical_crossentropy(y_true, y_pred))
def bleu(y_true, y_pred):
y_true = np.array(y_true).tolist()
y_pred = np.array(y_pred).tolist()
return sentence_bleu(y_true, y_pred)
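# perplexity() above reports 2 ** H, where H is the batch categorical cross-entropy.
# Keras computes H in nats, so exp(H) would be the textbook perplexity; 2 ** H is a
# monotonic variant of it (lower is still better), which is sufficient for tracking
# training progress.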
# In[3]:
def createModelDirs(output_dir, model_dir, c=0):
'''Return model directories for outputing logs and checkpoints'''
final_dir = os.path.join(output_dir,model_dir+'_v{}'.format(c))
chpts_dir = os.path.join(final_dir,'chpts')
logs_dir = os.path.join(final_dir,'logs')
if model_dir+'_v{}'.format(c) in os.listdir(output_dir):
c += 1
final_dir, chpts_dir, logs_dir = createModelDirs(output_dir, model_dir, c)
else:
os.mkdir(final_dir)
os.mkdir(chpts_dir)
os.mkdir(logs_dir)
return final_dir, chpts_dir, logs_dir
# In[4]:
# load variables
word_freqs_inp = pk.load(open('output/data_cleaning_nlp/word_freqs_input.pk', 'rb'))
word_freqs_out = pk.load(open('output/data_cleaning_nlp/word_freqs_output.pk', 'rb'))
x = pk.load(open('output/data_cleaning_nlp/input_data.pk', 'rb'))
y = pk.load(open('output/data_cleaning_nlp/target_data.pk', 'rb'))
# In[5]:
## Hyper-parameters
# data features
MAX_FEATURES_input = 1000
input_len = 125
MAX_FEATURES_output = 1000
target_len = 125
# training parameters
num_epochs = 5
batch_size = 32
# model estructure
embed_size = 512
hidden_size = 264
n_encoder_layers = 3
encoder_hidden_sizes = [256, 128, 64]
n_decoder_layers = 3
lstm_hidden_sizes = [256, 128, 64]
# In[6]:
# Define output dir
outDir = 'output/'
actualDir = 'trained_model'
print()
if not(actualDir in os.listdir(outDir)):
os.mkdir(os.path.join(outDir, actualDir))
print('output dir created')
else:
print('output dir already created')
print()
# In[7]:
# Define directories for outputs
actual_outDir = os.path.join(outDir, actualDir)
modelDir = 'model_epochs-{}_batch-{}_hidden-{}_embed-{}'.format(num_epochs, batch_size, hidden_size, embed_size)
finalDir, chptsDir, logsDir = createModelDirs(actual_outDir,modelDir)
# In[8]:
### Build vocabulary of unique words
## Inputs
vocab_size_input = min(MAX_FEATURES_input, len(word_freqs_inp)) + 2
word2index_inp = {x[0]: i+2 for i, x in enumerate(word_freqs_inp.most_common(MAX_FEATURES_input))}
word2index_inp["PAD"] = 0
word2index_inp["UNK"] = 1
index2word_inp = {v:k for k, v in word2index_inp.items()}
## Outputs
vocab_size_output = min(MAX_FEATURES_output, len(word_freqs_out)) + 4
word2index_out = {x[0]: i+4 for i, x in enumerate(word_freqs_out.most_common(MAX_FEATURES_output))}
word2index_out["PAD"] = 0
word2index_out["UNK"] = 1
word2index_out["GO"] = 2
word2index_out["EOS"] = 3
index2word_out = {v:k for k, v in word2index_out.items()}
# Save dictionaries in model directory
pk.dump(word2index_inp, open(os.path.join(finalDir,'word2index_inp.pk'),'wb'))
pk.dump(index2word_inp, open(os.path.join(finalDir,'index2word_inp.pk'),'wb'))
pk.dump(word2index_out, open(os.path.join(finalDir,'word2index_out.pk'),'wb'))
pk.dump(index2word_out, open(os.path.join(finalDir,'index2word_out.pk'),'wb'))
# In[9]:
# Filter records by lenght
x_new = []
y_new = []
for input_, target_ in zip(x,y):
if all([len(input_) <= input_len, len(input_) > 0, len(target_) <= target_len, len(target_) > 0]):
x_new.append(input_)
y_new.append(target_)
print('number of records after filtering by lenght:', len(x_new))
# Create a copy of conversations with the words replaced by their IDs
X_input = np.empty((len(x_new),), dtype=list)
y_input = np.empty((len(y_new),), dtype=list)
y_target_ids = np.empty((len(y_new),), dtype=list)
for i in range(len(x_new)):
seqs_x = []
seqs_y_input = []
seqs_y_target = []
# Replace input sequences IDs
for word in x_new[i]:
if word in word2index_inp:
seqs_x.append(word2index_inp[word])
else:
seqs_x.append(word2index_inp["UNK"]) # Replace words with low frequency with <UNK>
# Target sequences IDs
seqs_y_input = [word2index_out["GO"]] # Start of Sentence ID
for word in y_new[i]:
if word in word2index_out:
seqs_y_input.append(word2index_out[word])
seqs_y_target.append(word2index_out[word])
else:
# Replace words with low frequency with <UNK>
seqs_y_input.append(word2index_out["UNK"])
seqs_y_target.append(word2index_out["UNK"])
seqs_y_target.append(word2index_out["EOS"]) # End of Sentece ID
X_input[i] = seqs_x
y_input[i] = seqs_y_input
y_target_ids[i] = seqs_y_target
X_input = sequence.pad_sequences(X_input, input_len, padding='post')
y_input = sequence.pad_sequences(y_input, target_len, padding='post')
y_target_ids = sequence.pad_sequences(y_target_ids, target_len, padding='post')
# Create one-hot target variable
y_target = np.empty((len(y_target_ids), target_len, vocab_size_output))
for i in range(len(y_target_ids)):
for j in range(target_len):
y_target[i, j, y_target_ids[i,j]] = 1
print("y_target size = %f gigabytes" % ((y_target.size * y_target.itemsize)/1e9))
# Save X and y input
pk.dump(X_input, open(os.path.join(finalDir,'x_inp.pk'),'wb'))
pk.dump(y_input, open(os.path.join(finalDir,'y_inp.pk'),'wb'))
# In[11]:
## Tensorflow Keras Conversational Model
# Define an input sequence and process it.
encoder_inputs = Input(shape=(None,))
# Set up encoder, output lstm states
encoder_embed_layer = Embedding(vocab_size_input, embed_size, mask_zero=True)
encoder_embed = encoder_embed_layer(encoder_inputs)
encoder_layers = [LSTM(encoder_hidden_sizes[i], return_sequences=True, go_backwards=True) for i in range(n_encoder_layers)]
encoder_lstms_outputs = []
for i in range(n_decoder_layers):
if i == 0:
encoder_lstms_outputs.append(encoder_layers[i](encoder_embed))
else:
encoder_lstms_outputs.append(encoder_layers[i](encoder_lstms_outputs[i-1]))
encoder_lstm, state_h, state_c = LSTM(hidden_size, return_state=True,
go_backwards=True)(encoder_lstms_outputs[-1])
encoder_states = [state_h, state_c]
# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = Input(shape=(None,))
decoder_embed_layer = Embedding(vocab_size_output, embed_size, mask_zero=True)
decoder_embed = decoder_embed_layer(decoder_inputs)
decoder_lstm = LSTM(hidden_size, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_embed, initial_state=encoder_states)
decoder_layers = [LSTM(lstm_hidden_sizes[i], return_sequences=True) for i in range(n_decoder_layers)]
decoder_lstms_outputs = []
for i in range(n_decoder_layers):
if i == 0:
decoder_lstms_outputs.append(decoder_layers[i](decoder_outputs))
else:
decoder_lstms_outputs.append(decoder_layers[i](decoder_lstms_outputs[i-1]))
# Create dense vector with next word probability
decoder_dense = Dense(vocab_size_output, activation='softmax')
decoder_outputs = decoder_dense(decoder_lstms_outputs[-1])
# Define the model that will turn 'X_input' and 'y_input' into 'y_target'
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
# Compile model
model.compile(optimizer=Nadam(), loss='categorical_crossentropy', metrics=[perplexity])
# Model Estructure Summary
model.summary()
# In[12]:
## Inference Model
# Encoder
encoder_model = Model(encoder_inputs, encoder_states)
# Decoder inference inputs
decoder_state_input_h = Input(shape=(hidden_size,))
decoder_state_input_c = Input(shape=(hidden_size,))
decoder_states_input = [decoder_state_input_h, decoder_state_input_c]
# Decoder inference
decoder_embed = decoder_embed_layer(decoder_inputs)
decoder_outputs, state_h, state_c = decoder_lstm(decoder_embed, initial_state=decoder_states_input)
decoder_states = [state_h, state_c]
decoder_layers = [LSTM(lstm_hidden_sizes[i], return_sequences=True) for i in range(n_decoder_layers)]
decoder_lstms_outputs = []
for i in range(n_decoder_layers):
if i == 0:
decoder_lstms_outputs.append(decoder_layers[i](decoder_outputs))
else:
decoder_lstms_outputs.append(decoder_layers[i](decoder_lstms_outputs[i-1]))
decoder_outputs = decoder_dense(decoder_lstms_outputs[-1])
decoder_model = Model(
[decoder_inputs] + decoder_states_input,
[decoder_outputs] + decoder_states)
# Save models
encoder_model.save(os.path.join(finalDir,'encoder_model_{}_{}_{}_{}.h5'.format(hidden_size,batch_size,num_epochs,embed_size)))
decoder_model.save(os.path.join(finalDir,'decoder_model_{}_{}_{}_{}.h5'.format(hidden_size,batch_size,num_epochs,embed_size)))
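# Illustrative greedy-decoding sketch (not part of the original script): input_ids
# is assumed to be a list of encoder-vocabulary ids built with word2index_inp.
# Note that the stacked LSTMs recreated for the inference decoder above are fresh
# layer instances rather than the trained ones, so this only demonstrates the
# decoding-loop mechanics.
def decode_sequence_sketch(input_ids):
    enc_in = sequence.pad_sequences([input_ids], input_len, padding='post')
    states = encoder_model.predict(enc_in)                 # [state_h, state_c]
    target_seq = np.array([[word2index_out["GO"]]])        # start token
    decoded_words = []
    for _ in range(target_len):
        output_tokens, h, c = decoder_model.predict([target_seq] + states)
        next_id = int(np.argmax(output_tokens[0, -1, :]))  # greedy choice
        if next_id == word2index_out["EOS"]:
            break
        decoded_words.append(index2word_out.get(next_id, "UNK"))
        target_seq = np.array([[next_id]])                 # feed prediction back in
        states = [h, c]
    return " ".join(decoded_words)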
# In[13]:
# Define callbacks
model_checkpoint = ModelCheckpoint(os.path.join(chptsDir,'{epoch:02d}_{val_loss:.2f}.chpt'),
monitor='val_loss', verbose=0, save_best_only=True,
save_weights_only=False, mode='auto', period=1)
early_stopping = EarlyStopping(monitor='val_loss', patience=50)
tensorboard = TensorBoard(log_dir=logsDir, histogram_freq=20, batch_size=32, write_graph=True,
write_grads=True, embeddings_freq=0, embeddings_layer_names=None,
embeddings_metadata=None, embeddings_data=None)
# In[14]:
# Fit model
model_history = model.fit([X_input, y_input], y_target,
batch_size=batch_size,
epochs=num_epochs,
validation_split=0.05,
callbacks=[early_stopping, model_checkpoint, tensorboard])
# In[ ]:
# Save model history
with open(os.path.join(finalDir,'history_{}_{}_{}_{}.pk'.format(hidden_size,batch_size,num_epochs,embed_size)),'wb') as f:
pk.dump(model_history.history, f)
# lemmatization:
# spacy
# clips: pattern.es
#
#
# dictionaries:
# word list
#
#
# word vectors:
# vert
|
the-stack_106_22523 | #!/usr/bin/env python
import contextlib
import glob
import io
import os
import pathlib
import re
header_restrictions = {
"barrier": "!defined(_LIBCPP_HAS_NO_THREADS)",
"future": "!defined(_LIBCPP_HAS_NO_THREADS)",
"latch": "!defined(_LIBCPP_HAS_NO_THREADS)",
"mutex": "!defined(_LIBCPP_HAS_NO_THREADS)",
"semaphore": "!defined(_LIBCPP_HAS_NO_THREADS)",
"shared_mutex": "!defined(_LIBCPP_HAS_NO_THREADS)",
"stdatomic.h": "__cplusplus > 202002L && !defined(_LIBCPP_HAS_NO_THREADS)",
"thread": "!defined(_LIBCPP_HAS_NO_THREADS)",
"filesystem": "!defined(_LIBCPP_HAS_NO_FILESYSTEM_LIBRARY)",
"clocale": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
"codecvt": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
"fstream": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
"iomanip": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
"ios": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
"iostream": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
"istream": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
"locale.h": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
"locale": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
"ostream": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
"regex": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
"sstream": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
"streambuf": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
"strstream": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)",
"wctype.h": "!defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS)",
"cwctype": "!defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS)",
"cwchar": "!defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS)",
"wchar.h": "!defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS)",
"experimental/coroutine": "!defined(_LIBCPP_HAS_NO_EXPERIMENTAL_COROUTINES)",
"experimental/regex": "!defined(_LIBCPP_HAS_NO_LOCALIZATION) && __cplusplus >= 201103L",
"experimental/deque": "__cplusplus >= 201103L",
"experimental/map": "__cplusplus >= 201103L",
"experimental/memory_resource": "__cplusplus >= 201103L",
"experimental/forward_list": "__cplusplus >= 201103L",
"experimental/list": "__cplusplus >= 201103L",
"experimental/set": "__cplusplus >= 201103L",
"experimental/string": "__cplusplus >= 201103L",
"experimental/unordered_map": "__cplusplus >= 201103L",
"experimental/unordered_set": "__cplusplus >= 201103L",
"experimental/vector": "__cplusplus >= 201103L",
}
private_headers_still_public_in_modules = [
'__assert', '__bsd_locale_defaults.h', '__bsd_locale_fallbacks.h', '__config',
'__config_site.in', '__debug', '__hash_table',
'__threading_support', '__tree', '__undef_macros'
]
def find_script(file):
"""Finds the script used to generate a file inside the file itself. The script is delimited by
BEGIN-SCRIPT and END-SCRIPT markers.
"""
with open(file, 'r') as f:
content = f.read()
match = re.search(r'^BEGIN-SCRIPT$(.+)^END-SCRIPT$', content, flags=re.MULTILINE | re.DOTALL)
if not match:
raise RuntimeError("Was unable to find a script delimited with BEGIN-SCRIPT/END-SCRIPT markers in {}".format(test_file))
return match.group(1)
def execute_script(script, variables):
"""Executes the provided Mako template with the given variables available during the
evaluation of the script, and returns the result.
"""
code = compile(script, 'fake-filename', 'exec')
output = io.StringIO()
with contextlib.redirect_stdout(output):
exec(code, variables)
output = output.getvalue()
return output
def generate_new_file(file, new_content):
"""Generates the new content of the file by inserting the new content in-between
two '// GENERATED-MARKER' markers located in the file.
"""
with open(file, 'r') as f:
old_content = f.read()
try:
before, begin_marker, _, end_marker, after = re.split(r'(// GENERATED-MARKER\n)', old_content, flags=re.MULTILINE | re.DOTALL)
except ValueError:
raise RuntimeError("Failed to split {} based on markers, please make sure the file has exactly two '// GENERATED-MARKER' occurrences".format(file))
return before + begin_marker + new_content + end_marker + after
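# Illustrative layout of a generated file (the markers are literal lines):
#
#   ... hand-written prologue ...
#   // GENERATED-MARKER
#   <everything here is replaced with the script output>
#   // GENERATED-MARKER
#   ... hand-written epilogue ...
#
# re.split() with a capturing group on a file containing exactly two markers yields
# five pieces, which is why the unpacking above expects before/marker/old/marker/after.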
def produce(test_file, variables):
script = find_script(test_file)
result = execute_script(script, variables)
new_content = generate_new_file(test_file, result)
with open(test_file, 'w', newline='\n') as f:
f.write(new_content)
def is_header(file):
"""Returns whether the given file is a header (i.e. not a directory or the modulemap file)."""
return not file.is_dir() and not file.name == 'module.modulemap'
def main():
monorepo_root = pathlib.Path(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
include = pathlib.Path(os.path.join(monorepo_root, 'libcxx', 'include'))
test = pathlib.Path(os.path.join(monorepo_root, 'libcxx', 'test'))
assert(monorepo_root.exists())
toplevel_headers = sorted(str(p.relative_to(include)) for p in include.glob('[a-z]*') if is_header(p))
experimental_headers = sorted(str(p.relative_to(include)) for p in include.glob('experimental/[a-z]*') if is_header(p))
extended_headers = sorted(str(p.relative_to(include)) for p in include.glob('ext/[a-z]*') if is_header(p))
public_headers = toplevel_headers + experimental_headers + extended_headers
private_headers = sorted(str(p.relative_to(include)) for p in include.rglob('*') if is_header(p) and str(p.relative_to(include)).startswith('__'))
variables = {
'toplevel_headers': toplevel_headers,
'experimental_headers': experimental_headers,
'extended_headers': extended_headers,
'public_headers': public_headers,
'private_headers': private_headers,
'header_restrictions': header_restrictions,
'private_headers_still_public_in_modules': private_headers_still_public_in_modules
}
produce(test.joinpath('libcxx/assertions/headers_declare_assertion_handler.sh.cpp'), variables)
produce(test.joinpath('libcxx/clang_tidy.sh.cpp'), variables)
produce(test.joinpath('libcxx/double_include.sh.cpp'), variables)
produce(test.joinpath('libcxx/min_max_macros.compile.pass.cpp'), variables)
produce(test.joinpath('libcxx/nasty_macros.compile.pass.cpp'), variables)
produce(test.joinpath('libcxx/no_assert_include.compile.pass.cpp'), variables)
produce(test.joinpath('libcxx/private_headers.verify.cpp'), variables)
if __name__ == '__main__':
main()
|
the-stack_106_22526 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class GatewayRouteListResult(Model):
"""List of virtual network gateway routes.
:param value: List of gateway routes
:type value: list[~azure.mgmt.network.v2017_06_01.models.GatewayRoute]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[GatewayRoute]'},
}
def __init__(self, **kwargs):
super(GatewayRouteListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
|