repo_name | path | copies | size | content | license
---|---|---|---|---|---|
Milstein/crowdsource-platform
|
crowdsourcing/migrations/0022_auto_20150728_2153.py
|
15
|
6320
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('crowdsourcing', '0021_auto_20150728_2148'),
]
operations = [
migrations.AlterField(
model_name='address',
name='street',
field=models.CharField(error_messages={'required': 'Please specify the street name!'}, max_length=128),
),
migrations.AlterField(
model_name='category',
name='name',
field=models.CharField(error_messages={'required': 'Please enter the category name!'}, max_length=128),
),
migrations.AlterField(
model_name='city',
name='name',
field=models.CharField(error_messages={'required': 'Please specify the city!'}, max_length=64),
),
migrations.AlterField(
model_name='country',
name='code',
field=models.CharField(error_messages={'required': 'Please specify the country code!'}, max_length=8),
),
migrations.AlterField(
model_name='country',
name='name',
field=models.CharField(error_messages={'required': 'Please specify the country!'}, max_length=64),
),
migrations.AlterField(
model_name='language',
name='name',
field=models.CharField(error_messages={'required': 'Please specify the language!'}, max_length=64),
),
migrations.AlterField(
model_name='module',
name='data_set_location',
field=models.CharField(null=True, max_length=256, default='No data set'),
),
migrations.AlterField(
model_name='module',
name='description',
field=models.TextField(error_messages={'required': 'Please enter the module description!'}),
),
migrations.AlterField(
model_name='module',
name='name',
field=models.CharField(error_messages={'required': 'Please enter the module name!'}, max_length=128),
),
migrations.AlterField(
model_name='module',
name='status',
field=models.IntegerField(default=1, choices=[(1, 'Created'), (2, 'In Review'), (3, 'In Progress'), (4, 'Completed')]),
),
migrations.AlterField(
model_name='project',
name='description',
field=models.CharField(max_length=1024, default=''),
),
migrations.AlterField(
model_name='project',
name='name',
field=models.CharField(error_messages={'required': 'Please enter the project name!'}, max_length=128),
),
migrations.AlterField(
model_name='qualification',
name='type',
field=models.IntegerField(default=1, choices=[(1, 'Strict'), (2, 'Flexible')]),
),
migrations.AlterField(
model_name='region',
name='code',
field=models.CharField(error_messages={'required': 'Please specify the region code!'}, max_length=16),
),
migrations.AlterField(
model_name='region',
name='name',
field=models.CharField(error_messages={'required': 'Please specify the region!'}, max_length=64),
),
migrations.AlterField(
model_name='role',
name='name',
field=models.CharField(error_messages={'required': 'Please specify the role name!', 'unique': 'The role %(value)r already exists. Please provide another name!'}, max_length=32, unique=True),
),
migrations.AlterField(
model_name='skill',
name='description',
field=models.CharField(error_messages={'required': 'Please enter the skill description!'}, max_length=512),
),
migrations.AlterField(
model_name='skill',
name='name',
field=models.CharField(error_messages={'required': 'Please enter the skill name!'}, max_length=128),
),
migrations.AlterField(
model_name='task',
name='status',
field=models.IntegerField(default=1, choices=[(1, 'Created'), (2, 'Accepted'), (3, 'Assigned'), (4, 'Finished')]),
),
migrations.AlterField(
model_name='taskworkerresult',
name='status',
field=models.IntegerField(default=1, choices=[(1, 'Created'), (2, 'Accepted'), (3, 'Rejected')]),
),
migrations.AlterField(
model_name='template',
name='name',
field=models.CharField(error_messages={'required': 'Please enter the template name!'}, max_length=128),
),
migrations.AlterField(
model_name='templateitem',
name='layout',
field=models.CharField(max_length=16, default='column'),
),
migrations.AlterField(
model_name='templateitem',
name='name',
field=models.CharField(error_messages={'required': 'Please enter the name of the template item!'}, max_length=128),
),
migrations.AlterField(
model_name='userprofile',
name='birthday',
field=models.DateField(null=True, error_messages={'invalid': 'Please enter a correct date format'}),
),
migrations.AlterField(
model_name='userprofile',
name='gender',
field=models.CharField(max_length=1, choices=[('M', 'Male'), ('F', 'Female')]),
),
migrations.AlterField(
model_name='userprofile',
name='requester_alias',
field=models.CharField(error_messages={'required': 'Please enter an alias!'}, max_length=32),
),
migrations.AlterField(
model_name='userprofile',
name='worker_alias',
field=models.CharField(error_messages={'required': 'Please enter an alias!'}, max_length=32),
),
migrations.AlterField(
model_name='workermoduleapplication',
name='status',
field=models.IntegerField(default=1, choices=[(1, 'Created'), (2, 'Accepted'), (3, 'Rejected')]),
),
]
|
mit
|
autosportlabs/kivy
|
examples/widgets/label_text_size.py
|
21
|
1877
|
'''
Label text_size
===============
This example shows how the text_size and line_height properties are used
to format a Label widget.
'''
import kivy
kivy.require('1.0.7')
from kivy.app import App
from kivy.uix.label import Label
_long_text = ("""Lorem ipsum dolor sit amet, consectetur adipiscing elit. """
"""Phasellus odio nisi, pellentesque molestie adipiscing vitae, aliquam """
"""at tellus. Fusce quis est ornare erat pulvinar elementum ut sed """
"""felis. Donec vel neque mauris. In sit amet nunc sit amet diam dapibus"""
""" lacinia. In sodales placerat mauris, ut euismod augue laoreet at. """
"""Integer in neque non odio fermentum volutpat nec nec nulla. Donec et """
"""risus non mi viverra posuere. Phasellus cursus augue purus, eget """
"""volutpat leo. Phasellus sed dui vitae ipsum mattis facilisis vehicula"""
""" eu justo.\n\n"""
"""Quisque neque dolor, egestas sed venenatis eget, porta id ipsum. Ut """
"""faucibus, massa vitae imperdiet rutrum, sem dolor rhoncus magna, non """
"""lacinia nulla risus non dui. Nulla sit amet risus orci. Nunc libero """
"""justo, interdum eu pulvinar vel, pulvinar et lectus. Phasellus sed """
"""luctus diam. Pellentesque non feugiat dolor. Cras at dolor velit, """
"""gravida congue velit. Aliquam erat volutpat. Nullam eu nunc dui, quis"""
""" sagittis dolor. Ut nec dui eget odio pulvinar placerat. Pellentesque"""
""" mi metus, tristique et placerat ac, pulvinar vel quam. Nam blandit """
"""magna a urna imperdiet molestie. Nullam ut nisi eget enim laoreet """
"""sodales sit amet a felis.\n""")
class LabelTextSizeTest(App):
def build(self):
z = Label(
text=_long_text,
text_size=(600, None),
line_height=1.5
)
return z
if __name__ == '__main__':
LabelTextSizeTest().run()
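# Added note (an assumption about typical usage, not part of the original example):
# text_size=(600, None) constrains only the width, so the text wraps at 600 px and
# the label grows vertically. Supplying a height as well, e.g.
#     Label(text=_long_text, text_size=(600, 400), valign='top')
# would also bound the vertical box that the text is laid out in.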
|
mit
|
DailyActie/Surrogate-Model
|
01-codes/scikit-learn-master/sklearn/gaussian_process/regression_models.py
|
1
|
2171
|
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <[email protected]>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
"""
The built-in regression models submodule for the gaussian_process module.
"""
import numpy as np
def constant(x):
"""
Zero order polynomial (constant, p = 1) regression model.
x --> f(x) = 1
Parameters
----------
x : array_like
An array with shape (n_eval, n_features) giving the locations x at
which the regression model should be evaluated.
Returns
-------
f : array_like
An array with shape (n_eval, p) with the values of the regression
model.
"""
x = np.asarray(x, dtype=np.float64)
n_eval = x.shape[0]
f = np.ones([n_eval, 1])
return f
def linear(x):
"""
First order polynomial (linear, p = n+1) regression model.
x --> f(x) = [ 1, x_1, ..., x_n ].T
Parameters
----------
x : array_like
An array with shape (n_eval, n_features) giving the locations x at
which the regression model should be evaluated.
Returns
-------
f : array_like
An array with shape (n_eval, p) with the values of the regression
model.
"""
x = np.asarray(x, dtype=np.float64)
n_eval = x.shape[0]
f = np.hstack([np.ones([n_eval, 1]), x])
return f
def quadratic(x):
"""
Second order polynomial (quadratic, p = n*(n+1)/2+n+1) regression model.
x --> f(x) = [ 1, { x_i, i = 1,...,n }, { x_i * x_j, (i,j) = 1,...,n, i <= j } ].T
Parameters
----------
x : array_like
An array with shape (n_eval, n_features) giving the locations x at
which the regression model should be evaluated.
Returns
-------
f : array_like
An array with shape (n_eval, p) with the values of the regression
model.
"""
x = np.asarray(x, dtype=np.float64)
n_eval, n_features = x.shape
f = np.hstack([np.ones([n_eval, 1]), x])
for k in range(n_features):
f = np.hstack([f, x[:, k, np.newaxis] * x[:, k:]])
return f
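# ---------------------------------------------------------------------------
# Illustrative usage (added sketch, not part of the original scikit-learn file).
# `_demo_x` is a hypothetical 2x3 input; the printed shapes follow directly
# from the docstrings above.
if __name__ == "__main__":
    _demo_x = np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]])  # (n_eval=2, n_features=3)
    print(constant(_demo_x).shape)   # (2, 1): intercept column only
    print(linear(_demo_x).shape)     # (2, 4): [1, x_1, x_2, x_3]
    print(quadratic(_demo_x).shape)  # (2, 10): adds all x_i * x_j products with i <= j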
|
mit
|
wilvk/ansible
|
lib/ansible/modules/cloud/docker/docker_container.py
|
9
|
80462
|
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: docker_container
short_description: manage docker containers
description:
- Manage the life cycle of docker containers.
- Supports check mode. Run with --check and --diff to view config difference and list of actions to be taken.
version_added: "2.1"
options:
auto_remove:
description:
- Enable auto-removal of the container on the daemon side when the container's process exits.
default: false
version_added: "2.4"
blkio_weight:
description:
- Block IO (relative weight), between 10 and 1000.
default: null
required: false
capabilities:
description:
- List of capabilities to add to the container.
default: null
required: false
cleanup:
description:
- Use with I(detach=false) to remove the container after successful execution.
default: false
required: false
version_added: "2.2"
command:
description:
- Command to execute when the container starts.
A command may be either a string or a list.
Prior to version 2.4, strings were split on commas.
default: null
required: false
cpu_period:
description:
- Limit CPU CFS (Completely Fair Scheduler) period
default: 0
required: false
cpu_quota:
description:
- Limit CPU CFS (Completely Fair Scheduler) quota
default: 0
required: false
cpuset_cpus:
description:
- CPUs in which to allow execution C(1,3) or C(1-3).
default: null
required: false
cpuset_mems:
description:
- Memory nodes (MEMs) in which to allow execution C(0-3) or C(0,1)
default: null
required: false
cpu_shares:
description:
- CPU shares (relative weight).
default: null
required: false
detach:
description:
- Enable detached mode to leave the container running in background.
If disabled, the task will reflect the status of the container run (failed if the command failed).
default: true
required: false
devices:
description:
- "List of host device bindings to add to the container. Each binding is a mapping expressed
in the format: <path_on_host>:<path_in_container>:<cgroup_permissions>"
default: null
required: false
dns_servers:
description:
- List of custom DNS servers.
default: null
required: false
dns_search_domains:
description:
- List of custom DNS search domains.
default: null
required: false
domainname:
description:
- Container domainname.
default: null
required: false
version_added: "2.5"
env:
description:
- Dictionary of key,value pairs.
default: null
required: false
env_file:
version_added: "2.2"
description:
- Path to a file containing environment variables I(FOO=BAR).
- If variable also present in C(env), then C(env) value will override.
- Requires docker-py >= 1.4.0.
default: null
required: false
entrypoint:
description:
- Command that overwrites the default ENTRYPOINT of the image.
default: null
required: false
etc_hosts:
description:
- Dict of host-to-IP mappings, where each host name is a key in the dictionary.
Each host name will be added to the container's /etc/hosts file.
default: null
required: false
exposed_ports:
description:
- List of additional container ports which informs Docker that the container
listens on the specified network ports at runtime.
If the port is already exposed using EXPOSE in a Dockerfile, it does not
need to be exposed again.
default: null
required: false
aliases:
- exposed
force_kill:
description:
- Use the kill command when stopping a running container.
default: false
required: false
groups:
description:
- List of additional group names and/or IDs that the container process will run as.
default: null
required: false
hostname:
description:
- Container hostname.
default: null
required: false
ignore_image:
description:
- When C(state) is I(present) or I(started), the module compares the configuration of an existing
container to the requested configuration. The evaluation includes the image version. If
the image version in the registry does not match the container, the container will be
recreated. Stop this behavior by setting C(ignore_image) to I(True).
default: false
required: false
version_added: "2.2"
image:
description:
- Repository path and tag used to create the container. If an image is not found or pull is true, the image
will be pulled from the registry. If no tag is included, 'latest' will be used.
default: null
required: false
interactive:
description:
- Keep stdin open after a container is launched, even if not attached.
default: false
required: false
ipc_mode:
description:
- Set the IPC mode for the container. Can be one of 'container:<name|id>' to reuse another
container's IPC namespace or 'host' to use the host's IPC namespace within the container.
default: null
required: false
keep_volumes:
description:
- Retain volumes associated with a removed container.
default: true
required: false
kill_signal:
description:
- Override default signal used to kill a running container.
default: null
required: false
kernel_memory:
description:
- "Kernel memory limit (format: <number>[<unit>]). Number is a positive integer.
Unit can be one of b, k, m, or g. Minimum is 4M."
default: 0
required: false
labels:
description:
- Dictionary of key value pairs.
default: null
required: false
links:
description:
- List of name aliases for linked containers in the format C(container_name:alias)
default: null
required: false
log_driver:
description:
- Specify the logging driver. Docker uses json-file by default.
choices:
- none
- json-file
- syslog
- journald
- gelf
- fluentd
- awslogs
- splunk
default: null
required: false
log_options:
description:
- Dictionary of options specific to the chosen log_driver. See https://docs.docker.com/engine/admin/logging/overview/
for details.
required: false
default: null
mac_address:
description:
- Container MAC address (e.g. 92:d0:c6:0a:29:33)
default: null
required: false
memory:
description:
- "Memory limit (format: <number>[<unit>]). Number is a positive integer.
Unit can be one of b, k, m, or g"
default: 0
required: false
memory_reservation:
description:
- "Memory soft limit (format: <number>[<unit>]). Number is a positive integer.
Unit can be one of b, k, m, or g"
default: 0
required: false
memory_swap:
description:
- Total memory limit (memory + swap, format:<number>[<unit>]).
Number is a positive integer. Unit can be one of b, k, m, or g.
default: 0
required: false
memory_swappiness:
description:
- Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
default: 0
required: false
name:
description:
- Assign a name to a new container or match an existing container.
- When identifying an existing container name may be a name or a long or short container ID.
required: true
network_mode:
description:
- Connect the container to a network.
choices:
- bridge
- container:<name|id>
- host
- none
default: null
required: false
userns_mode:
description:
- User namespace to use
default: null
required: false
version_added: "2.5"
networks:
description:
- List of networks the container belongs to.
- Each network is a dict with keys C(name), C(ipv4_address), C(ipv6_address), C(links), C(aliases).
- For each network C(name) is required, all other keys are optional.
- If included, C(links) or C(aliases) are lists.
- For examples of the data structure and usage see EXAMPLES below.
- To remove a container from one or more networks, use the C(purge_networks) option.
default: null
required: false
version_added: "2.2"
oom_killer:
description:
- Whether or not to disable OOM Killer for the container.
default: false
required: false
oom_score_adj:
description:
- An integer value containing the score given to the container in order to tune OOM killer preferences.
default: 0
required: false
version_added: "2.2"
paused:
description:
- Use with the started state to pause running processes inside the container.
default: false
required: false
pid_mode:
description:
- Set the PID namespace mode for the container. Currently only supports 'host'.
default: null
required: false
privileged:
description:
- Give extended privileges to the container.
default: false
required: false
published_ports:
description:
- List of ports to publish from the container to the host.
- "Use docker CLI syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000), where 8000 is a
container port, 9000 is a host port, and 0.0.0.0 is a host interface."
- Container ports must be exposed either in the Dockerfile or via the C(expose) option.
- A value of all will publish all exposed container ports to random host ports, ignoring
any other mappings.
- If C(networks) parameter is provided, will inspect each network to see if there exists
a bridge network with optional parameter com.docker.network.bridge.host_binding_ipv4.
If such a network is found, then published ports where no host IP address is specified
will be bound to the host IP pointed to by com.docker.network.bridge.host_binding_ipv4.
Note that the first bridge network with a com.docker.network.bridge.host_binding_ipv4
value encountered in the list of C(networks) is the one that will be used.
aliases:
- ports
required: false
default: null
pull:
description:
- If true, always pull the latest version of an image. Otherwise, will only pull an image when missing.
default: false
required: false
purge_networks:
description:
- Remove the container from ALL networks not included in C(networks) parameter.
- Any default networks such as I(bridge), if not found in C(networks), will be removed as well.
default: false
required: false
version_added: "2.2"
read_only:
description:
- Mount the container's root file system as read-only.
default: false
required: false
recreate:
description:
- Use with present and started states to force the re-creation of an existing container.
default: false
required: false
restart:
description:
- Use with started state to force a matching container to be stopped and restarted.
default: false
required: false
restart_policy:
description:
- Container restart policy. Place quotes around I(no) option.
choices:
- always
- no
- on-failure
- unless-stopped
default: on-failure
required: false
restart_retries:
description:
- Use with restart policy to control maximum number of restart attempts.
default: 0
required: false
shm_size:
description:
- Size of `/dev/shm`. The format is `<number><unit>`. `number` must be greater than `0`.
Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes).
- Omitting the unit defaults to bytes. If you omit the size entirely, the system uses `64m`.
default: null
required: false
security_opts:
description:
- List of security options in the form of C("label:user:User")
default: null
required: false
state:
description:
- 'I(absent) - A container matching the specified name will be stopped and removed. Use force_kill to kill the container
rather than stopping it. Use keep_volumes to retain volumes associated with the removed container.'
- 'I(present) - Asserts the existence of a container matching the name and any provided configuration parameters. If no
container matches the name, a container will be created. If a container matches the name but the provided configuration
does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and re-created
with the requested config. Image version will be taken into account when comparing configuration. To ignore image
version use the ignore_image option. Use the recreate option to force the re-creation of the matching container. Use
force_kill to kill the container rather than stopping it. Use keep_volumes to retain volumes associated with a removed
container.'
- 'I(started) - Asserts there is a running container matching the name and any provided configuration. If no container
matches the name, a container will be created and started. If a container matching the name is found but the
configuration does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed
and a new container will be created with the requested configuration and started. Image version will be taken into
account when comparing configuration. To ignore image version use the ignore_image option. Use recreate to always
re-create a matching container, even if it is running. Use restart to force a matching container to be stopped and
restarted. Use force_kill to kill a container rather than stopping it. Use keep_volumes to retain volumes associated
with a removed container.'
- 'I(stopped) - Asserts that the container is first I(present), and then if the container is running moves it to a stopped
state. Use force_kill to kill a container rather than stopping it.'
required: false
default: started
choices:
- absent
- present
- stopped
- started
stop_signal:
description:
- Override default signal used to stop the container.
default: null
required: false
stop_timeout:
description:
- Number of seconds to wait for the container to stop before sending SIGKILL.
required: false
default: null
trust_image_content:
description:
- If true, skip image verification.
default: false
required: false
tmpfs:
description:
- Mount a tmpfs directory
default: null
required: false
version_added: 2.4
tty:
description:
- Allocate a pseudo-TTY.
default: false
required: false
ulimits:
description:
- "List of ulimit options. A ulimit is specified as C(nofile:262144:262144)"
default: null
required: false
sysctls:
description:
- Dictionary of key,value pairs.
default: null
required: false
version_added: 2.4
user:
description:
- Sets the username or UID used and optionally the groupname or GID for the specified command.
- "Can be [ user | user:group | uid | uid:gid | user:gid | uid:group ]"
default: null
required: false
uts:
description:
- Set the UTS namespace mode for the container.
default: null
required: false
volumes:
description:
- List of volumes to mount within the container.
- "Use docker CLI-style syntax: C(/host:/container[:mode])"
- You can specify a read mode for the mount with either C(ro) or C(rw).
- SELinux hosts can additionally use C(z) or C(Z) to use a shared or
private label for the volume.
default: null
required: false
volume_driver:
description:
- The container volume driver.
default: none
required: false
volumes_from:
description:
- List of container names or Ids to get volumes from.
default: null
required: false
working_dir:
description:
- Path to the working directory.
default: null
required: false
version_added: "2.4"
extends_documentation_fragment:
- docker
author:
- "Cove Schneider (@cove)"
- "Joshua Conner (@joshuaconner)"
- "Pavel Antonov (@softzilla)"
- "Thomas Steinbach (@ThomasSteinbach)"
- "Philippe Jandot (@zfil)"
- "Daan Oosterveld (@dusdanig)"
- "James Tanner (@jctanner)"
- "Chris Houseknecht (@chouseknecht)"
- "Kassian Sun (@kassiansun)"
requirements:
- "python >= 2.6"
- "docker-py >= 1.7.0"
- "Docker API >= 1.20"
'''
EXAMPLES = '''
- name: Create a data container
docker_container:
name: mydata
image: busybox
volumes:
- /data
- name: Re-create a redis container
docker_container:
name: myredis
image: redis
command: redis-server --appendonly yes
state: present
recreate: yes
exposed_ports:
- 6379
volumes_from:
- mydata
- name: Restart a container
docker_container:
name: myapplication
image: someuser/appimage
state: started
restart: yes
links:
- "myredis:aliasedredis"
devices:
- "/dev/sda:/dev/xvda:rwm"
ports:
- "8080:9000"
- "127.0.0.1:8081:9001/udp"
env:
SECRET_KEY: ssssh
- name: Container present
docker_container:
name: mycontainer
state: present
image: ubuntu:14.04
command: sleep infinity
- name: Stop a container
docker_container:
name: mycontainer
state: stopped
- name: Start 4 load-balanced containers
docker_container:
name: "container{{ item }}"
recreate: yes
image: someuser/anotherappimage
command: sleep 1d
with_sequence: count=4
- name: remove container
docker_container:
name: ohno
state: absent
- name: Syslogging output
docker_container:
name: myservice
image: busybox
log_driver: syslog
log_options:
syslog-address: tcp://my-syslog-server:514
syslog-facility: daemon
# NOTE: in Docker 1.13+ the "syslog-tag" option was renamed to "tag"; for
# older docker installs, use "syslog-tag" instead
tag: myservice
- name: Create db container and connect to network
docker_container:
name: db_test
image: "postgres:latest"
networks:
- name: "{{ docker_network_name }}"
- name: Start container, connect to network and link
docker_container:
name: sleeper
image: ubuntu:14.04
networks:
- name: TestingNet
ipv4_address: "172.1.1.100"
aliases:
- sleepyzz
links:
- db_test:db
- name: TestingNet2
- name: Start a container with a command
docker_container:
name: sleepy
image: ubuntu:14.04
command: ["sleep", "infinity"]
- name: Add container to networks
docker_container:
name: sleepy
networks:
- name: TestingNet
ipv4_address: 172.1.1.18
links:
- sleeper
- name: TestingNet2
ipv4_address: 172.1.10.20
- name: Update network with aliases
docker_container:
name: sleepy
networks:
- name: TestingNet
aliases:
- sleepyz
- zzzz
- name: Remove container from one network
docker_container:
name: sleepy
networks:
- name: TestingNet2
purge_networks: yes
- name: Remove container from all networks
docker_container:
name: sleepy
purge_networks: yes
'''
RETURN = '''
docker_container:
description:
- Before 2.3 this was 'ansible_docker_container' but was renamed due to conflicts with the connection plugin.
- Facts representing the current state of the container. Matches the docker inspection output.
- Note that facts are not part of registered vars but accessible directly.
- Empty if C(state) is I(absent)
- If detached is I(False), will include Output attribute containing any output from container run.
returned: always
type: dict
sample: '{
"AppArmorProfile": "",
"Args": [],
"Config": {
"AttachStderr": false,
"AttachStdin": false,
"AttachStdout": false,
"Cmd": [
"/usr/bin/supervisord"
],
"Domainname": "",
"Entrypoint": null,
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"ExposedPorts": {
"443/tcp": {},
"80/tcp": {}
},
"Hostname": "8e47bf643eb9",
"Image": "lnmp_nginx:v1",
"Labels": {},
"OnBuild": null,
"OpenStdin": false,
"StdinOnce": false,
"Tty": false,
"User": "",
"Volumes": {
"/tmp/lnmp/nginx-sites/logs/": {}
},
...
}'
'''
import os
import re
import shlex
from ansible.module_utils.basic import human_to_bytes
from ansible.module_utils.docker_common import HAS_DOCKER_PY_2, AnsibleDockerClient, DockerBaseClass
from ansible.module_utils.six import string_types
try:
from docker import utils
if HAS_DOCKER_PY_2:
from docker.types import Ulimit, LogConfig
else:
from docker.utils.types import Ulimit, LogConfig
except:
# missing docker-py handled in ansible.module_utils.docker
pass
REQUIRES_CONVERSION_TO_BYTES = [
'memory',
'memory_reservation',
'memory_swap',
'shm_size'
]
VOLUME_PERMISSIONS = ('rw', 'ro', 'z', 'Z')
class TaskParameters(DockerBaseClass):
'''
Access and parse module parameters
'''
def __init__(self, client):
super(TaskParameters, self).__init__()
self.client = client
self.auto_remove = None
self.blkio_weight = None
self.capabilities = None
self.cleanup = None
self.command = None
self.cpu_period = None
self.cpu_quota = None
self.cpuset_cpus = None
self.cpuset_mems = None
self.cpu_shares = None
self.detach = None
self.debug = None
self.devices = None
self.dns_servers = None
self.dns_opts = None
self.dns_search_domains = None
self.domainname = None
self.env = None
self.env_file = None
self.entrypoint = None
self.etc_hosts = None
self.exposed_ports = None
self.force_kill = None
self.groups = None
self.hostname = None
self.ignore_image = None
self.image = None
self.interactive = None
self.ipc_mode = None
self.keep_volumes = None
self.kernel_memory = None
self.kill_signal = None
self.labels = None
self.links = None
self.log_driver = None
self.log_options = None
self.mac_address = None
self.memory = None
self.memory_reservation = None
self.memory_swap = None
self.memory_swappiness = None
self.name = None
self.network_mode = None
self.userns_mode = None
self.networks = None
self.oom_killer = None
self.oom_score_adj = None
self.paused = None
self.pid_mode = None
self.privileged = None
self.purge_networks = None
self.pull = None
self.read_only = None
self.recreate = None
self.restart = None
self.restart_retries = None
self.restart_policy = None
self.shm_size = None
self.security_opts = None
self.state = None
self.stop_signal = None
self.stop_timeout = None
self.tmpfs = None
self.trust_image_content = None
self.tty = None
self.user = None
self.uts = None
self.volumes = None
self.volume_binds = dict()
self.volumes_from = None
self.volume_driver = None
self.working_dir = None
for key, value in client.module.params.items():
setattr(self, key, value)
for param_name in REQUIRES_CONVERSION_TO_BYTES:
if client.module.params.get(param_name):
try:
setattr(self, param_name, human_to_bytes(client.module.params.get(param_name)))
except ValueError as exc:
self.fail("Failed to convert %s to bytes: %s" % (param_name, exc))
self.publish_all_ports = False
self.published_ports = self._parse_publish_ports()
if self.published_ports in ('all', 'ALL'):
self.publish_all_ports = True
self.published_ports = None
self.ports = self._parse_exposed_ports(self.published_ports)
self.log("expose ports:")
self.log(self.ports, pretty_print=True)
self.links = self._parse_links(self.links)
if self.volumes:
self.volumes = self._expand_host_paths()
self.tmpfs = self._parse_tmpfs()
self.env = self._get_environment()
self.ulimits = self._parse_ulimits()
self.sysctls = self._parse_sysctls()
self.log_config = self._parse_log_config()
self.exp_links = None
self.volume_binds = self._get_volume_binds(self.volumes)
self.log("volumes:")
self.log(self.volumes, pretty_print=True)
self.log("volume binds:")
self.log(self.volume_binds, pretty_print=True)
if self.networks:
for network in self.networks:
if not network.get('name'):
self.fail("Parameter error: network must have a name attribute.")
network['id'] = self._get_network_id(network['name'])
if not network['id']:
self.fail("Parameter error: network named %s could not be found. Does it exist?" % network['name'])
if network.get('links'):
network['links'] = self._parse_links(network['links'])
if self.entrypoint:
# convert from list to str.
self.entrypoint = ' '.join([str(x) for x in self.entrypoint])
if self.command:
# convert from list to str
if isinstance(self.command, list):
self.command = ' '.join([str(x) for x in self.command])
def fail(self, msg):
self.client.module.fail_json(msg=msg)
@property
def update_parameters(self):
'''
Returns parameters used to update a container
'''
update_parameters = dict(
blkio_weight='blkio_weight',
cpu_period='cpu_period',
cpu_quota='cpu_quota',
cpu_shares='cpu_shares',
cpuset_cpus='cpuset_cpus',
mem_limit='memory',
mem_reservation='memory_reservation',
memswap_limit='memory_swap',
kernel_memory='kernel_memory',
)
result = dict()
for key, value in update_parameters.items():
if getattr(self, value, None) is not None:
result[key] = getattr(self, value)
return result
@property
def create_parameters(self):
'''
Returns parameters used to create a container
'''
create_params = dict(
command='command',
domainname='domainname',
hostname='hostname',
user='user',
detach='detach',
stdin_open='interactive',
tty='tty',
ports='ports',
environment='env',
name='name',
entrypoint='entrypoint',
cpu_shares='cpu_shares',
mac_address='mac_address',
labels='labels',
stop_signal='stop_signal',
volume_driver='volume_driver',
working_dir='working_dir',
)
result = dict(
host_config=self._host_config(),
volumes=self._get_mounts(),
)
for key, value in create_params.items():
if getattr(self, value, None) is not None:
result[key] = getattr(self, value)
return result
def _expand_host_paths(self):
new_vols = []
for vol in self.volumes:
if ':' in vol:
if len(vol.split(':')) == 3:
host, container, mode = vol.split(':')
if re.match(r'[\.~]', host):
host = os.path.abspath(host)
new_vols.append("%s:%s:%s" % (host, container, mode))
continue
elif len(vol.split(':')) == 2:
parts = vol.split(':')
if parts[1] not in VOLUME_PERMISSIONS and re.match(r'[\.~]', parts[0]):
host = os.path.abspath(parts[0])
new_vols.append("%s:%s:rw" % (host, parts[1]))
continue
new_vols.append(vol)
return new_vols
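# Illustrative example (added comment, hypothetical values): with
# volumes=['./data:/app/data', 'myvolume:/data'], the relative host path is
# expanded, e.g. to ['/home/user/project/data:/app/data:rw', 'myvolume:/data'],
# while named volumes and absolute host paths pass through unchanged.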
def _get_mounts(self):
'''
Return a list of container mounts.
:return:
'''
result = []
if self.volumes:
for vol in self.volumes:
if ':' in vol:
if len(vol.split(':')) == 3:
host, container, _ = vol.split(':')
result.append(container)
continue
if len(vol.split(':')) == 2:
parts = vol.split(':')
if parts[1] not in VOLUME_PERMISSIONS:
result.append(parts[1])
continue
result.append(vol)
self.log("mounts:")
self.log(result, pretty_print=True)
return result
def _host_config(self):
'''
Returns parameters used to create a HostConfig object
'''
host_config_params = dict(
port_bindings='published_ports',
publish_all_ports='publish_all_ports',
links='links',
privileged='privileged',
dns='dns_servers',
dns_search='dns_search_domains',
binds='volume_binds',
volumes_from='volumes_from',
network_mode='network_mode',
userns_mode='userns_mode',
cap_add='capabilities',
extra_hosts='etc_hosts',
read_only='read_only',
ipc_mode='ipc_mode',
security_opt='security_opts',
ulimits='ulimits',
sysctls='sysctls',
log_config='log_config',
mem_limit='memory',
memswap_limit='memory_swap',
mem_swappiness='memory_swappiness',
oom_score_adj='oom_score_adj',
oom_kill_disable='oom_killer',
shm_size='shm_size',
group_add='groups',
devices='devices',
pid_mode='pid_mode',
tmpfs='tmpfs'
)
if HAS_DOCKER_PY_2:
# auto_remove is only supported in docker>=2
host_config_params['auto_remove'] = 'auto_remove'
params = dict()
for key, value in host_config_params.items():
if getattr(self, value, None) is not None:
params[key] = getattr(self, value)
if self.restart_policy:
params['restart_policy'] = dict(Name=self.restart_policy,
MaximumRetryCount=self.restart_retries)
return self.client.create_host_config(**params)
@property
def default_host_ip(self):
ip = '0.0.0.0'
if not self.networks:
return ip
for net in self.networks:
if net.get('name'):
network = self.client.inspect_network(net['name'])
if network.get('Driver') == 'bridge' and \
network.get('Options', {}).get('com.docker.network.bridge.host_binding_ipv4'):
ip = network['Options']['com.docker.network.bridge.host_binding_ipv4']
break
return ip
def _parse_publish_ports(self):
'''
Parse ports from docker CLI syntax
'''
if self.published_ports is None:
return None
if 'all' in self.published_ports:
return 'all'
default_ip = self.default_host_ip
binds = {}
for port in self.published_ports:
parts = str(port).split(':')
container_port = parts[-1]
if '/' not in container_port:
container_port = int(parts[-1])
p_len = len(parts)
if p_len == 1:
bind = (default_ip,)
elif p_len == 2:
bind = (default_ip, int(parts[0]))
elif p_len == 3:
bind = (parts[0], int(parts[1])) if parts[1] else (parts[0],)
if container_port in binds:
old_bind = binds[container_port]
if isinstance(old_bind, list):
old_bind.append(bind)
else:
binds[container_port] = [binds[container_port], bind]
else:
binds[container_port] = bind
return binds
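# Illustrative example (added comment, hypothetical values): with
# published_ports=['8080:80', '127.0.0.1:8443:443/udp'] and a default host IP of
# 0.0.0.0, this returns {80: ('0.0.0.0', 8080), '443/udp': ('127.0.0.1', 8443)}.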
@staticmethod
def _get_volume_binds(volumes):
'''
Extract host bindings, if any, from list of volume mapping strings.
:return: dictionary of bind mappings
'''
result = dict()
if volumes:
for vol in volumes:
host = None
if ':' in vol:
if len(vol.split(':')) == 3:
host, container, mode = vol.split(':')
if len(vol.split(':')) == 2:
parts = vol.split(':')
if parts[1] not in VOLUME_PERMISSIONS:
host, container, mode = (vol.split(':') + ['rw'])
if host is not None:
result[host] = dict(
bind=container,
mode=mode
)
return result
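# Illustrative example (added comment, hypothetical values): with
# volumes=['/srv/app:/app:ro', 'named_vol:/data'], this returns
# {'/srv/app': {'bind': '/app', 'mode': 'ro'}, 'named_vol': {'bind': '/data', 'mode': 'rw'}}.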
def _parse_exposed_ports(self, published_ports):
'''
Parse exposed ports from docker CLI-style ports syntax.
'''
exposed = []
if self.exposed_ports:
for port in self.exposed_ports:
port = str(port).strip()
protocol = 'tcp'
match = re.search(r'(/.+$)', port)
if match:
protocol = match.group(1).replace('/', '')
port = re.sub(r'/.+$', '', port)
exposed.append((port, protocol))
if published_ports:
# Any published port should also be exposed
for publish_port in published_ports:
match = False
if isinstance(publish_port, string_types) and '/' in publish_port:
port, protocol = publish_port.split('/')
port = int(port)
else:
protocol = 'tcp'
port = int(publish_port)
for exposed_port in exposed:
if isinstance(exposed_port[0], string_types) and '-' in exposed_port[0]:
start_port, end_port = exposed_port[0].split('-')
if int(start_port) <= port <= int(end_port):
match = True
elif exposed_port[0] == port:
match = True
if not match:
exposed.append((port, protocol))
return exposed
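# Illustrative example (added comment, hypothetical values): with
# exposed_ports=['9000', '514/udp'] and no published ports, this returns
# [('9000', 'tcp'), ('514', 'udp')]; any published container ports not already
# covered are appended as (port, protocol) tuples as well.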
@staticmethod
def _parse_links(links):
'''
Turn links into a list of (name, alias) tuples
'''
if links is None:
return None
result = []
for link in links:
parsed_link = link.split(':', 1)
if len(parsed_link) == 2:
result.append((parsed_link[0], parsed_link[1]))
else:
result.append((parsed_link[0], parsed_link[0]))
return result
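# Illustrative example (added comment, hypothetical values): with
# links=['db_test:db', 'cache'], this returns [('db_test', 'db'), ('cache', 'cache')].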
def _parse_ulimits(self):
'''
Turn ulimits into an array of Ulimit objects
'''
if self.ulimits is None:
return None
results = []
for limit in self.ulimits:
limits = dict()
pieces = limit.split(':')
if len(pieces) >= 2:
limits['name'] = pieces[0]
limits['soft'] = int(pieces[1])
limits['hard'] = int(pieces[1])
if len(pieces) == 3:
limits['hard'] = int(pieces[2])
try:
results.append(Ulimit(**limits))
except ValueError as exc:
self.fail("Error parsing ulimits value %s - %s" % (limit, exc))
return results
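# Illustrative example (added comment, hypothetical values): 'nofile:262144' yields
# Ulimit(name='nofile', soft=262144, hard=262144), while 'nofile:1024:2048'
# yields Ulimit(name='nofile', soft=1024, hard=2048).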
def _parse_sysctls(self):
'''
Return the sysctls dictionary as provided; no conversion is needed
'''
return self.sysctls
def _parse_log_config(self):
'''
Create a LogConfig object
'''
if self.log_driver is None:
return None
options = dict(
Type=self.log_driver,
Config=dict()
)
if self.log_options is not None:
options['Config'] = self.log_options
try:
return LogConfig(**options)
except ValueError as exc:
self.fail('Error parsing logging options - %s' % (exc))
def _parse_tmpfs(self):
'''
Turn the tmpfs list into a dict mapping mount paths to mount options
'''
result = dict()
if self.tmpfs is None:
return result
for tmpfs_spec in self.tmpfs:
split_spec = tmpfs_spec.split(":", 1)
if len(split_spec) > 1:
result[split_spec[0]] = split_spec[1]
else:
result[split_spec[0]] = ""
return result
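# Illustrative example (added comment, hypothetical values): with
# tmpfs=['/run:rw,size=64m', '/tmp'], this returns
# {'/run': 'rw,size=64m', '/tmp': ''}.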
def _get_environment(self):
"""
If environment file is combined with explicit environment variables, the explicit environment variables
take precedence.
"""
final_env = {}
if self.env_file:
parsed_env_file = utils.parse_env_file(self.env_file)
for name, value in parsed_env_file.items():
final_env[name] = str(value)
if self.env:
for name, value in self.env.items():
final_env[name] = str(value)
return final_env
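# Illustrative example (added comment, hypothetical values): if the env_file
# contains FOO=from_file and the task sets env: {FOO: from_task, BAR: 1}, the
# result is {'FOO': 'from_task', 'BAR': '1'} -- explicit env wins and all
# values are coerced to strings.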
def _get_network_id(self, network_name):
network_id = None
try:
for network in self.client.networks(names=[network_name]):
if network['Name'] == network_name:
network_id = network['Id']
break
except Exception as exc:
self.fail("Error getting network id for %s - %s" % (network_name, str(exc)))
return network_id
class Container(DockerBaseClass):
def __init__(self, container, parameters):
super(Container, self).__init__()
self.raw = container
self.Id = None
self.container = container
if container:
self.Id = container['Id']
self.Image = container['Image']
self.log(self.container, pretty_print=True)
self.parameters = parameters
self.parameters.expected_links = None
self.parameters.expected_ports = None
self.parameters.expected_exposed = None
self.parameters.expected_volumes = None
self.parameters.expected_ulimits = None
self.parameters.expected_sysctls = None
self.parameters.expected_etc_hosts = None
self.parameters.expected_env = None
def fail(self, msg):
self.parameters.client.module.fail_json(msg=msg)
@property
def exists(self):
return True if self.container else False
@property
def running(self):
if self.container and self.container.get('State'):
if self.container['State'].get('Running') and not self.container['State'].get('Ghost', False):
return True
return False
def has_different_configuration(self, image):
'''
Diff parameters vs existing container config. Returns tuple: (True | False, List of differences)
'''
self.log('Starting has_different_configuration')
self.parameters.expected_entrypoint = self._get_expected_entrypoint()
self.parameters.expected_links = self._get_expected_links()
self.parameters.expected_ports = self._get_expected_ports()
self.parameters.expected_exposed = self._get_expected_exposed(image)
self.parameters.expected_volumes = self._get_expected_volumes(image)
self.parameters.expected_binds = self._get_expected_binds(image)
self.parameters.expected_ulimits = self._get_expected_ulimits(self.parameters.ulimits)
self.parameters.expected_sysctls = self._get_expected_sysctls(self.parameters.sysctls)
self.parameters.expected_etc_hosts = self._convert_simple_dict_to_list('etc_hosts')
self.parameters.expected_env = self._get_expected_env(image)
self.parameters.expected_cmd = self._get_expected_cmd()
self.parameters.expected_devices = self._get_expected_devices()
if not self.container.get('HostConfig'):
self.fail("has_config_diff: Error parsing container properties. HostConfig missing.")
if not self.container.get('Config'):
self.fail("has_config_diff: Error parsing container properties. Config missing.")
if not self.container.get('NetworkSettings'):
self.fail("has_config_diff: Error parsing container properties. NetworkSettings missing.")
host_config = self.container['HostConfig']
log_config = host_config.get('LogConfig', dict())
restart_policy = host_config.get('RestartPolicy', dict())
config = self.container['Config']
network = self.container['NetworkSettings']
# The previous version of the docker module ignored the detach state by
# assuming if the container was running, it must have been detached.
detach = not (config.get('AttachStderr') and config.get('AttachStdout'))
# "ExposedPorts": null returns None type & causes AttributeError - PR #5517
if config.get('ExposedPorts') is not None:
expected_exposed = [re.sub(r'/.+$', '', p) for p in config.get('ExposedPorts', dict()).keys()]
else:
expected_exposed = []
# Map parameters to container inspect results
config_mapping = dict(
auto_remove=host_config.get('AutoRemove'),
expected_cmd=config.get('Cmd'),
domainname=config.get('Domainname'),
hostname=config.get('Hostname'),
user=config.get('User'),
detach=detach,
interactive=config.get('OpenStdin'),
capabilities=host_config.get('CapAdd'),
expected_devices=host_config.get('Devices'),
dns_servers=host_config.get('Dns'),
dns_opts=host_config.get('DnsOptions'),
dns_search_domains=host_config.get('DnsSearch'),
expected_env=(config.get('Env') or []),
expected_entrypoint=config.get('Entrypoint'),
expected_etc_hosts=host_config['ExtraHosts'],
expected_exposed=expected_exposed,
groups=host_config.get('GroupAdd'),
ipc_mode=host_config.get("IpcMode"),
labels=config.get('Labels'),
expected_links=host_config.get('Links'),
log_driver=log_config.get('Type'),
log_options=log_config.get('Config'),
mac_address=network.get('MacAddress'),
memory_swappiness=host_config.get('MemorySwappiness'),
network_mode=host_config.get('NetworkMode'),
userns_mode=host_config.get('UsernsMode'),
oom_killer=host_config.get('OomKillDisable'),
oom_score_adj=host_config.get('OomScoreAdj'),
pid_mode=host_config.get('PidMode'),
privileged=host_config.get('Privileged'),
expected_ports=host_config.get('PortBindings'),
read_only=host_config.get('ReadonlyRootfs'),
restart_policy=restart_policy.get('Name'),
restart_retries=restart_policy.get('MaximumRetryCount'),
# Cannot test shm_size, as shm_size is not included in container inspection results.
# shm_size=host_config.get('ShmSize'),
security_opts=host_config.get("SecurityOpt"),
stop_signal=config.get("StopSignal"),
tmpfs=host_config.get('Tmpfs'),
tty=config.get('Tty'),
expected_ulimits=host_config.get('Ulimits'),
expected_sysctls=host_config.get('Sysctls'),
uts=host_config.get('UTSMode'),
expected_volumes=config.get('Volumes'),
expected_binds=host_config.get('Binds'),
volumes_from=host_config.get('VolumesFrom'),
volume_driver=host_config.get('VolumeDriver'),
working_dir=host_config.get('WorkingDir')
)
differences = []
for key, value in config_mapping.items():
self.log('check differences %s %s vs %s' % (key, getattr(self.parameters, key), str(value)))
if getattr(self.parameters, key, None) is not None:
if isinstance(getattr(self.parameters, key), list) and isinstance(value, list):
if len(getattr(self.parameters, key)) > 0 and isinstance(getattr(self.parameters, key)[0], dict):
# compare list of dictionaries
self.log("comparing list of dict: %s" % key)
match = self._compare_dictionary_lists(getattr(self.parameters, key), value)
else:
# compare two lists. Is list_a in list_b?
self.log("comparing lists: %s" % key)
set_a = set(getattr(self.parameters, key))
set_b = set(value)
match = (set_b >= set_a)
elif isinstance(getattr(self.parameters, key), list) and not len(getattr(self.parameters, key)) \
and value is None:
# an empty list and None are ==
continue
elif isinstance(getattr(self.parameters, key), dict) and isinstance(value, dict):
# compare two dicts
self.log("comparing two dicts: %s" % key)
match = self._compare_dicts(getattr(self.parameters, key), value)
elif isinstance(getattr(self.parameters, key), dict) and \
not len(list(getattr(self.parameters, key).keys())) and value is None:
# an empty dict and None are ==
continue
else:
# primitive compare
self.log("primitive compare: %s" % key)
match = (getattr(self.parameters, key) == value)
if not match:
# no match. record the differences
item = dict()
item[key] = dict(
parameter=getattr(self.parameters, key),
container=value
)
differences.append(item)
has_differences = True if len(differences) > 0 else False
return has_differences, differences
def _compare_dictionary_lists(self, list_a, list_b):
'''
If all of list_a exists in list_b, return True
'''
if not isinstance(list_a, list) or not isinstance(list_b, list):
return False
matches = 0
for dict_a in list_a:
for dict_b in list_b:
if self._compare_dicts(dict_a, dict_b):
matches += 1
break
result = (matches == len(list_a))
return result
def _compare_dicts(self, dict_a, dict_b):
'''
If dict_a in dict_b, return True
'''
if not isinstance(dict_a, dict) or not isinstance(dict_b, dict):
return False
for key, value in dict_a.items():
if isinstance(value, dict):
match = self._compare_dicts(value, dict_b.get(key))
elif isinstance(value, list):
if len(value) > 0 and isinstance(value[0], dict):
match = self._compare_dictionary_lists(value, dict_b.get(key))
else:
set_a = set(value)
set_b = set(dict_b.get(key))
match = (set_a == set_b)
else:
match = (value == dict_b.get(key))
if not match:
return False
return True
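# Illustrative example (added comment, hypothetical values):
# _compare_dicts({'a': 1}, {'a': 1, 'b': 2}) is True (dict_a is contained in
# dict_b), while _compare_dicts({'a': 1}, {'a': 2}) is False.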
def has_different_resource_limits(self):
'''
Diff parameters and container resource limits
'''
if not self.container.get('HostConfig'):
self.fail("limits_differ_from_container: Error parsing container properties. HostConfig missing.")
host_config = self.container['HostConfig']
config_mapping = dict(
cpu_period=host_config.get('CpuPeriod'),
cpu_quota=host_config.get('CpuQuota'),
cpuset_cpus=host_config.get('CpusetCpus'),
cpuset_mems=host_config.get('CpusetMems'),
cpu_shares=host_config.get('CpuShares'),
kernel_memory=host_config.get("KernelMemory"),
memory=host_config.get('Memory'),
memory_reservation=host_config.get('MemoryReservation'),
memory_swap=host_config.get('MemorySwap'),
oom_score_adj=host_config.get('OomScoreAdj'),
oom_killer=host_config.get('OomKillDisable'),
)
differences = []
for key, value in config_mapping.items():
if getattr(self.parameters, key, None) and getattr(self.parameters, key) != value:
# no match. record the differences
item = dict()
item[key] = dict(
parameter=getattr(self.parameters, key),
container=value
)
differences.append(item)
different = (len(differences) > 0)
return different, differences
def has_network_differences(self):
'''
Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6
'''
different = False
differences = []
if not self.parameters.networks:
return different, differences
if not self.container.get('NetworkSettings'):
self.fail("has_missing_networks: Error parsing container properties. NetworkSettings missing.")
connected_networks = self.container['NetworkSettings']['Networks']
for network in self.parameters.networks:
if connected_networks.get(network['name'], None) is None:
different = True
differences.append(dict(
parameter=network,
container=None
))
else:
diff = False
if network.get('ipv4_address') and network['ipv4_address'] != connected_networks[network['name']].get('IPAddress'):
diff = True
if network.get('ipv6_address') and network['ipv6_address'] != connected_networks[network['name']].get('GlobalIPv6Address'):
diff = True
if network.get('aliases') and not connected_networks[network['name']].get('Aliases'):
diff = True
if network.get('aliases') and connected_networks[network['name']].get('Aliases'):
for alias in network.get('aliases'):
if alias not in connected_networks[network['name']].get('Aliases', []):
diff = True
if network.get('links') and not connected_networks[network['name']].get('Links'):
diff = True
if network.get('links') and connected_networks[network['name']].get('Links'):
expected_links = []
for link, alias in network['links']:
expected_links.append("%s:%s" % (link, alias))
for link in expected_links:
if link not in connected_networks[network['name']].get('Links', []):
diff = True
if diff:
different = True
differences.append(dict(
parameter=network,
container=dict(
name=network['name'],
ipv4_address=connected_networks[network['name']].get('IPAddress'),
ipv6_address=connected_networks[network['name']].get('GlobalIPv6Address'),
aliases=connected_networks[network['name']].get('Aliases'),
links=connected_networks[network['name']].get('Links')
)
))
return different, differences
def has_extra_networks(self):
'''
Check if the container is connected to non-requested networks
'''
extra_networks = []
extra = False
if not self.container.get('NetworkSettings'):
self.fail("has_extra_networks: Error parsing container properties. NetworkSettings missing.")
connected_networks = self.container['NetworkSettings'].get('Networks')
if connected_networks:
for network, network_config in connected_networks.items():
keep = False
if self.parameters.networks:
for expected_network in self.parameters.networks:
if expected_network['name'] == network:
keep = True
if not keep:
extra = True
extra_networks.append(dict(name=network, id=network_config['NetworkID']))
return extra, extra_networks
def _get_expected_devices(self):
if not self.parameters.devices:
return None
expected_devices = []
for device in self.parameters.devices:
parts = device.split(':')
if len(parts) == 1:
expected_devices.append(
dict(
CgroupPermissions='rwm',
PathInContainer=parts[0],
PathOnHost=parts[0]
))
elif len(parts) == 2:
parts = device.split(':')
expected_devices.append(
dict(
CgroupPermissions='rwm',
PathInContainer=parts[1],
PathOnHost=parts[0]
)
)
else:
expected_devices.append(
dict(
CgroupPermissions=parts[2],
PathInContainer=parts[1],
PathOnHost=parts[0]
))
return expected_devices
def _get_expected_entrypoint(self):
if not self.parameters.entrypoint:
return None
return shlex.split(self.parameters.entrypoint)
def _get_expected_ports(self):
if not self.parameters.published_ports:
return None
expected_bound_ports = {}
for container_port, config in self.parameters.published_ports.items():
if isinstance(container_port, int):
container_port = "%s/tcp" % container_port
if len(config) == 1:
if isinstance(config[0], int):
expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': config[0]}]
else:
expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': ""}]
elif isinstance(config[0], tuple):
expected_bound_ports[container_port] = []
for host_ip, host_port in config:
expected_bound_ports[container_port].append({'HostIp': host_ip, 'HostPort': str(host_port)})
else:
expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': str(config[1])}]
return expected_bound_ports
def _get_expected_links(self):
if self.parameters.links is None:
return None
self.log('parameter links:')
self.log(self.parameters.links, pretty_print=True)
exp_links = []
for link, alias in self.parameters.links:
exp_links.append("/%s:%s/%s" % (link, ('/' + self.parameters.name), alias))
return exp_links
def _get_expected_binds(self, image):
self.log('_get_expected_binds')
image_vols = []
if image:
image_vols = self._get_image_binds(image['ContainerConfig'].get('Volumes'))
param_vols = []
if self.parameters.volumes:
for vol in self.parameters.volumes:
host = None
if ':' in vol:
if len(vol.split(':')) == 3:
host, container, mode = vol.split(':')
if len(vol.split(':')) == 2:
parts = vol.split(':')
if parts[1] not in VOLUME_PERMISSIONS:
host, container, mode = vol.split(':') + ['rw']
if host:
param_vols.append("%s:%s:%s" % (host, container, mode))
result = list(set(image_vols + param_vols))
self.log("expected_binds:")
self.log(result, pretty_print=True)
return result
def _get_image_binds(self, volumes):
'''
Convert array of binds to array of strings with format host_path:container_path:mode
:param volumes: array of bind dicts
:return: array of strings
'''
results = []
if isinstance(volumes, dict):
results += self._get_bind_from_dict(volumes)
elif isinstance(volumes, list):
for vol in volumes:
results += self._get_bind_from_dict(vol)
return results
@staticmethod
def _get_bind_from_dict(volume_dict):
results = []
if volume_dict:
for host_path, config in volume_dict.items():
if isinstance(config, dict) and config.get('bind'):
container_path = config.get('bind')
mode = config.get('mode', 'rw')
results.append("%s:%s:%s" % (host_path, container_path, mode))
return results
def _get_expected_volumes(self, image):
self.log('_get_expected_volumes')
expected_vols = dict()
if image and image['ContainerConfig'].get('Volumes'):
expected_vols.update(image['ContainerConfig'].get('Volumes'))
if self.parameters.volumes:
for vol in self.parameters.volumes:
container = None
if ':' in vol:
if len(vol.split(':')) == 3:
host, container, mode = vol.split(':')
if len(vol.split(':')) == 2:
parts = vol.split(':')
if parts[1] not in VOLUME_PERMISSIONS:
host, container, mode = vol.split(':') + ['rw']
new_vol = dict()
if container:
new_vol[container] = dict()
else:
new_vol[vol] = dict()
expected_vols.update(new_vol)
if not expected_vols:
expected_vols = None
self.log("expected_volumes:")
self.log(expected_vols, pretty_print=True)
return expected_vols
def _get_expected_env(self, image):
self.log('_get_expected_env')
expected_env = dict()
if image and image['ContainerConfig'].get('Env'):
for env_var in image['ContainerConfig']['Env']:
parts = env_var.split('=', 1)
expected_env[parts[0]] = parts[1]
if self.parameters.env:
expected_env.update(self.parameters.env)
param_env = []
for key, value in expected_env.items():
param_env.append("%s=%s" % (key, value))
return param_env
def _get_expected_exposed(self, image):
self.log('_get_expected_exposed')
image_ports = []
if image:
image_ports = [re.sub(r'/.+$', '', p) for p in (image['ContainerConfig'].get('ExposedPorts') or {}).keys()]
param_ports = []
if self.parameters.ports:
param_ports = [str(p[0]) for p in self.parameters.ports]
result = list(set(image_ports + param_ports))
self.log(result, pretty_print=True)
return result
def _get_expected_ulimits(self, config_ulimits):
self.log('_get_expected_ulimits')
if config_ulimits is None:
return None
results = []
for limit in config_ulimits:
results.append(dict(
Name=limit.name,
Soft=limit.soft,
Hard=limit.hard
))
return results
def _get_expected_sysctls(self, config_sysctls):
self.log('_get_expected_sysctls')
if config_sysctls is None:
return None
result = dict()
for key, value in config_sysctls.items():
result[key] = str(value)
return result
def _get_expected_cmd(self):
self.log('_get_expected_cmd')
if not self.parameters.command:
return None
return shlex.split(self.parameters.command)
def _convert_simple_dict_to_list(self, param_name, join_with=':'):
if getattr(self.parameters, param_name, None) is None:
return None
results = []
for key, value in getattr(self.parameters, param_name).items():
results.append("%s%s%s" % (key, join_with, value))
return results
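    # Illustrative sketch (added comment; parameter and values are
    # hypothetical): for a dict-valued parameter {'tier': 'web'} the default
    # join_with of ':' yields ['tier:web'], while join_with='=' would yield
    # ['tier=web'].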
class ContainerManager(DockerBaseClass):
'''
Perform container management tasks
'''
def __init__(self, client):
super(ContainerManager, self).__init__()
self.client = client
self.parameters = TaskParameters(client)
self.check_mode = self.client.check_mode
self.results = {'changed': False, 'actions': []}
self.diff = {}
self.facts = {}
state = self.parameters.state
if state in ('stopped', 'started', 'present'):
self.present(state)
elif state == 'absent':
self.absent()
if not self.check_mode and not self.parameters.debug:
self.results.pop('actions')
if self.client.module._diff or self.parameters.debug:
self.results['diff'] = self.diff
if self.facts:
self.results['ansible_facts'] = {'docker_container': self.facts}
def present(self, state):
container = self._get_container(self.parameters.name)
image = self._get_image()
self.log(image, pretty_print=True)
if not container.exists:
# New container
self.log('No container found')
new_container = self.container_create(self.parameters.image, self.parameters.create_parameters)
if new_container:
container = new_container
else:
# Existing container
different, differences = container.has_different_configuration(image)
image_different = False
if not self.parameters.ignore_image:
image_different = self._image_is_different(image, container)
if image_different or different or self.parameters.recreate:
self.diff['differences'] = differences
if image_different:
self.diff['image_different'] = True
self.log("differences")
self.log(differences, pretty_print=True)
if container.running:
self.container_stop(container.Id)
self.container_remove(container.Id)
new_container = self.container_create(self.parameters.image, self.parameters.create_parameters)
if new_container:
container = new_container
if container and container.exists:
container = self.update_limits(container)
container = self.update_networks(container)
if state == 'started' and not container.running:
container = self.container_start(container.Id)
elif state == 'started' and self.parameters.restart:
self.container_stop(container.Id)
container = self.container_start(container.Id)
elif state == 'stopped' and container.running:
self.container_stop(container.Id)
container = self._get_container(container.Id)
self.facts = container.raw
def absent(self):
container = self._get_container(self.parameters.name)
if container.exists:
if container.running:
self.container_stop(container.Id)
self.container_remove(container.Id)
def fail(self, msg, **kwargs):
self.client.module.fail_json(msg=msg, **kwargs)
def _get_container(self, container):
'''
Expects container ID or Name. Returns a container object
'''
return Container(self.client.get_container(container), self.parameters)
def _get_image(self):
if not self.parameters.image:
self.log('No image specified')
return None
repository, tag = utils.parse_repository_tag(self.parameters.image)
if not tag:
tag = "latest"
image = self.client.find_image(repository, tag)
if not self.check_mode:
if not image or self.parameters.pull:
self.log("Pull the image.")
image, alreadyToLatest = self.client.pull_image(repository, tag)
if alreadyToLatest:
self.results['changed'] = False
else:
self.results['changed'] = True
self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag)))
self.log("image")
self.log(image, pretty_print=True)
return image
def _image_is_different(self, image, container):
if image and image.get('Id'):
if container and container.Image:
if image.get('Id') != container.Image:
return True
return False
def update_limits(self, container):
limits_differ, different_limits = container.has_different_resource_limits()
if limits_differ:
self.log("limit differences:")
self.log(different_limits, pretty_print=True)
if limits_differ and not self.check_mode:
self.container_update(container.Id, self.parameters.update_parameters)
return self._get_container(container.Id)
return container
def update_networks(self, container):
has_network_differences, network_differences = container.has_network_differences()
updated_container = container
if has_network_differences:
if self.diff.get('differences'):
self.diff['differences'].append(dict(network_differences=network_differences))
else:
self.diff['differences'] = [dict(network_differences=network_differences)]
self.results['changed'] = True
updated_container = self._add_networks(container, network_differences)
if self.parameters.purge_networks:
has_extra_networks, extra_networks = container.has_extra_networks()
if has_extra_networks:
if self.diff.get('differences'):
self.diff['differences'].append(dict(purge_networks=extra_networks))
else:
self.diff['differences'] = [dict(purge_networks=extra_networks)]
self.results['changed'] = True
updated_container = self._purge_networks(container, extra_networks)
return updated_container
def _add_networks(self, container, differences):
for diff in differences:
# remove the container from the network, if connected
if diff.get('container'):
self.results['actions'].append(dict(removed_from_network=diff['parameter']['name']))
if not self.check_mode:
try:
self.client.disconnect_container_from_network(container.Id, diff['parameter']['id'])
except Exception as exc:
self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'],
str(exc)))
# connect to the network
params = dict(
ipv4_address=diff['parameter'].get('ipv4_address', None),
ipv6_address=diff['parameter'].get('ipv6_address', None),
links=diff['parameter'].get('links', None),
aliases=diff['parameter'].get('aliases', None)
)
self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=params))
if not self.check_mode:
try:
self.log("Connecting container to network %s" % diff['parameter']['id'])
self.log(params, pretty_print=True)
self.client.connect_container_to_network(container.Id, diff['parameter']['id'], **params)
except Exception as exc:
self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], str(exc)))
return self._get_container(container.Id)
def _purge_networks(self, container, networks):
for network in networks:
self.results['actions'].append(dict(removed_from_network=network['name']))
if not self.check_mode:
try:
self.client.disconnect_container_from_network(container.Id, network['name'])
except Exception as exc:
self.fail("Error disconnecting container from network %s - %s" % (network['name'],
str(exc)))
return self._get_container(container.Id)
def container_create(self, image, create_parameters):
self.log("create container")
self.log("image: %s parameters:" % image)
self.log(create_parameters, pretty_print=True)
self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters))
self.results['changed'] = True
new_container = None
if not self.check_mode:
try:
new_container = self.client.create_container(image, **create_parameters)
except Exception as exc:
self.fail("Error creating container: %s" % str(exc))
return self._get_container(new_container['Id'])
return new_container
def container_start(self, container_id):
self.log("start container %s" % (container_id))
self.results['actions'].append(dict(started=container_id))
self.results['changed'] = True
if not self.check_mode:
try:
self.client.start(container=container_id)
except Exception as exc:
self.fail("Error starting container %s: %s" % (container_id, str(exc)))
if not self.parameters.detach:
status = self.client.wait(container_id)
config = self.client.inspect_container(container_id)
logging_driver = config['HostConfig']['LogConfig']['Type']
if logging_driver == 'json-file' or logging_driver == 'journald':
output = self.client.logs(container_id, stdout=True, stderr=True, stream=False, timestamps=False)
else:
output = "Result logged using `%s` driver" % logging_driver
if status != 0:
self.fail(output, status=status)
if self.parameters.cleanup:
self.container_remove(container_id, force=True)
insp = self._get_container(container_id)
if insp.raw:
insp.raw['Output'] = output
else:
insp.raw = dict(Output=output)
return insp
return self._get_container(container_id)
def container_remove(self, container_id, link=False, force=False):
volume_state = (not self.parameters.keep_volumes)
self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force))
self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force))
self.results['changed'] = True
response = None
if not self.check_mode:
try:
response = self.client.remove_container(container_id, v=volume_state, link=link, force=force)
except Exception as exc:
self.fail("Error removing container %s: %s" % (container_id, str(exc)))
return response
def container_update(self, container_id, update_parameters):
if update_parameters:
self.log("update container %s" % (container_id))
self.log(update_parameters, pretty_print=True)
self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters))
self.results['changed'] = True
            if not self.check_mode and callable(getattr(self.client, 'update_container', None)):
try:
self.client.update_container(container_id, **update_parameters)
except Exception as exc:
self.fail("Error updating container %s: %s" % (container_id, str(exc)))
return self._get_container(container_id)
def container_kill(self, container_id):
self.results['actions'].append(dict(killed=container_id, signal=self.parameters.kill_signal))
self.results['changed'] = True
response = None
if not self.check_mode:
try:
if self.parameters.kill_signal:
response = self.client.kill(container_id, signal=self.parameters.kill_signal)
else:
response = self.client.kill(container_id)
except Exception as exc:
self.fail("Error killing container %s: %s" % (container_id, exc))
return response
def container_stop(self, container_id):
if self.parameters.force_kill:
self.container_kill(container_id)
return
self.results['actions'].append(dict(stopped=container_id, timeout=self.parameters.stop_timeout))
self.results['changed'] = True
response = None
if not self.check_mode:
try:
if self.parameters.stop_timeout:
response = self.client.stop(container_id, timeout=self.parameters.stop_timeout)
else:
response = self.client.stop(container_id)
except Exception as exc:
self.fail("Error stopping container %s: %s" % (container_id, str(exc)))
return response
def main():
argument_spec = dict(
auto_remove=dict(type='bool', default=False),
blkio_weight=dict(type='int'),
capabilities=dict(type='list'),
cleanup=dict(type='bool', default=False),
command=dict(type='raw'),
cpu_period=dict(type='int'),
cpu_quota=dict(type='int'),
cpuset_cpus=dict(type='str'),
cpuset_mems=dict(type='str'),
cpu_shares=dict(type='int'),
detach=dict(type='bool', default=True),
devices=dict(type='list'),
dns_servers=dict(type='list'),
dns_opts=dict(type='list'),
dns_search_domains=dict(type='list'),
domainname=dict(type='str'),
env=dict(type='dict'),
env_file=dict(type='path'),
entrypoint=dict(type='list'),
etc_hosts=dict(type='dict'),
exposed_ports=dict(type='list', aliases=['exposed', 'expose']),
force_kill=dict(type='bool', default=False, aliases=['forcekill']),
groups=dict(type='list'),
hostname=dict(type='str'),
ignore_image=dict(type='bool', default=False),
image=dict(type='str'),
interactive=dict(type='bool', default=False),
ipc_mode=dict(type='str'),
keep_volumes=dict(type='bool', default=True),
kernel_memory=dict(type='str'),
kill_signal=dict(type='str'),
labels=dict(type='dict'),
links=dict(type='list'),
log_driver=dict(type='str',
choices=['none', 'json-file', 'syslog', 'journald', 'gelf', 'fluentd', 'awslogs', 'splunk'],
default=None),
log_options=dict(type='dict', aliases=['log_opt']),
mac_address=dict(type='str'),
memory=dict(type='str', default='0'),
memory_reservation=dict(type='str'),
memory_swap=dict(type='str'),
memory_swappiness=dict(type='int'),
name=dict(type='str', required=True),
network_mode=dict(type='str'),
userns_mode=dict(type='str'),
networks=dict(type='list'),
oom_killer=dict(type='bool'),
oom_score_adj=dict(type='int'),
paused=dict(type='bool', default=False),
pid_mode=dict(type='str'),
privileged=dict(type='bool', default=False),
published_ports=dict(type='list', aliases=['ports']),
pull=dict(type='bool', default=False),
purge_networks=dict(type='bool', default=False),
read_only=dict(type='bool', default=False),
recreate=dict(type='bool', default=False),
restart=dict(type='bool', default=False),
restart_policy=dict(type='str', choices=['no', 'on-failure', 'always', 'unless-stopped']),
restart_retries=dict(type='int', default=None),
shm_size=dict(type='str'),
security_opts=dict(type='list'),
state=dict(type='str', choices=['absent', 'present', 'started', 'stopped'], default='started'),
stop_signal=dict(type='str'),
stop_timeout=dict(type='int'),
tmpfs=dict(type='list'),
trust_image_content=dict(type='bool', default=False),
tty=dict(type='bool', default=False),
ulimits=dict(type='list'),
sysctls=dict(type='dict'),
user=dict(type='str'),
uts=dict(type='str'),
volumes=dict(type='list'),
volumes_from=dict(type='list'),
volume_driver=dict(type='str'),
working_dir=dict(type='str'),
)
required_if = [
('state', 'present', ['image'])
]
client = AnsibleDockerClient(
argument_spec=argument_spec,
required_if=required_if,
supports_check_mode=True
)
if not HAS_DOCKER_PY_2 and client.module.params.get('auto_remove'):
client.module.fail_json(msg="'auto_remove' is not compatible with docker-py, and requires the docker python module")
cm = ContainerManager(client)
client.module.exit_json(**cm.results)
if __name__ == '__main__':
main()
|
gpl-3.0
|
mcardacci/tools_of_the_dark_arts
|
droopescan/dscan/common/functions.py
|
2
|
10543
|
from __future__ import print_function
from collections import OrderedDict
from dscan.common.enum import colors, ScanningMethod
try:
from requests.exceptions import ConnectionError, ReadTimeout, ConnectTimeout, \
TooManyRedirects
except ImportError:
old_req = """Running a very old version of requests! Please `pip
install -U requests`."""
print(old_req)
import dscan
import hashlib
import pystache
import re
import sys
import traceback
import xml.etree.ElementTree as ET
SPLIT_PATTERN = re.compile('[ \t]+')
def repair_url(url):
"""
Fixes URL.
@param url: url to repair.
@param out: instance of StandardOutput as defined in this lib.
@return: Newline characters are stripped from the URL string.
If the url string parameter does not start with http, it prepends http://
If the url string parameter does not end with a slash, appends a slash.
If the url contains a query string, it gets removed.
"""
url = url.strip('\n')
if not re.match(r"^http", url):
url = "http://" + url
if "?" in url:
url, _ = url.split('?')
if not url.endswith("/"):
return url + "/"
    else:
return url
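# Illustrative examples (added comment; URLs are hypothetical):
#   repair_url('example.com')              -> 'http://example.com/'
#   repair_url('http://example.com/?q=1')  -> 'http://example.com/'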
def in_enum(string, enum):
return string in enum.__dict__
def enum_list(enum):
methods = []
for method in enum.__dict__:
if not method.startswith("_"):
methods.append(method)
return methods
def base_url(url):
"""
@param url: the url to get the base of.
@return: the protocol, domain and port of a URL, concatenated. If the
URL is relative, False is returned.
"""
if 'http' not in url:
return False
url_split = url.split("/")
return url_split[0] + "//" + url_split[2] + "/"
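# Illustrative example (added comment; URL is hypothetical):
#   base_url('http://example.com:8080/drupal/index.php')
#       -> 'http://example.com:8080/'
# A relative URL such as '/drupal/' returns False.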
def scan_http_status(scanning_method):
if scanning_method == ScanningMethod.not_found:
return 404
elif scanning_method == ScanningMethod.forbidden:
return 403
elif scanning_method == ScanningMethod.ok:
return 200
raise RuntimeError("Unexpected argument to common.scan_method")
def template(template_file, variables={}):
    variables.update(colors)
    # Close the template file once it has been read.
    with open(dscan.PWD + 'common/template/' + template_file, 'r') as f:
        template = f.read()
    renderer = pystache.Renderer(search_dirs=dscan.PWD)
    return renderer.render(template, variables)
def strip_whitespace(s):
return re.sub(r'\s+', ' ', s)
def is_string(var):
return isinstance(var, str)
def dict_combine(x, y):
z = x.copy()
z.update(y)
return z
def file_len(fname):
i = 0
with open(fname) as f:
for l in f:
i += 1
return i
def strip_letters(string):
return ''.join([c for c in str(string) if c in '1234567890.-_'])
def version_gt(version, gt):
"""
Code for parsing simple, numeric versions. Letters will be stripped prior to
comparison. Simple appendages such as 1-rc1 are supported. Test cases for
function are present on dscan/tests/fingerprint_tests.py
"""
version_split = strip_letters(version).split('.')
gt_split = strip_letters(gt).split('.')
v_len = len(version_split)
g_len = len(gt_split)
if v_len > g_len:
longest = version_split
shortest_len = len(gt_split)
l = v_len
else:
longest = gt_split
shortest_len = len(version_split)
l = g_len
gt = False
for i in range(l):
overcame_shortest = i >= shortest_len
if not overcame_shortest:
v = version_split[i]
g = gt_split[i]
v_is_rc = '-' in v or '_' in v
g_is_rc = '-' in g or '_' in g
if v_is_rc:
v_split = re.split(r'[-_]', v)
v = v_split[0]
try:
v_rc_nb = int(''.join(v_split[1:]))
except ValueError:
v_rc_nb = 0
if g_is_rc:
g_split = re.split(r'[-_]', g)
g = g_split[0]
try:
g_rc_nb = int(''.join(g_split[1:]))
except ValueError:
g_rc_nb = 0
try:
v = int(v)
except ValueError:
v = 0
try:
g = int(g)
except ValueError:
g = 0
if v > g:
gt = True
break
elif v < g:
break
else:
if not v_is_rc and g_is_rc:
gt = True
break
elif v_is_rc and not g_is_rc:
break
elif v_is_rc and g_is_rc:
if v_rc_nb > g_rc_nb:
gt = True
break
elif v_rc_nb < g_rc_nb:
break
else:
nb = longest[i]
is_rc = '-' in nb or '_' in nb
if is_rc:
nb = re.split(r'[-_]', nb)[0]
try:
nb_int = int(nb)
except ValueError:
if longest == version_split:
break
else:
gt = True
break
if nb_int > 0:
if longest == version_split:
gt = True
break
else:
break
return gt
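# Illustrative examples (added comment): version_gt('7.34', '7.33') and
# version_gt('7.34', '7.34-rc1') both return True, while
# version_gt('7.33', '7.34') returns False; release-candidate suffixes
# compare lower than the corresponding final release.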
def md5_file(filename):
    # Read in binary mode so hashing works on both text and binary files.
    with open(filename, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest()
def version_get():
"""
    Returns the droopescan version string. The original dynamic lookup was
    broken and not a useful feature, so it was replaced with a hard-coded,
    suitably elite version.
"""
version = '1.33.7'
return version
def error(msg):
raise RuntimeError('\033[91m%s\033[0m' % msg)
def exc_handle(url, out, testing):
"""
Handle exception. If of a determinate subset, it is stored into a file as a
single type. Otherwise, full stack is stored. Furthermore, if testing, stack
is always shown.
@param url: url which was being scanned when exception was thrown.
@param out: Output object, usually self.out.
@param testing: whether we are currently running unit tests.
"""
quiet_exceptions = [ConnectionError, ReadTimeout, ConnectTimeout,
TooManyRedirects]
type, value, _ = sys.exc_info()
if type not in quiet_exceptions or testing:
exc = traceback.format_exc()
exc_string = ("Line '%s' raised:\n" % url) + exc
out.warn(exc_string, whitespace_strp=False)
if testing:
print(exc)
else:
exc_string = "Line %s '%s: %s'" % (url, type, value)
out.warn(exc_string)
def tail(f, window=20):
"""
Returns the last `window` lines of file `f` as a list.
@param window: the number of lines.
"""
if window == 0:
return []
BUFSIZ = 1024
f.seek(0, 2)
bytes = f.tell()
size = window + 1
block = -1
data = []
while size > 0 and bytes > 0:
if bytes - BUFSIZ > 0:
# Seek back one whole BUFSIZ
f.seek(block * BUFSIZ, 2)
# read BUFFER
data.insert(0, f.read(BUFSIZ).decode('utf-8', errors='ignore'))
else:
            # file too small, start from the beginning
f.seek(0,0)
# only read what was not read
data.insert(0, f.read(bytes).decode('utf-8', errors='ignore'))
linesFound = data[0].count('\n')
size -= linesFound
bytes -= BUFSIZ
block -= 1
return ''.join(data).splitlines()[-window:]
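# Illustrative sketch (added comment; the file name is hypothetical): the file
# should be opened in binary mode, since the blocks read here are decoded and
# the function seeks relative to the end of the file:
#   with open('droopescan.log', 'rb') as f:
#       last_five_lines = tail(f, window=5)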
def _line_contains_host(url):
return re.search(SPLIT_PATTERN, url)
def process_host_line(line):
"""
Processes a line and determines whether it is a tab-delimited CSV of
url and host.
    Both returned values are whitespace-stripped.
    @param line: the line to analyse.
    @return: a tuple (url, host); host is None when the line contains
        only a URL.
"""
if not line:
return None, None
host = None
if _line_contains_host(line):
url, host = re.split(SPLIT_PATTERN, line.strip())
else:
url = line.strip()
return url, host
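# Illustrative examples (added comment; values are hypothetical):
#   process_host_line('http://192.0.2.1/\texample.com')
#       -> ('http://192.0.2.1/', 'example.com')
#   process_host_line('http://example.com/')
#       -> ('http://example.com/', None)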
def instances_get(opts, plugins, url_file_input, out):
"""
Creates and returns an ordered dictionary containing instances for all available
scanning plugins, sort of ordered by popularity.
@param opts: options as returned by self._options.
@param plugins: plugins as returned by plugins_util.plugins_base_get.
@param url_file_input: boolean value which indicates whether we are
scanning an individual URL or a file. This is used to determine
kwargs required.
@param out: self.out
"""
instances = OrderedDict()
preferred_order = ['wordpress', 'joomla', 'drupal']
for cms_name in preferred_order:
for plugin in plugins:
plugin_name = plugin.__name__.lower()
if cms_name == plugin_name:
instances[plugin_name] = instance_get(plugin, opts,
url_file_input, out)
for plugin in plugins:
plugin_name = plugin.__name__.lower()
if plugin_name not in preferred_order:
instances[plugin_name] = instance_get(plugin, opts,
url_file_input, out)
return instances
def instance_get(plugin, opts, url_file_input, out):
"""
Return an instance dictionary for an individual plugin.
@see Scan._instances_get.
"""
inst = plugin()
hp, func, enabled_func = inst._general_init(opts, out)
name = inst._meta.label
kwargs = {
'hide_progressbar': hp,
'functionality': func,
'enabled_functionality': enabled_func
}
if url_file_input:
del kwargs['hide_progressbar']
return {
'inst': inst,
'kwargs': kwargs
}
def result_anything_found(result):
"""
Interim solution for the fact that sometimes determine_scanning_method can
legitimately return a valid scanning method, but it results that the site
does not belong to a particular CMS.
@param result: the result as passed to Output.result()
@return: whether anything was found.
"""
keys = ['version', 'themes', 'plugins', 'interesting urls']
anything_found = False
for k in keys:
if k not in result:
continue
else:
if not result[k]['is_empty']:
anything_found = True
return anything_found
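# Illustrative example (added comment; the result structure is assumed from
# its use above): {'version': {'is_empty': True}} yields False, while
# {'plugins': {'is_empty': False}} yields True.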
|
gpl-3.0
|
felipenaselva/felipe.repository
|
script.module.streamhublive/resources/modules/js2py/pyjs.py
|
29
|
1884
|
from base import *
from constructors.jsmath import Math
from constructors.jsdate import Date
from constructors.jsobject import Object
from constructors.jsfunction import Function
from constructors.jsstring import String
from constructors.jsnumber import Number
from constructors.jsboolean import Boolean
from constructors.jsregexp import RegExp
from constructors.jsarray import Array
from prototypes.jsjson import JSON
from host.console import console
from host.jseval import Eval
from host.jsfunctions import parseFloat, parseInt, isFinite, isNaN
# Now we have all the necessary items to create global environment for script
__all__ = ['Js', 'PyJsComma', 'PyJsStrictEq', 'PyJsStrictNeq',
'PyJsException', 'PyJsBshift', 'Scope', 'PyExceptionToJs',
'JsToPyException', 'JS_BUILTINS', 'appengine', 'set_global_object',
'JsRegExp', 'PyJsException', 'PyExceptionToJs', 'JsToPyException', 'PyJsSwitchException']
# these were defined in base.py
builtins = ('true','false','null','undefined','Infinity',
'NaN', 'console', 'String', 'Number', 'Boolean', 'RegExp',
'Math', 'Date', 'Object', 'Function', 'Array',
'parseFloat', 'parseInt', 'isFinite', 'isNaN')
#Array, Function, JSON, Error is done later :)
# also some built in functions like eval...
def set_global_object(obj):
obj.IS_CHILD_SCOPE = False
this = This({})
this.own = obj.own
this.prototype = obj.prototype
PyJs.GlobalObject = this
# make this available
obj.register('this')
obj.put('this', this)
scope = dict(zip(builtins, [globals()[e] for e in builtins]))
# Now add errors:
for name, error in ERRORS.iteritems():
scope[name] = error
#add eval
scope['eval'] = Eval
scope['JSON'] = JSON
JS_BUILTINS = {}
#k:v for k,v in scope.iteritems()
for k,v in scope.iteritems():
JS_BUILTINS[k] = v
|
gpl-2.0
|
diox/app-validator
|
appvalidator/testcases/javascript/instanceactions.py
|
5
|
3341
|
import jstypes
import utils
from appvalidator.constants import BUGZILLA_BUG
from appvalidator.csp import warn
from .instanceproperties import _set_HTML_property
def createElement(args, traverser, wrapper):
"""Handles createElement calls"""
if not args:
return
first_as_str = utils.get_as_str(args[0].get_literal_value(traverser))
if first_as_str.lower() == u"script":
_create_script_tag(traverser)
elif not isinstance(args[0], jstypes.JSLiteral):
_create_variable_element(traverser)
def createElementNS(args, traverser, wrapper):
"""Handles createElementNS calls"""
if not args or len(args) < 2:
return
second_as_str = utils.get_as_str(args[1].get_literal_value(traverser))
if "script" in second_as_str.lower():
_create_script_tag(traverser)
elif not isinstance(args[1], jstypes.JSLiteral):
_create_variable_element(traverser)
def _create_script_tag(traverser):
"""Raises a warning that the dev is creating a script tag"""
warn(traverser.err,
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
context=traverser.context,
violation_type="createElement-script")
def _create_variable_element(traverser):
"""Raises a warning that the dev is creating an arbitrary element"""
warn(traverser.err,
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
context=traverser.context,
violation_type="createElement-variable")
def insertAdjacentHTML(args, traverser, wrapper):
"""
    Perform the same tests on content inserted into the DOM via
insertAdjacentHTML as we otherwise would for content inserted via the
various innerHTML/outerHTML properties.
"""
if not args or len(args) < 2:
return
_set_HTML_property("insertAdjacentHTML", args[1], traverser)
def setAttribute(args, traverser, wrapper):
"""This ensures that setAttribute calls don't set on* attributes"""
if not args:
return
first_as_str = utils.get_as_str(args[0].get_literal_value(traverser))
if first_as_str.lower().startswith("on"):
warn(traverser.err,
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
context=traverser.context,
violation_type="setAttribute-on")
def bind(args, traverser, wrapper):
"""This mimics the `Function.prototype.bind` method."""
if wrapper.callable and wrapper.TYPEOF == "function":
return wrapper # Just pass it through.
return jstypes.JSObject(traverser=traverser)
def feature(constant):
def wrap(args, traverser, wrapper):
traverser.log_feature(constant)
return wrap
INSTANCE_DEFINITIONS = {
u"bind": bind,
u"createElement": createElement,
u"createElementNS": createElementNS,
u"insertAdjacentHTML": insertAdjacentHTML,
u"setAttribute": setAttribute,
u"requestFullScreen": feature("FULLSCREEN"),
u"mozRequestFullScreen": feature("FULLSCREEN"),
u"webkitRequestFullScreen": feature("FULLSCREEN"),
u"requestPointerLock": feature("POINTER_LOCK"),
u"mozRequestPointerLock": feature("POINTER_LOCK"),
u"webkitRequestPointerLock": feature("POINTER_LOCK"),
}
|
bsd-3-clause
|
ProfessorX/Config
|
.PyCharm30/system/python_stubs/-1247972723/PyKDE4/kdecore/KConfigBase.py
|
1
|
1886
|
# encoding: utf-8
# module PyKDE4.kdecore
# from /usr/lib/python2.7/dist-packages/PyKDE4/kdecore.so
# by generator 1.135
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtNetwork as __PyQt4_QtNetwork
class KConfigBase(): # skipped bases: <type 'sip.wrapper'>
# no doc
def accessMode(self, *args, **kwargs): # real signature unknown
pass
def deleteGroup(self, *args, **kwargs): # real signature unknown
pass
def deleteGroupImpl(self, *args, **kwargs): # real signature unknown
pass
def group(self, *args, **kwargs): # real signature unknown
pass
def groupImpl(self, *args, **kwargs): # real signature unknown
pass
def groupList(self, *args, **kwargs): # real signature unknown
pass
def hasGroup(self, *args, **kwargs): # real signature unknown
pass
def hasGroupImpl(self, *args, **kwargs): # real signature unknown
pass
def isGroupImmutable(self, *args, **kwargs): # real signature unknown
pass
def isGroupImmutableImpl(self, *args, **kwargs): # real signature unknown
pass
def isImmutable(self, *args, **kwargs): # real signature unknown
pass
def markAsClean(self, *args, **kwargs): # real signature unknown
pass
def sync(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
AccessMode = None # (!) real value is ''
Global = 2
Localized = 4
NoAccess = 0
Normal = 1
Persistent = 1
ReadOnly = 1
ReadWrite = 2
WriteConfigFlag = None # (!) real value is ''
WriteConfigFlags = None # (!) real value is ''
|
gpl-2.0
|
bverburg/CouchPotatoServer
|
couchpotato/core/notifications/plex/server.py
|
31
|
5113
|
from datetime import timedelta, datetime
from urlparse import urlparse
import traceback
from couchpotato.core.helpers.variable import cleanHost
from couchpotato import CPLog
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
log = CPLog(__name__)
class PlexServer(object):
def __init__(self, plex):
self.plex = plex
self.clients = {}
self.last_clients_update = None
def staleClients(self):
if not self.last_clients_update:
return True
return self.last_clients_update + timedelta(minutes=15) < datetime.now()
def request(self, path, data_type='xml'):
if not self.plex.conf('media_server'):
log.warning("Plex media server hostname is required")
return None
if path.startswith('/'):
path = path[1:]
#Maintain support for older Plex installations without myPlex
if not self.plex.conf('auth_token') and not self.plex.conf('username') and not self.plex.conf('password'):
data = self.plex.urlopen('%s/%s' % (
self.createHost(self.plex.conf('media_server'), port = 32400),
path
))
else:
#Fetch X-Plex-Token if it doesn't exist but a username/password do
if not self.plex.conf('auth_token') and (self.plex.conf('username') and self.plex.conf('password')):
import urllib2, base64
log.info("Fetching a new X-Plex-Token from plex.tv")
username = self.plex.conf('username')
password = self.plex.conf('password')
req = urllib2.Request("https://plex.tv/users/sign_in.xml", data="")
authheader = "Basic %s" % base64.encodestring('%s:%s' % (username, password))[:-1]
req.add_header("Authorization", authheader)
req.add_header("X-Plex-Product", "Couchpotato Notifier")
req.add_header("X-Plex-Client-Identifier", "b3a6b24dcab2224bdb101fc6aa08ea5e2f3147d6")
req.add_header("X-Plex-Version", "1.0")
try:
response = urllib2.urlopen(req)
except urllib2.URLError, e:
log.info("Error fetching token from plex.tv")
try:
auth_tree = etree.parse(response)
token = auth_tree.findall(".//authentication-token")[0].text
self.plex.conf('auth_token', token)
except (ValueError, IndexError) as e:
log.info("Error parsing plex.tv response: " + ex(e))
#Add X-Plex-Token header for myPlex support workaround
data = self.plex.urlopen('%s/%s?X-Plex-Token=%s' % (
self.createHost(self.plex.conf('media_server'), port = 32400),
path,
self.plex.conf('auth_token')
))
if data_type == 'xml':
return etree.fromstring(data)
else:
return data
def updateClients(self, client_names):
log.info('Searching for clients on Plex Media Server')
self.clients = {}
result = self.request('clients')
if not result:
return
found_clients = [
c for c in result.findall('Server')
if c.get('name') and c.get('name').lower() in client_names
]
# Store client details in cache
for client in found_clients:
name = client.get('name').lower()
self.clients[name] = {
'name': client.get('name'),
'found': True,
'address': client.get('address'),
'port': client.get('port'),
'protocol': client.get('protocol', 'xbmchttp')
}
client_names.remove(name)
# Store dummy info for missing clients
for client_name in client_names:
self.clients[client_name] = {
'found': False
}
if len(client_names) > 0:
log.debug('Unable to find clients: %s', ', '.join(client_names))
self.last_clients_update = datetime.now()
def refresh(self, section_types=None):
if not section_types:
section_types = ['movie']
sections = self.request('library/sections')
try:
for section in sections.findall('Directory'):
if section.get('type') not in section_types:
continue
self.request('library/sections/%s/refresh' % section.get('key'), 'text')
except:
log.error('Plex library update failed for %s, Media Server not running: %s',
(self.plex.conf('media_server'), traceback.format_exc(1)))
return False
return True
def createHost(self, host, port = None):
h = cleanHost(host)
p = urlparse(h)
h = h.rstrip('/')
if port and not p.port:
h += ':%s' % port
return h
|
gpl-3.0
|
shakamunyi/ansible
|
lib/ansible/plugins/lookup/template.py
|
14
|
1887
|
# (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.template import Templar
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
if not isinstance(terms, list):
terms = [ terms ]
basedir = self.get_basedir(variables)
ret = []
templar = Templar(loader=self._loader, variables=variables)
for term in terms:
self._display.debug("File lookup term: %s" % term)
lookupfile = self._loader.path_dwim_relative(basedir, 'templates', term)
self._display.vvvv("File lookup using %s as file" % lookupfile)
if lookupfile and os.path.exists(lookupfile):
with open(lookupfile, 'r') as f:
template_data = f.read()
res = templar.template(template_data, preserve_trailing_newlines=True)
ret.append(res)
else:
raise AnsibleError("the template file %s could not be found for the lookup" % term)
return ret
|
gpl-3.0
|
UrusTeam/URUS
|
Tools/autotest/pysim/vehicleinfo.py
|
11
|
11250
|
class VehicleInfo(object):
def __init__(self):
"""
make_target: option passed to make to create binaries. Usually sitl, and "-debug" may be appended if -D is passed to sim_vehicle.py
default_params_filename: filename of default parameters file. Taken to be relative to autotest dir.
extra_mavlink_cmds: extra parameters that will be passed to mavproxy
"""
self.options = {
"ArduCopter": {
"default_frame": "quad",
"frames": {
# COPTER
"+": {
"waf_target": "bin/arducopter",
"default_params_filename": "default_params/copter.parm",
},
"quad": {
"model": "+",
"waf_target": "bin/arducopter",
"default_params_filename": "default_params/copter.parm",
},
"X": {
"waf_target": "bin/arducopter",
"default_params_filename": "default_params/copter.parm",
# this param set FRAME doesn't actually work because mavproxy
# won't set a parameter unless it knows of it, and the
# param fetch happens asynchronously
"extra_mavlink_cmds": "param fetch frame; param set FRAME 1;",
},
"hexa": {
"make_target": "sitl",
"waf_target": "bin/arducopter",
"default_params_filename": ["default_params/copter.parm",
"default_params/copter-hexa.parm" ],
},
"octa-quad": {
"make_target": "sitl",
"waf_target": "bin/arducopter",
"default_params_filename": ["default_params/copter.parm",
"default_params/copter-octaquad.parm" ],
},
"octa": {
"make_target": "sitl",
"waf_target": "bin/arducopter",
"default_params_filename": ["default_params/copter.parm",
"default_params/copter-octa.parm" ],
},
"tri": {
"make_target": "sitl",
"waf_target": "bin/arducopter",
"default_params_filename": ["default_params/copter.parm",
"default_params/copter-tri.parm" ],
},
"y6": {
"make_target": "sitl",
"waf_target": "bin/arducopter",
"default_params_filename": ["default_params/copter.parm",
"default_params/copter-y6.parm" ],
},
"dodeca-hexa": {
"make_target": "sitl",
"waf_target": "bin/arducopter",
"default_params_filename": ["default_params/copter.parm",
"default_params/copter-dodecahexa.parm" ],
},
# SIM
"IrisRos": {
"waf_target": "bin/arducopter",
"default_params_filename": "default_params/copter.parm",
},
"gazebo-iris": {
"waf_target": "bin/arducopter",
"default_params_filename": ["default_params/copter.parm",
"default_params/gazebo-iris.parm"],
},
# HELICOPTER
"heli": {
"make_target": "sitl-heli",
"waf_target": "bin/arducopter-heli",
"default_params_filename": "default_params/copter-heli.parm",
},
"heli-dual": {
"make_target": "sitl-heli-dual",
"waf_target": "bin/arducopter-heli",
"default_params_filename": ["default_params/copter-heli.parm",
"default_params/copter-heli-dual.parm"],
},
"heli-compound": {
"make_target": "sitl-heli-compound",
"waf_target": "bin/arducopter-heli",
},
"singlecopter": {
"make_target": "sitl",
"waf_target": "bin/arducopter",
"default_params_filename": "default_params/copter-single.parm",
},
"coaxcopter": {
"make_target": "sitl",
"waf_target": "bin/arducopter",
"default_params_filename": ["default_params/copter-single.parm",
"default_params/copter-coax.parm"],
},
"calibration": {
"extra_mavlink_cmds": "module load sitl_calibration;",
},
},
},
"ArduPlane": {
"default_frame": "plane",
"frames": {
# PLANE
"quadplane-tilttri": {
"make_target": "sitl",
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/quadplane-tilttri.parm",
},
"quadplane-tilttrivec": {
"make_target": "sitl",
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/quadplane-tilttrivec.parm",
},
"quadplane-tilthvec": {
"make_target": "sitl",
"waf_target": "bin/arduplane",
"default_params_filename": ["default_params/plane.parm", "default_params/quadplane-tilthvec.parm"],
},
"quadplane-tri": {
"make_target": "sitl",
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/quadplane-tri.parm",
},
"quadplane-cl84" : {
"make_target" : "sitl",
"waf_target" : "bin/arduplane",
"default_params_filename": "default_params/quadplane-cl84.parm",
},
"quadplane": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/quadplane.parm",
},
"firefly": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/firefly.parm",
},
"plane-elevon": {
"waf_target": "bin/arduplane",
"default_params_filename": ["default_params/plane.parm", "default_params/plane-elevons.parm"],
},
"plane-vtail": {
"waf_target": "bin/arduplane",
"default_params_filename": ["default_params/plane.parm", "default_params/plane-vtail.parm"],
},
"plane-tailsitter": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/plane-tailsitter.parm",
},
"plane": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/plane.parm",
},
"plane-dspoilers": {
"waf_target": "bin/arduplane",
"default_params_filename": ["default_params/plane.parm", "default_params/plane-dspoilers.parm"]
},
"gazebo-zephyr": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/gazebo-zephyr.parm",
},
"last_letter": {
"waf_target": "bin/arduplane",
},
"CRRCSim": {
"waf_target": "bin/arduplane",
},
"jsbsim": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/plane-jsbsim.parm",
},
"calibration": {
"extra_mavlink_cmds": "module load sitl_calibration;",
},
},
},
"APMrover2": {
"default_frame": "rover",
"frames": {
# ROVER
"rover": {
"waf_target": "bin/ardurover",
"default_params_filename": "default_params/rover.parm",
},
"rover-skid": {
"waf_target": "bin/ardurover",
"default_params_filename": ["default_params/rover.parm",
"default_params/rover-skid.parm"],
},
"gazebo-rover": {
"waf_target": "bin/ardurover",
"default_params_filename": ["default_params/rover.parm",
"default_params/rover-skid.parm"],
},
"calibration": {
"extra_mavlink_cmds": "module load sitl_calibration;",
},
},
},
"ArduSub": {
"default_frame": "vectored",
"frames": {
"vectored": {
"waf_target": "bin/ardusub",
"default_params_filename": "default_params/sub.parm",
},
"gazebo-bluerov2": {
"waf_target": "bin/ardusub",
"default_params_filename": "default_params/sub.parm",
},
},
},
"AntennaTracker": {
"default_frame": "tracker",
"frames": {
"tracker": {
"waf_target": "bin/antennatracker",
},
},
},
}
def default_frame(self, vehicle):
return self.options[vehicle]["default_frame"]
def default_waf_target(self, vehicle):
"""Returns a waf target based on vehicle type, which is often determined by which directory the user is in"""
default_frame = self.default_frame(vehicle)
return self.options[vehicle]["frames"][default_frame]["waf_target"]
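    # Illustrative example (added comment): VehicleInfo().default_waf_target('ArduCopter')
    # returns 'bin/arducopter', because the default "quad" frame maps to that
    # waf target in the options table above.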
def options_for_frame(self, frame, vehicle, opts):
"""Return informatiom about how to sitl for frame e.g. build-type==sitl"""
ret = None
frames = self.options[vehicle]["frames"]
if frame in frames:
ret = self.options[vehicle]["frames"][frame]
else:
for p in ["octa", "tri", "y6", "firefly", "heli", "gazebo", "last_letter", "jsbsim", "quadplane", "plane-elevon", "plane-vtail", "plane"]:
if frame.startswith(p):
ret = self.options[vehicle]["frames"][p]
break
if ret is None:
if frame.endswith("-heli"):
ret = self.options[vehicle]["frames"]["heli"]
if ret is None:
print("WARNING: no config for frame (%s)" % frame)
ret = {}
if "model" not in ret:
ret["model"] = frame
if "sitl-port" not in ret:
ret["sitl-port"] = True
if opts.model is not None:
ret["model"] = opts.model
if (ret["model"].find("xplane") != -1 or ret["model"].find("flightaxis") != -1):
ret["sitl-port"] = False
if "make_target" not in ret:
ret["make_target"] = "sitl"
if "waf_target" not in ret:
ret["waf_target"] = self.default_waf_target(vehicle)
if opts.build_target is not None:
ret["make_target"] = opts.build_target
ret["waf_target"] = opts.build_target
return ret
|
gpl-3.0
|
ihidalgo/uip-prog3
|
Parciales/practicas/kivy-designer-master/designer/project_loader.py
|
1
|
47244
|
import re
import os
import sys
import inspect
import time
import functools
import shutil
import imp
from six import exec_
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.button import Button
from kivy.base import runTouchApp
from kivy.factory import Factory, FactoryException
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from kivy.uix.sandbox import Sandbox
from kivy.clock import Clock
from designer.helper_functions import get_indentation, get_indent_str,\
get_line_start_pos, get_kivy_designer_dir
from designer.proj_watcher import ProjectWatcher
PROJ_DESIGNER = '.designer'
KV_PROJ_FILE_NAME = os.path.join(PROJ_DESIGNER, 'kvproj')
PROJ_FILE_CONFIG = os.path.join(PROJ_DESIGNER, 'file_config.ini')
ignored_paths = ('.designer', '.buildozer', '.git', '__pycache__', 'bin', )
class Comment(object):
'''Comment is an Abstract class for representing a commentary.
'''
def __init__(self, string, path, _file):
super(Comment, self).__init__()
self.string = string
self.path = path
self.kv_file = _file
class WidgetRule(object):
'''WidgetRule is an Abstract class for representing a rule of Widget.
'''
def __init__(self, widget, parent):
super(WidgetRule, self).__init__()
self.name = widget
self.parent = parent
self.file = None
self.kv_file = None
self.module = None
class ClassRule(WidgetRule):
'''ClassRule is a class for representing a class rule in kv
'''
def __init__(self, class_name):
super(ClassRule, self).__init__(class_name, None)
class CustomWidgetRule(ClassRule):
'''CustomWidgetRule is a class for representing a custom widgets rule in kv
'''
def __init__(self, class_name, kv_file, py_file):
super(ClassRule, self).__init__(class_name, None)
self.class_name = class_name
self.kv_file = kv_file
self.py_file = py_file
class RootRule(ClassRule):
'''RootRule is a class for representing root rule in kv.
'''
def __init__(self, class_name, widget):
super(RootRule, self).__init__(class_name)
self.widget = widget
class ProjectLoaderException(Exception):
pass
class ProjectLoader(object):
'''ProjectLoader class, used to load Project
'''
def __init__(self, proj_watcher):
super(ProjectLoader, self).__init__()
self._dir_list = []
self.proj_watcher = proj_watcher
self.class_rules = []
self.root_rule = None
self.new_project = None
self.dict_file_type_and_path = {}
self.kv_file_list = []
self.kv_code_input = None
self.tab_pannel = None
self._root_rule = None
self.file_list = []
self.proj_dir = ""
self._is_root_already_in_factory = False
def update_file_list(self):
'''
Update and return the file_list object
'''
self.file_list = self._get_file_list(self.proj_dir)
return self.file_list
def _get_file_list(self, path):
'''This function is recursively called for loading all py file files
in the current directory.
'''
file_list = []
for ignored in ignored_paths:
if ignored in path:
return []
if os.path.isfile(path):
path = os.path.dirname(path)
sys.path.insert(0, path)
self._dir_list.append(path)
for _file in os.listdir(path):
file_path = os.path.join(path, _file)
if os.path.isdir(file_path):
file_list += self._get_file_list(file_path)
else:
# Consider only kv, py and buildozer(spec) files
if file_path[file_path.rfind('.'):] in [".py", ".spec"]:
if os.path.dirname(file_path) == self.proj_dir:
file_list.insert(0, file_path)
else:
file_list.append(file_path)
return file_list
def add_custom_widget(self, py_path):
'''This function is used to add a custom widget given path to its
py file.
'''
f = open(py_path, 'r')
py_string = f.read()
f.close()
# Find path to kv. py file will have Builder.load_file('path/to/kv')
_r = re.findall(r'Builder\.load_file\s*\(\s*.+\s*\)', py_string)
if _r == []:
raise ProjectLoaderException('Cannot find widget\'s kv file.')
py_string = py_string.replace(_r[0], '')
kv_path = _r[0][_r[0].find('(') + 1: _r[0].find(')')]
py_string = py_string.replace(kv_path, '')
kv_path = kv_path.replace("'", '').replace('"', '')
f = open(kv_path, 'r')
kv_string = f.read()
f.close()
# Remove all the 'app' lines
for app_str in re.findall(r'.+app+.+', kv_string):
kv_string = kv_string.replace(
app_str,
app_str[:get_indentation(app_str)] + '#' + app_str.lstrip())
Builder.load_string(kv_string)
sys.path.insert(0, os.path.dirname(kv_path))
_to_check = []
# Get all the class_rules
for class_str in re.findall(r'<+([\w_]+)>', kv_string):
if re.search(r'\bclass\s+%s+.+:' % class_str, py_string):
module = imp.new_module('CustomWidget')
exec_(py_string, module.__dict__)
sys.modules['AppModule'] = module
class_rule = CustomWidgetRule(class_str, kv_path, py_path)
class_rule.file = py_path
class_rule.module = module
self.custom_widgets.append(class_rule)
def get_root_str(self, kv_str=''):
'''This function will get the root widgets rule from either kv_str
or if it is empty string then from the kv file of root widget
'''
if kv_str == '':
f = open(self.root_rule.kv_file, 'r')
kv_str = f.read()
f.close()
# Find the start position of root_rule
start_pos = kv_str.find(self.root_rule.name)
if start_pos == -1:
raise ProjectLoaderException(
'Cannot find root rule in its file')
# Get line for start_pos
_line = 0
_line_pos = 0
_line_pos = kv_str.find('\n', _line_pos + 1)
while _line_pos != -1 and _line_pos < start_pos:
_line_pos = kv_str.find('\n', _line_pos + 1)
_line += 1
# Find the end position of root_rule, where indentation becomes 0
# or file ends
_line += 1
lines = kv_str.splitlines()
_total_lines = len(lines)
while _line < _total_lines and (lines[_line].strip() == '' or
get_indentation(lines[_line]) != 0):
_line_pos = kv_str.find('\n', _line_pos + 1)
_line += 1
end_pos = _line_pos
root_old_str = kv_str[start_pos: end_pos]
for _rule in self.class_rules:
if _rule.name == self.root_rule.name:
root_old_str = "<" + root_old_str
return root_old_str
def get_full_str(self):
'''This function will give the full string of all detected kv files.
'''
text = ''
for _file in self.kv_file_list:
f = open(_file, 'r')
text += f.read() + '\n'
f.close()
return text
def load_new_project(self, kv_path):
'''To load a new project given by kv_path
'''
self.new_project = True
self._load_project(kv_path)
def load_project(self, kv_path):
'''To load a project given by kv_path
'''
ret = self._load_project(kv_path)
self.new_project = False
# Add project_dir to watch
self.proj_watcher.start_watching(self.proj_dir)
return ret
def _load_project(self, kv_path):
        '''Private function to load any project given by kv_path
'''
if os.path.isdir(kv_path):
self.proj_dir = kv_path
else:
self.proj_dir = os.path.dirname(kv_path)
parent_proj_dir = os.path.dirname(self.proj_dir)
sys.path.insert(0, parent_proj_dir)
self.class_rules = []
all_files_loaded = True
_file = None
for _file in os.listdir(self.proj_dir):
# Load each kv file in the directory
_file = os.path.join(self.proj_dir, _file)
if _file[_file.rfind('.'):] != '.kv':
continue
self.kv_file_list.append(_file)
f = open(_file, 'r')
kv_string = f.read()
f.close()
# Remove all the 'app' lines
for app_str in re.findall(r'.+app+.+', kv_string):
kv_string = kv_string.replace(
app_str,
app_str[:get_indentation(app_str)] +
'#' + app_str.lstrip())
# Get all the class_rules
for class_str in re.findall(r'<+([\w_]+)>', kv_string):
class_rule = ClassRule(class_str)
class_rule.kv_file = _file
self.class_rules.append(class_rule)
try:
root_name = re.findall(r'^([\w\d_]+)\:', kv_string,
re.MULTILINE)
if root_name != []:
# It will occur when there is a root rule and it can't
                    # be loaded by Builder because its file
# has been imported
root_name = root_name[0]
if not hasattr(Factory, root_name):
match = re.search(r'^([\w\d_]+)\:', kv_string,
re.MULTILINE)
kv_string = kv_string[:match.start()] + \
'<' + root_name + '>:' + kv_string[match.end():]
self.root_rule = RootRule(root_name, None)
self.root_rule.kv_file = _file
self._root_rule = self.root_rule
self._is_root_already_in_factory = False
else:
self._is_root_already_in_factory = True
else:
self._is_root_already_in_factory = False
re_kv_event = r'(\s+on_\w+\s*:.+)|([\s\w\d]+:[\.\s\w]+\(.*\))'
root_rule = Builder.load_string(re.sub(re_kv_event,
'', kv_string))
if root_rule:
self.root_rule = RootRule(root_rule.__class__.__name__,
root_rule)
self.root_rule.kv_file = _file
self._root_rule = self.root_rule
except Exception as e:
all_files_loaded = False
if not all_files_loaded:
raise ProjectLoaderException('Cannot load file "%s"' % (_file))
if os.path.exists(os.path.join(self.proj_dir, KV_PROJ_FILE_NAME)):
projdir_mtime = os.path.getmtime(self.proj_dir)
f = open(os.path.join(self.proj_dir, KV_PROJ_FILE_NAME), 'r')
proj_str = f.read()
f.close()
_file_is_valid = True
# Checking if the file is valid
if proj_str == '' or\
proj_str.count('<files>') != proj_str.count('</files>') or\
proj_str.count('<file>') != proj_str.count('</file>') or\
proj_str.count('<class>') != proj_str.count('</class>'):
_file_is_valid = False
if _file_is_valid:
projdir_time = proj_str[
proj_str.find('<time>') + len('<time>'):
proj_str.find('</time>')]
projdir_time = float(projdir_time.strip())
if _file_is_valid and projdir_mtime <= projdir_time:
# Project Directory folder hasn't been modified,
# file list will remain same
self.file_list = []
un_modified_files = []
start_pos = proj_str.find('<files>')
end_pos = proj_str.find('</files>')
if start_pos != -1 and end_pos != -1:
start_pos = proj_str.find('<file>', start_pos)
end_pos1 = proj_str.find('</file>', start_pos)
while start_pos < end_pos and start_pos != -1:
_file = proj_str[
start_pos + len('<file>'):end_pos1].strip()
self.file_list.append(_file)
if os.path.getmtime(_file) <= projdir_time:
un_modified_files.append(_file)
start_pos = proj_str.find('<file>', end_pos1)
end_pos1 = proj_str.find('</file>', start_pos)
for _file in self.file_list:
_dir = os.path.dirname(_file)
if _dir not in sys.path:
sys.path.insert(0, _dir)
# Reload information for app
start_pos = proj_str.find('<app>')
end_pos = proj_str.find('</app>')
if start_pos != -1 and end_pos != -1:
self._app_class = proj_str[
proj_str.find('<class>', start_pos) + len('<class>'):
proj_str.find('</class>', start_pos)].strip()
self._app_file = proj_str[
proj_str.find('<file>', start_pos) + len('<file>'):
proj_str.find('</file>', start_pos)].strip()
f = open(self._app_file, 'r')
self._app_module = self._import_module(f.read(),
self._app_file)
f.close()
# Reload information for the files which haven't been modified
start_pos = proj_str.find('<classes>')
end_pos = proj_str.find('</classes>')
if start_pos != -1 and end_pos != -1:
while start_pos < end_pos and start_pos != -1:
start_pos = proj_str.find('<class>', start_pos) +\
len('<class>')
end_pos1 = proj_str.find('</class>', start_pos)
_file = proj_str[
proj_str.find('<file>', start_pos) + len('<file>'):
proj_str.find('</file>', start_pos)].strip()
if _file in un_modified_files:
# If _file is un modified then assign it to
# class rule with _name
_name = proj_str[
proj_str.find('<name>', start_pos) +
len('<name>'):
proj_str.find('</name>', start_pos)].strip()
for _rule in self.class_rules:
if _name == _rule.name:
_rule.file = _file
f = open(_file, 'r')
_rule.module = self._import_module(
f.read(), _file, _fromlist=[_name])
f.close()
start_pos = proj_str.find('<class>', start_pos)
end_pos1 = proj_str.find('</class>', start_pos)
if self.file_list == []:
self.file_list = self._get_file_list(self.proj_dir)
# Get all files corresponding to each class
self._get_class_files()
# If root widget is not created but root class is known
# then create widget
if self.root_rule and not self.root_rule.widget and \
self.root_rule.name:
self.root_rule.widget = self.get_widget_of_class(
self.root_rule.name)
self.load_proj_config()
def load_proj_config(self):
'''To load project's config file. Project's config file is stored in
.designer directory in project's directory.
'''
try:
f = open(os.path.join(self.proj_dir, PROJ_FILE_CONFIG), 'r')
s = f.read()
f.close()
start_pos = -1
end_pos = -1
start_pos = s.find('<file_type_and_dirs>\n')
end_pos = s.find('</file_type_and_dirs>\n')
if start_pos != -1 and end_pos != -1:
for searchiter in re.finditer(r'<file_type=.+', s):
if searchiter.start() < start_pos:
continue
if searchiter.start() > end_pos:
break
found_str = searchiter.group(0)
file_type = found_str[found_str.find('"') + 1:
found_str.find(
'"', found_str.find('"') + 1)]
folder = found_str[
found_str.find('"', found_str.find('dir=') + 1) + 1:
found_str.rfind('"')]
self.dict_file_type_and_path[file_type] = folder
except IOError:
pass
def save_proj_config(self):
'''To save project's config file.
'''
string = '<file_type_and_dirs>\n'
for file_type in self.dict_file_type_and_path.keys():
string += ' <file_type="' + file_type + '"' + ' dir="' + \
self.dict_file_type_and_path[file_type] + '">\n'
string += '</file_type_and_dirs>\n'
f = open(os.path.join(self.proj_dir, PROJ_FILE_CONFIG), 'w')
f.write(string)
f.close()
def add_dir_for_file_type(self, file_type, folder):
'''To add directory for specified file_type. More information in
add_file.py
'''
self.dict_file_type_and_path[file_type] = folder
self.save_proj_config()
def perform_auto_save(self, *args):
'''To perform auto save. Auto Save is done after every 5 min.
'''
if not self.root_rule:
return
auto_save_dir = os.path.join(self.proj_dir, '.designer')
auto_save_dir = os.path.join(auto_save_dir, 'auto_save')
if not os.path.exists(auto_save_dir):
os.makedirs(auto_save_dir)
else:
shutil.rmtree(auto_save_dir)
os.mkdir(auto_save_dir)
for _file in os.listdir(self.proj_dir):
if list(set(ignored_paths) & {_file}):
continue
old_file = os.path.join(self.proj_dir, _file)
new_file = os.path.join(auto_save_dir, _file)
if os.path.isdir(old_file):
shutil.copytree(old_file, new_file)
else:
shutil.copy(old_file, new_file)
root_rule_file = os.path.join(auto_save_dir,
os.path.basename(self.root_rule.kv_file))
f = open(root_rule_file, 'r')
_file_str = f.read()
f.close()
text = self.kv_code_input.text
root_str = self.get_root_str()
f = open(root_rule_file, 'w')
_file_str = _file_str.replace(root_str, text)
f.write(_file_str)
f.close()
# For custom widgets copy py and kv file
for widget in self.custom_widgets:
custom_kv = os.path.join(auto_save_dir,
os.path.basename(widget.kv_file))
if not os.path.exists(custom_kv):
shutil.copy(widget.kv_file, custom_kv)
custom_py = os.path.join(auto_save_dir,
os.path.basename(widget.py_file))
if not os.path.exists(custom_py):
shutil.copy(widget.py_file, custom_py)
def save_project(self, proj_dir=''):
'''To save project to proj_dir. If proj_dir is not empty string then
project is saved to a new directory other than its
current directory and otherwise it is saved to the
current directory.
'''
# To stop ProjectWatcher from emitting event when project is saved
self.proj_watcher.allow_event_dispatch = False
proj_dir_changed = False
if self.new_project:
# Create dir and copy new_proj.kv and new_proj.py to new directory
if not os.path.exists(proj_dir):
os.mkdir(proj_dir)
kivy_designer_dir = get_kivy_designer_dir()
kivy_designer_new_proj_dir = os.path.join(kivy_designer_dir,
"new_proj")
for _file in os.listdir(kivy_designer_new_proj_dir):
old_file = os.path.join(kivy_designer_new_proj_dir, _file)
new_file = os.path.join(proj_dir, _file)
if os.path.isdir(old_file):
shutil.copytree(old_file, new_file)
else:
shutil.copy(old_file, new_file)
self.file_list = self._get_file_list(proj_dir)
new_kv_file = os.path.join(proj_dir, "main.kv")
new_py_file = os.path.join(proj_dir, "main.py")
self.proj_dir = proj_dir
if self.root_rule:
self.root_rule.kv_file = new_kv_file
self.root_rule.py_file = new_py_file
if self.class_rules:
self.class_rules[0].py_file = new_py_file
self.class_rules[0].kv_file = new_kv_file
self.new_project = False
else:
if proj_dir != '' and proj_dir != self.proj_dir:
proj_dir_changed = True
# Remove previous project directories from sys.path
for _dir in self._dir_list:
try:
sys.path.remove(_dir)
                    except ValueError:
                        pass
# if proj_dir and self.proj_dir differs then user wants to save
# an already opened project to somewhere else
# Copy all the files
if not os.path.exists(proj_dir):
os.mkdir(proj_dir)
for _file in os.listdir(self.proj_dir):
old_file = os.path.join(self.proj_dir, _file)
new_file = os.path.join(proj_dir, _file)
if os.path.isdir(old_file):
shutil.copytree(old_file, new_file)
else:
shutil.copy(old_file, new_file)
self.file_list = self._get_file_list(proj_dir)
# Change the path of all files in the class rules,
# root rule and app
                relative_path = os.path.relpath(self._app_file, self.proj_dir)
                self._app_file = os.path.join(proj_dir, relative_path)
f = open(self._app_file, 'r')
s = f.read()
f.close()
self._import_module(s, self._app_file,
_fromlist=[self._app_class])
for _rule in self.class_rules:
                    relative_path = os.path.relpath(_rule.kv_file,
                                                    self.proj_dir)
                    _rule.kv_file = os.path.join(proj_dir, relative_path)
                    relative_path = os.path.relpath(_rule.file, self.proj_dir)
                    _rule.file = os.path.join(proj_dir, relative_path)
f = open(_rule.file, 'r')
s = f.read()
f.close()
self._import_module(s, _rule.file, _fromlist=[_rule.name])
                relative_path = os.path.relpath(self.root_rule.kv_file,
                                                self.proj_dir)
                self.root_rule.kv_file = os.path.join(proj_dir, relative_path)
                relative_path = os.path.relpath(self.root_rule.file,
                                                self.proj_dir)
                self.root_rule.file = os.path.join(proj_dir, relative_path)
self.proj_dir = proj_dir
# For custom widgets copy py and kv file to project directory
for widget in self.custom_widgets:
custom_kv = os.path.join(self.proj_dir,
os.path.basename(widget.kv_file))
if not os.path.exists(custom_kv):
shutil.copy(widget.kv_file, custom_kv)
custom_py = os.path.join(self.proj_dir,
os.path.basename(widget.py_file))
if not os.path.exists(custom_py):
shutil.copy(widget.py_file, custom_py)
# Saving all opened py files and also reimport them
for _code_input in self.tab_pannel.list_py_code_inputs:
path = os.path.join(self.proj_dir, _code_input.rel_file_path)
f = open(path, 'w')
f.write(_code_input.text)
f.close()
_from_list = []
for rule in self.class_rules:
if rule.file == path:
                    _from_list.append(rule.name)
if not self.is_root_a_class_rule():
if self.root_rule.file == path:
_from_list.append(self.root_rule.name)
# Ignore all types that are not .py
if path.endswith(".py"):
self._import_module(_code_input.text, path,
_fromlist=_from_list)
# Save all class rules
text = self.kv_code_input.text
for _rule in self.class_rules:
# Get the kv text from KVLangArea and write it to class rule's file
f = open(_rule.kv_file, 'r')
_file_str = f.read()
f.close()
old_str = self.get_class_str_from_text(_rule.name, _file_str)
new_str = self.get_class_str_from_text(_rule.name, text)
f = open(_rule.kv_file, 'w')
_file_str = _file_str.replace(old_str, new_str)
f.write(_file_str)
f.close()
# If root widget is not changed
if self._root_rule.name == self.root_rule.name:
# Save root widget's rule
is_root_class = False
for _rule in self.class_rules:
if _rule.name == self.root_rule.name:
is_root_class = True
break
if not is_root_class:
f = open(self.root_rule.kv_file, 'r')
_file_str = f.read()
f.close()
old_str = self.get_class_str_from_text(self.root_rule.name,
_file_str,
is_class=False)
new_str = self.get_class_str_from_text(self.root_rule.name,
text, is_class=False)
f = open(self.root_rule.kv_file, 'w')
_file_str = _file_str.replace(old_str, new_str)
f.write(_file_str)
f.close()
else:
# If root widget is changed
# Root Widget changes, there can be these cases:
root_name = self.root_rule.name
f = open(self._app_file, 'r')
file_str = f.read()
f.close()
self._root_rule = self.root_rule
if self.is_root_a_class_rule() and self._app_file:
                # Root Widget's rule is a custom class rule, so it has already
                # been saved. The string of App's build() function will be
                # changed to return the new root widget's class.
if self._app_class != 'runTouchApp':
s = re.search(r'class\s+%s.+:' % self._app_class, file_str)
if s:
build_searchiter = None
for searchiter in re.finditer(
r'[ \ \t]+def\s+build\s*\(\s*self.+\s*:',
file_str):
if searchiter.start() > s.start():
build_searchiter = searchiter
break
if build_searchiter:
indent = get_indentation(build_searchiter.group(0))
file_str = file_str[:build_searchiter.end()] +\
'\n' + get_indent_str(2 * indent) + "return " +\
root_name + "()\n" + \
file_str[build_searchiter.end():]
else:
file_str = file_str[:s.end()] + \
"\n def build(self):\n return " + \
root_name + '()\n' + file_str[s.end():]
else:
file_str = re.sub(r'runTouchApp\s*\(.+\)',
'runTouchApp(' + root_name + '())',
file_str)
f = open(self._app_file, 'w')
f.write(file_str)
f.close()
else:
# Root Widget's rule is not a custom class
# and its rule is root rule
# Its kv_file should be of App's class name
# and App's build() function should be cleared
if not self.root_rule.kv_file:
s = self._app_class.replace('App', '').lower()
root_file = None
for _file in self.kv_file_list:
if os.path.basename(_file).find(s) == 0:
self.root_rule.kv_file = _file
break
f = open(self.root_rule.kv_file, 'r')
_file_str = f.read()
f.close()
new_str = self.get_class_str_from_text(self.root_rule.name,
text, False)
f = open(self.root_rule.kv_file, 'a')
f.write(new_str)
f.close()
if self._app_class != 'runTouchApp':
s = re.search(r'class\s+%s.+:' % self._app_class, file_str)
if s:
build_searchiter = None
for searchiter in re.finditer(
r'[ \ \t]+def\s+build\s*\(\s*self.+\s*:',
file_str):
if searchiter.start() > s.start():
build_searchiter = searchiter
break
if build_searchiter:
lines = file_str.splitlines()
total_lines = len(lines)
indent = get_indentation(build_searchiter.group(0))
_line = 0
_line_pos = -1
_line_pos = file_str.find('\n', _line_pos + 1)
while _line_pos <= build_searchiter.start():
_line_pos = file_str.find('\n', _line_pos + 1)
_line += 1
_line += 1
while _line < total_lines:
if lines[_line].strip() != '' and\
get_indentation(lines[_line]) <= \
indent:
break
_line += 1
_line -= 1
end = get_line_start_pos(file_str, _line)
start = build_searchiter.start()
file_str = file_str.replace(file_str[start:end],
' pass')
f = open(self._app_file, 'w')
f.write(file_str)
f.close()
# Allow Project Watcher to emit events
Clock.schedule_once(self._allow_proj_watcher_dispatch, 1)
def get_class_str_from_text(self, class_name, _file_str, is_class=True):
'''To return the full class rule of class_name from _file_str
'''
_file_str += '\n'
start_pos = -1
# Find the start position of class_name
if is_class:
start_pos = _file_str.find('<' + class_name + '>:')
else:
while True:
start_pos = _file_str.find(class_name, start_pos + 1)
                if start_pos == 0 or not _file_str[start_pos - 1].isalnum():
break
_line = 0
_line_pos = 0
_line_pos = _file_str.find('\n', _line_pos + 1)
while _line_pos != -1 and _line_pos < start_pos:
_line_pos = _file_str.find('\n', _line_pos + 1)
_line += 1
# Find the end position of class_name, where indentation becomes 0
# or file ends
_line += 1
lines = _file_str.splitlines()
_total_lines = len(lines)
hash_pos = 0
while hash_pos == 0 and _line < _total_lines:
hash_pos = lines[_line].find('#')
if hash_pos == 0:
_line_pos += 1 + len(lines[_line])
_line += 1
while _line < _total_lines and (lines[_line].strip() == '' or
get_indentation(lines[_line]) != 0):
_line_pos = _file_str.find('\n', _line_pos + 1)
_line += 1
hash_pos = 0
while hash_pos == 0 and _line < _total_lines:
hash_pos = lines[_line].find('#')
if hash_pos == 0:
_line += 1
end_pos = _line_pos
old_str = _file_str[start_pos: end_pos]
return old_str
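    # Rough illustration of get_class_str_from_text() (the kv text is
    # hypothetical): for class_name 'MyWidget' and _file_str containing
    #     <MyWidget>:
    #         Button:
    #             text: 'hello'
    #     <OtherWidget>:
    #         ...
    # the method returns approximately the '<MyWidget>:' block, stopping where
    # indentation returns to column zero at '<OtherWidget>:'.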
def _allow_proj_watcher_dispatch(self, *args):
        '''To allow project_watcher to dispatch events for self.proj_dir again
'''
self.proj_watcher.allow_event_dispatch = True
# self.proj_watcher.start_watching(self.proj_dir)
def _app_in_string(self, s):
'''To determine if there is an App class or runTouchApp
defined/used in string s.
'''
if 'runTouchApp(' in s:
self._app_class = 'runTouchApp'
return True
elif 'kivy.app' in s:
for _class in re.findall(r'\bclass\b.+:', s):
b_index1 = _class.find('(')
b_index2 = _class.find(')')
classes = _class[b_index1 + 1:b_index2]
classes = re.sub(r'[\s]+', '', classes)
classes = classes.split(',')
if 'App' in classes:
self._app_class = _class[_class.find(' '):b_index1].strip()
return True
return False
def _get_class_files(self):
'''To search through all detected class rules and find
their python files and to search for app.
'''
if self._app_file is None:
# Search for main.py
for _file in self.file_list:
                if os.path.basename(_file) == 'main.py':
f = open(_file, 'r')
s = f.read()
f.close()
if self._app_in_string(s):
self._app_module = self._import_module(s, _file)
self._app_file = _file
# Search for a file with app in its name
if not self._app_class:
for _file in self.file_list:
                    if 'app' in os.path.basename(_file):
f = open(_file, 'r')
s = f.read()
f.close()
if self._app_in_string(s):
self._app_module = self._import_module(s, _file)
self._app_file = _file
to_find = []
for _rule in self.class_rules:
if _rule.file is None:
to_find.append(_rule)
if self.root_rule:
to_find.append(self.root_rule)
# If cannot find due to above methods, search every file
for _file in self.file_list:
            if _file.endswith('.py'):
f = open(_file, 'r')
s = f.read()
f.close()
if not self._app_file and self._app_in_string(s):
self._app_module = self._import_module(s, _file)
self._app_file = _file
for _rule in to_find[:]:
if _rule.file:
continue
if re.search(r'\bclass\s*%s+.+:' % (_rule.name), s):
mod = self._import_module(s, _file,
_fromlist=[_rule.name])
if hasattr(mod, _rule.name):
_rule.file = _file
to_find.remove(_rule)
_rule.module = mod
# Cannot Find App, So, use default runTouchApp
if not self._app_file:
self._app_class = 'runTouchApp'
# Root Widget may be in Factory not in file
if self.root_rule:
if not self.root_rule.file and\
hasattr(Factory, self.root_rule.name):
to_find.remove(self.root_rule)
        # to_find should be empty; if not, some classes' files were not found
if to_find != []:
raise ProjectLoaderException(
'Cannot find class files for all classes')
def _import_module(self, s, _file, _fromlist=[]):
module = None
import_from_s = False
_r = re.findall(r'Builder\.load_file\s*\(\s*.+\s*\)', s)
if _r:
s = s.replace(_r[0], '')
import_from_s = True
run_pos = s.rfind('().run()')
i = None
if run_pos != -1:
run_pos -= 1
while not s[run_pos].isspace():
run_pos -= 1
i = run_pos - 1
while s[i] == ' ':
i -= 1
        if (i is not None and i == run_pos - 1) or _r != []:
if i == run_pos - 1:
s = s.replace('%s().run()' % self._app_class, '')
if 'AppModule' in sys.modules:
del sys.modules['AppModule']
module = imp.new_module('AppModule')
exec_(s, module.__dict__)
sys.modules['AppModule'] = module
return module
module_name = _file[_file.rfind(os.sep) + 1:].replace('.py', '')
if module_name in sys.modules:
del sys.modules[module_name]
module = __import__(module_name, fromlist=_fromlist)
return module
def cleanup(self, stop_watcher=True):
'''To cleanup everything loaded by previous project.
'''
if stop_watcher:
self.proj_watcher.stop()
# Remove all class rules and root rules of previous project
rules = []
try:
rules = Builder.match(self.root_rule.widget)
for _rule in rules:
for _tuple in Builder.rules[:]:
if _tuple[1] == _rule:
Builder.rules.remove(_tuple)
        except Exception:
            pass
for _tuple in Builder.rules[:]:
for _rule in self.class_rules:
if "<" + _rule.name + ">" == _tuple[1].name:
Builder.rules.remove(_tuple)
if self.root_rule and not self._is_root_already_in_factory and\
hasattr(Factory, self.root_rule.name):
Factory.unregister(self.root_rule.name)
self._app_file = None
self._app_class = None
self._app_module = None
self._app = None
# Remove previous project directories
for _dir in self._dir_list:
try:
sys.path.remove(_dir)
            except ValueError:
                pass
self.kv_file_list = []
self.file_list = []
self._dir_list = []
self.class_rules = []
self.list_comments = []
self.custom_widgets = []
self.dict_file_type_and_path = {}
self.root_rule = None
self._root_rule = None
def get_app(self, reload_app=False):
        '''To get the application's app class instance
'''
if not self._app_file or not self._app_class or not self._app_module:
return None
if not reload_app and self._app:
return self._app
for name, obj in inspect.getmembers(self._app_module):
if inspect.isclass(obj) and self._app_class == name:
self._app = obj()
return self._app
# if still couldn't get app, although that shouldn't happen
return None
def reload_from_str(self, root_str):
'''To reload from root_str
'''
rules = []
# Cleaning root rules
try:
rules = Builder.match(self.root_rule.widget)
for _rule in rules:
for _tuple in Builder.rules[:]:
if _tuple[1] == _rule:
Builder.rules.remove(_tuple)
        except Exception:
            pass
# Cleaning class rules
for _rule in self.class_rules:
for rule in Builder.rules[:]:
if rule[1].name == '<' + _rule.name + '>':
Builder.rules.remove(rule)
break
root_widget = None
# Remove all the 'app' lines
root_str = re.sub(r'.+app+.+', '', root_str)
root_widget = Builder.load_string(root_str)
if not root_widget:
root_widget = self.get_widget_of_class(self.root_rule.name)
self.root_rule.widget = root_widget
if not root_widget:
root_name = root_str[:root_str.find('\n')]
            root_name = root_name.replace(':', '').replace('<', '')
            root_name = root_name.replace('>', '')
root_widget = self.set_root_widget(root_name)
return root_widget
def is_root_a_class_rule(self):
'''Returns True if root rule is a class rule
'''
for _rule in self.class_rules:
if _rule.name == self.root_rule.name:
return True
return False
def set_root_widget(self, root_name, widget=None):
'''To set root_name as the root rule.
'''
root_widget = None
if not widget:
root_widget = self.get_widget_of_class(root_name)
else:
root_widget = widget
self.root_rule = RootRule(root_name, root_widget)
for _rule in self.class_rules:
if _rule.name == root_name:
self.root_rule.kv_file = _rule.kv_file
self.root_rule.py_file = _rule.file
break
if not self._root_rule:
self._root_rule = self.root_rule
return root_widget
def get_root_widget(self, new_root=False):
'''To get the root widget of the current project.
'''
if not new_root and self.root_rule and self.root_rule.name != '':
return self.root_rule.widget
if self._app_file is None:
return None
f = open(self._app_file, 'r')
s = f.read()
f.close()
current_app = App.get_running_app()
app = self.get_app(reload_app=True)
root_widget = None
if app is not None:
root_widget = app.build()
if not root_widget:
root_widget = app.root
App._running_app = current_app
if root_widget:
self.root_rule = RootRule(root_widget.__class__.__name__,
root_widget)
for _rule in self.class_rules:
if _rule.name == self.root_rule.name:
self.root_rule.kv_file = _rule.kv_file
self.root_rule.file = _rule.file
break
if not self._root_rule:
self._root_rule = self.root_rule
if not self.root_rule.kv_file:
raise ProjectLoaderException("Cannot find root widget's kv file")
return root_widget
def get_widget_of_class(self, class_name):
'''To get instance of the class_name
'''
self.root = getattr(Factory, class_name)()
return self.root
def is_widget_custom(self, widget):
for rule in self.class_rules:
if rule.name == type(widget).__name__:
return True
return False
def record(self):
'''To record all the findings in ./designer/kvproj. These will
be loaded again if project hasn't been modified
outside Kivy Designer
'''
if not os.path.exists(os.path.join(
self.proj_dir, os.path.dirname(KV_PROJ_FILE_NAME))):
os.mkdir(os.path.join(self.proj_dir, ".designer"))
        f = open(os.path.join(self.proj_dir, KV_PROJ_FILE_NAME), 'w')
proj_file_str = '<time>\n' + ' ' + str(time.time()) + '\n</time>\n'
proj_file_str += '<files>\n'
for _file in self.file_list:
proj_file_str += ' <file>\n'
proj_file_str += ' ' + _file
proj_file_str += '\n </file>\n'
proj_file_str += '</files>\n'
proj_file_str += '<classes>\n'
for _rule in self.class_rules:
proj_file_str += ' <class>\n'
proj_file_str += ' <name>\n'
proj_file_str += ' ' + _rule.name
proj_file_str += '\n </name>\n'
proj_file_str += ' <file>\n'
proj_file_str += ' ' + _rule.file
proj_file_str += '\n </file>\n'
proj_file_str += '\n </class>\n'
proj_file_str += '</classes>\n'
if self._app_class and self._app_file:
proj_file_str += '<app>\n'
proj_file_str += ' <class>\n'
proj_file_str += ' ' + self._app_class
proj_file_str += '\n </class>\n'
proj_file_str += ' <file>\n'
proj_file_str += ' ' + self._app_file
proj_file_str += '\n </file>\n'
proj_file_str += '</app>\n'
f.write(proj_file_str)
f.close()
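# The helper below is a simplified, standalone sketch of the detection
# strategy used by _app_in_string() above; it is illustrative only and is not
# called anywhere in this module.
def _detect_app_class(source):
    '''Return 'runTouchApp', the name of an App subclass found in source,
       or None if neither is present.
    '''
    if 'runTouchApp(' in source:
        return 'runTouchApp'
    if 'kivy.app' in source:
        for _class in re.findall(r'\bclass\b.+:', source):
            b_index1 = _class.find('(')
            b_index2 = _class.find(')')
            bases = re.sub(r'[\s]+', '', _class[b_index1 + 1:b_index2])
            if 'App' in bases.split(','):
                return _class[_class.find(' '):b_index1].strip()
    return None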
|
mit
|
nkgilley/home-assistant
|
homeassistant/components/spc/alarm_control_panel.py
|
7
|
3441
|
"""Support for Vanderbilt (formerly Siemens) SPC alarm systems."""
import logging
from pyspcwebgw.const import AreaMode
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
)
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import DATA_API, SIGNAL_UPDATE_ALARM
_LOGGER = logging.getLogger(__name__)
def _get_alarm_state(area):
"""Get the alarm state."""
if area.verified_alarm:
return STATE_ALARM_TRIGGERED
mode_to_state = {
AreaMode.UNSET: STATE_ALARM_DISARMED,
AreaMode.PART_SET_A: STATE_ALARM_ARMED_HOME,
AreaMode.PART_SET_B: STATE_ALARM_ARMED_NIGHT,
AreaMode.FULL_SET: STATE_ALARM_ARMED_AWAY,
}
return mode_to_state.get(area.mode)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the SPC alarm control panel platform."""
if discovery_info is None:
return
api = hass.data[DATA_API]
async_add_entities([SpcAlarm(area=area, api=api) for area in api.areas.values()])
class SpcAlarm(alarm.AlarmControlPanelEntity):
"""Representation of the SPC alarm panel."""
def __init__(self, area, api):
"""Initialize the SPC alarm panel."""
self._area = area
self._api = api
async def async_added_to_hass(self):
"""Call for adding new entities."""
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_UPDATE_ALARM.format(self._area.id),
self._update_callback,
)
)
@callback
def _update_callback(self):
"""Call update method."""
self.async_schedule_update_ha_state(True)
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the device."""
return self._area.name
@property
def changed_by(self):
"""Return the user the last change was triggered by."""
return self._area.last_changed_by
@property
def state(self):
"""Return the state of the device."""
return _get_alarm_state(self._area)
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY | SUPPORT_ALARM_ARM_NIGHT
async def async_alarm_disarm(self, code=None):
"""Send disarm command."""
await self._api.change_mode(area=self._area, new_mode=AreaMode.UNSET)
async def async_alarm_arm_home(self, code=None):
"""Send arm home command."""
await self._api.change_mode(area=self._area, new_mode=AreaMode.PART_SET_A)
async def async_alarm_arm_night(self, code=None):
"""Send arm home command."""
await self._api.change_mode(area=self._area, new_mode=AreaMode.PART_SET_B)
async def async_alarm_arm_away(self, code=None):
"""Send arm away command."""
await self._api.change_mode(area=self._area, new_mode=AreaMode.FULL_SET)
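# A minimal, hypothetical sketch (not used by Home Assistant) of how
# _get_alarm_state maps an SPC area to an alarm state; _FakeArea is an
# illustrative stand-in for a pyspcwebgw area object.
def _example_state_mapping():
    """Return the mapped state for a fully set, non-alarming area."""
    class _FakeArea:
        verified_alarm = False
        mode = AreaMode.FULL_SET
    return _get_alarm_state(_FakeArea())  # STATE_ALARM_ARMED_AWAY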
|
apache-2.0
|
OndrejIT/pyload
|
module/plugins/hoster/MediafireCom.py
|
6
|
3247
|
# -*- coding: utf-8 -*-
from ..captcha.ReCaptcha import ReCaptcha
from ..captcha.SolveMedia import SolveMedia
from ..internal.SimpleHoster import SimpleHoster
class MediafireCom(SimpleHoster):
__name__ = "MediafireCom"
__type__ = "hoster"
__version__ = "0.98"
__status__ = "testing"
__pattern__ = r'https?://(?:www\.)?mediafire\.com/(file/|view/\??|download(\.php\?|/)|\?)(?P<ID>\w+)'
__config__ = [("activated", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback", "bool", "Fallback to free download if premium fails", True),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10)]
__description__ = """Mediafire.com hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "[email protected]"),
("stickell", "[email protected]"),
("Walter Purcaro", "[email protected]"),
("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
NAME_PATTERN = r'<META NAME="description" CONTENT="(?P<N>.+?)"/>'
SIZE_PATTERN = r'<div class="fileName">(?P<N>.+?)</div>'
TEMP_OFFLINE_PATTERN = r'^unmatchable$'
OFFLINE_PATTERN = r'class="error_msg_title"'
LINK_FREE_PATTERN = r'aria-label="Download file"\s+href="(.+?)"'
PASSWORD_PATTERN = r'<form name="form_password"'
def setup(self):
self.resume_download = True
self.multiDL = True
def handle_captcha(self):
solvemedia = SolveMedia(self.pyfile)
captcha_key = solvemedia.detect_key()
if captcha_key:
self.captcha = solvemedia
response, challenge = solvemedia.challenge(captcha_key)
self.data = self.load("http://www.mediafire.com/?" + self.info['pattern']['ID'],
post={'adcopy_challenge': challenge,
'adcopy_response': response})
return
recaptcha = ReCaptcha(self.pyfile)
captcha_key = recaptcha.detect_key()
if captcha_key:
url, inputs = self.parse_html_form('name="form_captcha"')
self.log_debug(("form_captcha url:%s inputs:%s") % (url, inputs))
if url:
self.captcha = recaptcha
response, challenge = recaptcha.challenge(captcha_key)
inputs['g-recaptcha-response'] = response
self.data = self.load(self.fixurl(url), post=inputs)
else:
self.fail("ReCaptcha form not found")
def handle_free(self, pyfile):
self.handle_captcha()
if self.PASSWORD_PATTERN in self.data:
password = self.get_password()
if not password:
self.fail(_("No password found"))
else:
self.log_info(_("Password protected link, trying: %s") % password)
self.data = self.load(self.link, post={'downloadp': password})
if self.PASSWORD_PATTERN in self.data:
self.fail(_("Wrong password"))
return SimpleHoster.handle_free(self, pyfile)
|
gpl-3.0
|
AGnarlyNarwhal/cse360_webapp
|
cse360_webapp/urls.py
|
1
|
1946
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf import settings
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
#from django.conf.urls.static import static
from app1 import views
#This is the project/url.py
urlpatterns = patterns('',
# Examples:
url(r'^home/', 'app1.views.events'),
url(r'^app1/', include('app1.urls')),
url(r'^accounts/', include('userprofile.urls')),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
#Gives out our media root to serve pictures
url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT }),
#Gives out the static root to serve static files (images,css,js, etc.)
#url(r'^static/(?P<path>.*)$', 'django.views.static.serve',
#{'document_root': settings.STATIC_ROOT }),
#User login/authentication/registration urls
#url(r'^accounts/user_image_upload/$', 'cse360_webapp.views.user_image_upload'
url(r'^accounts/tickets/$', 'app1.views.tickets'),
url(r'^accounts/login/$', 'cse360_webapp.views.login'),
url(r'^accounts/auth/$', 'cse360_webapp.views.auth_view'),
url(r'^accounts/logout/$', 'cse360_webapp.views.logout'),
url(r'^accounts/loggedin/$', 'cse360_webapp.views.loggedin'),
url(r'^accounts/invalid/$', 'cse360_webapp.views.invalid_login'),
url(r'^accounts/register/$', 'cse360_webapp.views.register_user'),
url(r'^accounts/register_success/$',
'cse360_webapp.views.register_success'),
)
"""
if settings.DEBUG:
# static files (images, css, javascript, etc.)
urlpatterns += patterns('',
(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.MEDIA_ROOT}))
"""
if not settings.DEBUG:
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns += staticfiles_urlpatterns()
|
gpl-2.0
|
linebp/pandas
|
bench/bench_groupby.py
|
1
|
1293
|
from pandas import *
from pandas.util.testing import rands
from pandas.compat import range
import string
import random
k = 20000
n = 10
foo = np.tile(np.array([rands(10) for _ in range(k)], dtype='O'), n)
foo2 = list(foo)
random.shuffle(foo)
random.shuffle(foo2)
df = DataFrame({'A': foo,
'B': foo2,
'C': np.random.randn(n * k)})
import pandas._sandbox as sbx
def f():
table = sbx.StringHashTable(len(df))
ret = table.factorize(df['A'])
return ret
def g():
table = sbx.PyObjectHashTable(len(df))
ret = table.factorize(df['A'])
return ret
ret = f()
"""
import pandas._tseries as lib
f = np.std
grouped = df.groupby(['A', 'B'])
label_list = [ping.labels for ping in grouped.groupings]
shape = [len(ping.ids) for ping in grouped.groupings]
from pandas.core.groupby import get_group_index
group_index = get_group_index(label_list, shape,
sort=True, xnull=True).astype('i4')
ngroups = np.prod(shape)
indexer = lib.groupsort_indexer(group_index, ngroups)
values = df['C'].values.take(indexer)
group_index = group_index.take(indexer)
f = lambda x: x.std(ddof=1)
grouper = lib.Grouper(df['C'], np.ndarray.std, group_index, ngroups)
result = grouper.get_result()
expected = grouped.std()
"""
|
bsd-3-clause
|
dnxbjyj/python-basic
|
gui/wxpython/wxPython-demo-4.0.1/demo/Button.py
|
1
|
2502
|
#!/usr/bin/env python
import wx
import images
#----------------------------------------------------------------------
class TestPanel(wx.Panel):
def __init__(self, parent, log):
wx.Panel.__init__(self, parent, -1,
style=wx.NO_FULL_REPAINT_ON_RESIZE)
self.log = log
b = wx.Button(self, 10, "Default Button", (20, 20))
self.Bind(wx.EVT_BUTTON, self.OnClick, b)
b.SetDefault()
b.SetSize(b.GetBestSize())
b = wx.Button(self, 20, "HELLO AGAIN!", (20, 80))
self.Bind(wx.EVT_BUTTON, self.OnClick, b)
b.SetToolTip("This is a Hello button...")
b = wx.Button(self, 40, "Flat Button?", (20,160), style=wx.NO_BORDER)
b.SetToolTip("This button has a style flag of wx.NO_BORDER.\n"
"On some platforms that will give it a flattened look.")
self.Bind(wx.EVT_BUTTON, self.OnClick, b)
b = wx.Button(self, 50, "wx.Button with icon", (20, 220))
b.SetToolTip("wx.Button can how have an icon on the left, right,\n"
"above or below the label.")
self.Bind(wx.EVT_BUTTON, self.OnClick, b)
b.SetBitmap(images.Mondrian.Bitmap,
wx.LEFT # Left is the default, the image can be on the other sides too
#wx.RIGHT
#wx.TOP
#wx.BOTTOM
)
b.SetBitmapMargins((2,2)) # default is 4 but that seems too big to me.
# Setting the bitmap and margins changes the best size, so
# reset the initial size since we're not using a sizer in this
# example which would have taken care of this for us.
b.SetInitialSize()
#b = wx.Button(self, 60, "Multi-line\nbutton", (20, 280))
#b = wx.Button(self, 70, pos=(160, 280))
#b.SetLabel("Another\nmulti-line")
def OnClick(self, event):
self.log.write("Click! (%d)\n" % event.GetId())
#----------------------------------------------------------------------
def runTest(frame, nb, log):
win = TestPanel(nb, log)
return win
#----------------------------------------------------------------------
overview = """<html><body>
<h2>Button</h2>
A button is a control that contains a text string or a bitmap and can be
placed on nearly any kind of window.
</body></html>
"""
if __name__ == '__main__':
import sys,os
import run
run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
|
mit
|
ivanprjcts/equinox-spring16-API
|
makesdks/outputs/sdk.py
|
1
|
6152
|
from sdklib import SdkBase
class DefaultApi(SdkBase):
API_HOST = "api.spring16.equinox.local"
API_USERS__PK__URL = "/users/%s/"
API_USERS_URL = "/users/"
API_APPLICATIONS__PK__URL = "/applications/%s/"
API_APPLICATIONS_URL = "/applications/"
API_INSTANCES__PK__URL = "/instances/%s/"
API_OPERATIONS_URL = "/operations/"
API_OPERATIONS__PK__URL = "/operations/%s/"
API_INSTANCES_URL = "/instances/"
def put_users_pk(self, pk, username, email=None):
form_params = {"username": username}
if email is not None:
form_params["email"] = email
return self._http("PUT", self.API_USERS__PK__URL % (pk), form_params=form_params)
def delete_users_pk(self, pk):
return self._http("DELETE", self.API_USERS__PK__URL % (pk))
def get_users_pk(self, pk):
return self._http("GET", self.API_USERS__PK__URL % (pk))
def patch_users_pk(self, pk, username=None, email=None):
form_params = dict()
if username is not None:
form_params["username"] = username
if email is not None:
form_params["email"] = email
return self._http("PATCH", self.API_USERS__PK__URL % (pk), form_params=form_params)
def post_users(self, username, email=None):
form_params = {"username": username}
if email is not None:
form_params["email"] = email
return self._http("POST", self.API_USERS_URL, form_params=form_params)
def get_users(self, page=None):
query_params = dict()
if page is not None:
query_params["page"] = page
return self._http("GET", self.API_USERS_URL, query_params=query_params)
def put_applications_pk(self, pk, name, open=None, description=None):
form_params = {"name": name}
if open is not None:
form_params["open"] = open
if description is not None:
form_params["description"] = description
return self._http("PUT", self.API_APPLICATIONS__PK__URL % (pk), form_params=form_params)
def delete_applications_pk(self, pk):
return self._http("DELETE", self.API_APPLICATIONS__PK__URL % (pk))
def get_applications_pk(self, pk):
return self._http("GET", self.API_APPLICATIONS__PK__URL % (pk))
def patch_applications_pk(self, pk, name=None, open=None, description=None):
form_params = dict()
if name is not None:
form_params["name"] = name
if open is not None:
form_params["open"] = open
if description is not None:
form_params["description"] = description
return self._http("PATCH", self.API_APPLICATIONS__PK__URL % (pk), form_params=form_params)
def post_applications(self, name, open=None, description=None):
form_params = {"name": name}
if open is not None:
form_params["open"] = open
if description is not None:
form_params["description"] = description
return self._http("POST", self.API_APPLICATIONS_URL, form_params=form_params)
def get_applications(self, page=None):
query_params = dict()
if page is not None:
query_params["page"] = page
return self._http("GET", self.API_APPLICATIONS_URL, query_params=query_params)
def put_instances_pk(self, pk, name, user, open=None):
form_params = {"name": name, "user": user}
if open is not None:
form_params["open"] = open
return self._http("PUT", self.API_INSTANCES__PK__URL % (pk), form_params=form_params)
def delete_instances_pk(self, pk):
return self._http("DELETE", self.API_INSTANCES__PK__URL % (pk))
def get_instances_pk(self, pk):
return self._http("GET", self.API_INSTANCES__PK__URL % (pk))
def patch_instances_pk(self, pk, name=None, open=None, user=None):
form_params = dict()
if name is not None:
form_params["name"] = name
if open is not None:
form_params["open"] = open
if user is not None:
form_params["user"] = user
return self._http("PATCH", self.API_INSTANCES__PK__URL % (pk), form_params=form_params)
def post_operations(self, name, application, open=None):
form_params = {"name": name, "application": application}
if open is not None:
form_params["open"] = open
return self._http("POST", self.API_OPERATIONS_URL, form_params=form_params)
def get_operations(self, page=None):
query_params = dict()
if page is not None:
query_params["page"] = page
return self._http("GET", self.API_OPERATIONS_URL, query_params=query_params)
def put_operations_pk(self, pk, name, application, open=None):
form_params = {"name": name, "application": application}
if open is not None:
form_params["open"] = open
return self._http("PUT", self.API_OPERATIONS__PK__URL % (pk), form_params=form_params)
def delete_operations_pk(self, pk):
return self._http("DELETE", self.API_OPERATIONS__PK__URL % (pk))
def get_operations_pk(self, pk):
return self._http("GET", self.API_OPERATIONS__PK__URL % (pk))
def patch_operations_pk(self, pk, name=None, open=None, application=None):
form_params = dict()
if name is not None:
form_params["name"] = name
if open is not None:
form_params["open"] = open
if application is not None:
form_params["application"] = application
return self._http("PATCH", self.API_OPERATIONS__PK__URL % (pk), form_params=form_params)
def post_instances(self, name, user, open=None):
form_params = {"name": name, "user": user}
if open is not None:
form_params["open"] = open
return self._http("POST", self.API_INSTANCES_URL, form_params=form_params)
def get_instances(self, page=None):
query_params = dict()
if page is not None:
query_params["page"] = page
return self._http("GET", self.API_INSTANCES_URL, query_params=query_params)
|
lgpl-3.0
|
tensorflow/tensorflow
|
tensorflow/python/pywrap_mlir.py
|
6
|
3647
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python module for MLIR functions exported by pybind11."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=invalid-import-order, g-bad-import-order, wildcard-import, unused-import, undefined-variable
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python._pywrap_mlir import *
def import_graphdef(graphdef,
pass_pipeline,
show_debug_info,
input_names=None,
input_data_types=None,
input_data_shapes=None,
output_names=[]):
if input_names is not None:
return ImportGraphDef(
str(graphdef).encode('utf-8'), pass_pipeline.encode('utf-8'),
show_debug_info, ','.join(input_names).encode('utf-8'),
','.join(input_data_types).encode('utf-8'),
':'.join(input_data_shapes).encode('utf-8'),
','.join(output_names).encode('utf-8'))
return ImportGraphDef(
str(graphdef).encode('utf-8'), pass_pipeline.encode('utf-8'),
show_debug_info)
def import_function(concrete_function, pass_pipeline, show_debug_info):
ctxt = context.context()
ctxt.ensure_initialized()
return ImportFunction(ctxt._handle,
str(concrete_function.function_def).encode('utf-8'),
pass_pipeline.encode('utf-8'), show_debug_info)
def experimental_convert_saved_model_to_mlir(saved_model_path, exported_names,
show_debug_info):
return ExperimentalConvertSavedModelToMlir(
str(saved_model_path).encode('utf-8'),
str(exported_names).encode('utf-8'), show_debug_info)
def experimental_convert_saved_model_v1_to_mlir_lite(saved_model_path,
exported_names, tags,
upgrade_legacy,
show_debug_info):
return ExperimentalConvertSavedModelV1ToMlirLite(
str(saved_model_path).encode('utf-8'),
str(exported_names).encode('utf-8'),
str(tags).encode('utf-8'), upgrade_legacy, show_debug_info)
def experimental_convert_saved_model_v1_to_mlir(saved_model_path,
exported_names, tags,
lift_variables, upgrade_legacy,
show_debug_info):
return ExperimentalConvertSavedModelV1ToMlir(
str(saved_model_path).encode('utf-8'),
str(exported_names).encode('utf-8'),
str(tags).encode('utf-8'), lift_variables, upgrade_legacy,
show_debug_info)
def experimental_run_pass_pipeline(mlir_txt, pass_pipeline, show_debug_info):
return ExperimentalRunPassPipeline(
mlir_txt.encode('utf-8'), pass_pipeline.encode('utf-8'), show_debug_info)
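# A minimal, hypothetical usage sketch (not part of this module's API); the
# MLIR text and the pass pipeline string below are illustrative only.
def _example_run_pass_pipeline():
  mlir_txt = ('module {\n'
              '  func @identity(%arg0: tensor<f32>) -> tensor<f32> {\n'
              '    return %arg0 : tensor<f32>\n'
              '  }\n'
              '}\n')
  return experimental_run_pass_pipeline(mlir_txt, 'canonicalize', True)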
|
apache-2.0
|
sgerhart/ansible
|
lib/ansible/modules/network/illumos/ipadm_addr.py
|
61
|
11664
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Adam Števko <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ipadm_addr
short_description: Manage IP addresses on an interface on Solaris/illumos systems
description:
- Create/delete static/dynamic IP addresses on network interfaces on Solaris/illumos systems.
- Up/down static/dynamic IP addresses on network interfaces on Solaris/illumos systems.
- Manage IPv6 link-local addresses on network interfaces on Solaris/illumos systems.
version_added: "2.3"
author: Adam Števko (@xen0l)
options:
address:
description:
            - Specifies an IP address to configure in CIDR notation.
required: false
aliases: [ "addr" ]
addrtype:
description:
            - Specifies a type of IP address to configure.
required: false
default: static
choices: [ 'static', 'dhcp', 'addrconf' ]
addrobj:
description:
            - Specifies a unique IP address on the system.
required: true
temporary:
description:
- Specifies that the configured IP address is temporary. Temporary
IP addresses do not persist across reboots.
required: false
default: false
wait:
description:
            - Specifies the time in seconds to wait when obtaining an address via DHCP.
required: false
default: 60
state:
description:
- Create/delete/enable/disable an IP address on the network interface.
required: false
default: present
choices: [ 'absent', 'present', 'up', 'down', 'enabled', 'disabled', 'refreshed' ]
'''
EXAMPLES = '''
- name: Configure IP address 10.0.0.1 on e1000g0
ipadm_addr: addr=10.0.0.1/32 addrobj=e1000g0/v4 state=present
- name: Delete addrobj
ipadm_addr: addrobj=e1000g0/v4 state=absent
- name: Configure link-local IPv6 address
  ipadm_addr: addrtype=addrconf addrobj=vnic0/v6
- name: Configure address via DHCP and wait 180 seconds for address obtaining
ipadm_addr: addrobj=vnic0/dhcp addrtype=dhcp wait=180
'''
RETURN = '''
addrobj:
description: address object name
returned: always
type: string
sample: bge0/v4
state:
description: state of the target
returned: always
type: string
sample: present
temporary:
description: specifies if operation will persist across reboots
returned: always
type: boolean
sample: True
addrtype:
description: address type
returned: always
type: string
sample: static
address:
description: IP address
returned: only if addrtype is 'static'
type: string
sample: 1.3.3.7/32
wait:
description: time we wait for DHCP
returned: only if addrtype is 'dhcp'
type: string
sample: 10
'''
import socket
from ansible.module_utils.basic import AnsibleModule
SUPPORTED_TYPES = ['static', 'addrconf', 'dhcp']
class Addr(object):
def __init__(self, module):
self.module = module
self.address = module.params['address']
self.addrtype = module.params['addrtype']
self.addrobj = module.params['addrobj']
self.temporary = module.params['temporary']
self.state = module.params['state']
self.wait = module.params['wait']
def is_cidr_notation(self):
return self.address.count('/') == 1
def is_valid_address(self):
ip_address = self.address.split('/')[0]
try:
if len(ip_address.split('.')) == 4:
socket.inet_pton(socket.AF_INET, ip_address)
else:
socket.inet_pton(socket.AF_INET6, ip_address)
except socket.error:
return False
return True
def is_dhcp(self):
cmd = [self.module.get_bin_path('ipadm')]
cmd.append('show-addr')
cmd.append('-p')
cmd.append('-o')
cmd.append('type')
cmd.append(self.addrobj)
(rc, out, err) = self.module.run_command(cmd)
if rc == 0:
if out.rstrip() != 'dhcp':
return False
return True
else:
self.module.fail_json(msg='Wrong addrtype %s for addrobj "%s": %s' % (out, self.addrobj, err),
rc=rc,
stderr=err)
def addrobj_exists(self):
cmd = [self.module.get_bin_path('ipadm')]
cmd.append('show-addr')
cmd.append(self.addrobj)
(rc, _, _) = self.module.run_command(cmd)
if rc == 0:
return True
else:
return False
def delete_addr(self):
cmd = [self.module.get_bin_path('ipadm')]
cmd.append('delete-addr')
cmd.append(self.addrobj)
return self.module.run_command(cmd)
def create_addr(self):
cmd = [self.module.get_bin_path('ipadm')]
cmd.append('create-addr')
cmd.append('-T')
cmd.append(self.addrtype)
if self.temporary:
cmd.append('-t')
if self.addrtype == 'static':
cmd.append('-a')
cmd.append(self.address)
if self.addrtype == 'dhcp' and self.wait:
cmd.append('-w')
cmd.append(self.wait)
cmd.append(self.addrobj)
return self.module.run_command(cmd)
def up_addr(self):
cmd = [self.module.get_bin_path('ipadm')]
cmd.append('up-addr')
if self.temporary:
cmd.append('-t')
cmd.append(self.addrobj)
return self.module.run_command(cmd)
def down_addr(self):
cmd = [self.module.get_bin_path('ipadm')]
cmd.append('down-addr')
if self.temporary:
cmd.append('-t')
cmd.append(self.addrobj)
return self.module.run_command(cmd)
def enable_addr(self):
cmd = [self.module.get_bin_path('ipadm')]
cmd.append('enable-addr')
cmd.append('-t')
cmd.append(self.addrobj)
return self.module.run_command(cmd)
def disable_addr(self):
cmd = [self.module.get_bin_path('ipadm')]
cmd.append('disable-addr')
cmd.append('-t')
cmd.append(self.addrobj)
return self.module.run_command(cmd)
def refresh_addr(self):
cmd = [self.module.get_bin_path('ipadm')]
cmd.append('refresh-addr')
cmd.append(self.addrobj)
return self.module.run_command(cmd)
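# Each helper above builds the corresponding ipadm command line and hands it
# to module.run_command(); for example (values are hypothetical), creating a
# static address runs roughly:
#     ipadm create-addr -T static -a 10.0.0.1/32 e1000g0/v4
# with '-t' added when the address is temporary.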
def main():
module = AnsibleModule(
argument_spec=dict(
address=dict(aliases=['addr']),
addrtype=dict(default='static', choices=SUPPORTED_TYPES),
addrobj=dict(required=True),
temporary=dict(default=False, type='bool'),
state=dict(
default='present', choices=['absent', 'present', 'up', 'down', 'enabled', 'disabled', 'refreshed']),
wait=dict(default=60),
),
mutually_exclusive=[
('address', 'wait'),
],
supports_check_mode=True
)
addr = Addr(module)
rc = None
out = ''
err = ''
result = {}
result['addrobj'] = addr.addrobj
result['state'] = addr.state
result['temporary'] = addr.temporary
result['addrtype'] = addr.addrtype
if addr.addrtype == 'static' and addr.address:
if addr.is_cidr_notation() and addr.is_valid_address():
result['address'] = addr.address
else:
module.fail_json(msg='Invalid IP address: %s' % addr.address)
if addr.addrtype == 'dhcp' and addr.wait:
result['wait'] = addr.wait
if addr.state == 'absent':
if addr.addrobj_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = addr.delete_addr()
if rc != 0:
module.fail_json(msg='Error while deleting addrobj: "%s"' % err,
addrobj=addr.addrobj,
stderr=err,
rc=rc)
elif addr.state == 'present':
if not addr.addrobj_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = addr.create_addr()
if rc != 0:
module.fail_json(msg='Error while configuring IP address: "%s"' % err,
addrobj=addr.addrobj,
addr=addr.address,
stderr=err,
rc=rc)
elif addr.state == 'up':
if addr.addrobj_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = addr.up_addr()
if rc != 0:
module.fail_json(msg='Error while bringing IP address up: "%s"' % err,
addrobj=addr.addrobj,
stderr=err,
rc=rc)
elif addr.state == 'down':
if addr.addrobj_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = addr.down_addr()
if rc != 0:
module.fail_json(msg='Error while bringing IP address down: "%s"' % err,
addrobj=addr.addrobj,
stderr=err,
rc=rc)
elif addr.state == 'refreshed':
if addr.addrobj_exists():
if addr.is_dhcp():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = addr.refresh_addr()
if rc != 0:
module.fail_json(msg='Error while refreshing IP address: "%s"' % err,
addrobj=addr.addrobj,
stderr=err,
rc=rc)
else:
module.fail_json(msg='state "refreshed" cannot be used with "%s" addrtype' % addr.addrtype,
addrobj=addr.addrobj,
stderr=err,
rc=1)
elif addr.state == 'enabled':
if addr.addrobj_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = addr.enable_addr()
if rc != 0:
module.fail_json(msg='Error while enabling IP address: "%s"' % err,
addrobj=addr.addrobj,
stderr=err,
rc=rc)
elif addr.state == 'disabled':
if addr.addrobj_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = addr.disable_addr()
if rc != 0:
module.fail_json(msg='Error while disabling IP address: "%s"' % err,
addrobj=addr.addrobj,
stderr=err,
rc=rc)
if rc is None:
result['changed'] = False
else:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
module.exit_json(**result)
if __name__ == '__main__':
main()
|
mit
|
jesramirez/odoo
|
addons/website_certification/__init__.py
|
385
|
1030
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import certification
import controllers
|
agpl-3.0
|
plin1112/pysimm
|
tests/test_lmps_examples.py
|
2
|
1833
|
import unittest
import os
from os import path as osp
from testing import AbstractExamplesTestCase
from testing import example_tests_sort
class LammpsExamplesTestCase(AbstractExamplesTestCase):
def test_lammps_binary(self):
binary = os.environ.get('LAMMPS_EXEC')
self.assertIsNotNone(binary)
def test_example1(self):
for p in self.path_generator(osp.join('01_methane', '*')):
self.assertEqual(self.run_example(p), True)
def test_example2(self):
for p in self.path_generator(osp.join('02_methanol', '*')):
self.assertEqual(self.run_example(p), True)
def test_example3(self):
for p in self.path_generator(osp.join('03_benzene', '*')):
self.assertEqual(self.run_example(p), True)
def test_example4(self):
for p in self.path_generator(osp.join('04_polyethylene', '*')):
self.assertEqual(self.run_example(p), True)
def test_example5(self):
for p in self.path_generator(osp.join('05_polyethylene-co-styrene', '*')):
self.assertEqual(self.run_example(p), True)
def test_example6(self):
for p in self.path_generator(osp.join('06_polymethyl_methacryalte_multi', '*')):
self.assertEqual(self.run_example(p), True)
def test_example7(self):
for p in self.path_generator(osp.join('07_lammps_simulation', '')):
self.assertEqual(self.run_example(p), True)
def test_example8(self):
for p in self.path_generator(osp.join('08_ethanol_acetone_mixture', '')):
self.assertEqual(self.run_example(p), True)
if __name__ == '__main__':
my_tl = unittest.TestLoader()
my_tl.sortTestMethodsUsing = example_tests_sort
unittest.TextTestRunner(buffer=True, verbosity=2).run(my_tl.loadTestsFromTestCase(LammpsExamplesTestCase))
|
mit
|
defaultnamehere/grr
|
lib/aff4_objects/timeline_test.py
|
6
|
2810
|
#!/usr/bin/env python
# Copyright 2011 Google Inc. All Rights Reserved.
"""AFF4 Timeline object tests."""
import random
import time
from grr.lib import aff4
from grr.lib import rdfvalue
from grr.lib import test_lib
class TimelineTest(test_lib.AFF4ObjectTest):
"""Test the timeline implementation."""
def testTimeSeries(self):
"""Check that timeseries sort events by timestamps."""
path = "/C.1/time series 1"
fd = aff4.FACTORY.Create(path, "GRRTimeSeries", token=self.token)
# Make up some random events in random time order.
now = int(time.time() * 1000000)
times = [random.randint(0, 1000) * 1000000 + now for _ in range(100)]
for t in times:
event = rdfvalue.Event(timestamp=t)
event.stat.st_mtime = t / 1000000
event.stat.pathspec.path = time.ctime(t/1000000)
fd.AddEvent(event)
fd.Close()
# Now read back the events and make sure they are in time order.
times.sort()
fd = aff4.FACTORY.Open(path, token=self.token)
count = 0
for t, event in zip(times, fd):
self.assertEqual(event.timestamp, t)
count += 1
self.assert_(fd.Get(fd.Schema.SIZE) > 0)
self.assertEqual(count, len(times))
def testTimeSeriesQuery(self):
"""Check that we can filter by query string."""
path = "/C.1/time series 2"
fd = aff4.FACTORY.Create(path, "GRRTimeSeries", token=self.token)
times = [1321533293629468, 1321633293629468, 1321733293629468]
for t in times:
event = rdfvalue.Event(timestamp=t)
event.stat.st_mtime = t / 1000000
event.stat.pathspec.path = time.strftime("Path @ %a %b %d %T %Y",
time.gmtime(t/1000000))
fd.AddEvent(event)
fd.Close()
fd = aff4.FACTORY.Open(path, token=self.token)
# Check that we can filter the events
results = list(fd.Query("timestamp > 2000"))
self.assertEqual(len(results), 3)
# Match by timestamp
results = list(fd.Query(
"timestamp >= 2011/11/18 and timestamp < 2011/11/19"))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].event.timestamp, 1321633293629468)
# Test if <= works as expected.
results = list(fd.Query(
"timestamp >= 2011/11/18 and timestamp <= 2011/11/19"))
self.assertEqual(len(results), 2)
# Match within the embedded stat protobuf
results = list(fd.Query(
"event.stat.st_mtime >= 2011/11/18 and event.stat.st_mtime < 2011/11/19"
))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].event.timestamp, 1321633293629468)
# Match a string deeply nested in protobufs
results = list(fd.Query("event.stat.pathspec.path contains Fri"))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].event.timestamp, 1321633293629468)
|
apache-2.0
|
gaberger/pybvc
|
samples/sampleopenflow/apps/oftool/oftool.py
|
2
|
63172
|
#!/usr/bin/env python
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
oftool.py: OpenFlow tool command line utility (sample application)
"""
import json
import yaml
import argparse
from collections import OrderedDict
from pybvc.controller.controller import Controller
from pybvc.common.status import STATUS
from pybvc.common.utils import dbg_print
from pybvc.openflowdev.ofswitch import OFSwitch, \
FlowEntry, \
GroupEntry
from pybvc.controller.topology import Topology, Node
from pybvc.controller.inventory import Inventory, \
OpenFlowCapableNode, \
OpenFlowPort, \
GroupFeatures, \
GroupDescription, \
GroupStatistics
#-------------------------------------------------------------------------------
# Class 'CtrlCfg'
#-------------------------------------------------------------------------------
class CtrlCfg():
""" Attributes necessary for communication with Controller """
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def __init__(self, addr, port, name, pswd):
self.ip_addr = addr
self.tcp_port = port
self.admin_name = name
self.admin_pswd = pswd
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def to_string(self):
return "%s:%s" % (self.ip_addr, self.tcp_port)
#-------------------------------------------------------------------------------
# Class 'ConcatJSONObjects'
#-------------------------------------------------------------------------------
class ConcatJSONObjects(json.JSONDecoder):
""" Custom JSON decoder subclass used for retrieving multiple JSON objects
    from a text file. Capable of parsing JSON files and strings annotated with
single and multi-line comments
"""
MULTILINE_COMMENT_START = "/*"
MULTILINE_COMMENT_END = "*/"
SINGLELINE_COMMENT = "//"
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def _strip_comments(self, s):
json_string = ""
is_multiline_comment = False
lines = s.split('\n')
for l in lines:
l = l.strip()
if l.startswith(self.MULTILINE_COMMENT_START):
                is_multiline_comment = True
if is_multiline_comment:
if l.endswith(self.MULTILINE_COMMENT_END):
is_multiline_comment = False
continue
if l.startswith(self.SINGLELINE_COMMENT):
continue
# check for the internal single-line comment
# (current algorithm assumes that if single-line comment
# substring is found at the highest index in the JSON string
# and number of double-quotation marks before its position is
            # even then it is really a comment, not a substring located
# within JSON string value)
idx = l.rfind(self.SINGLELINE_COMMENT)
if idx >= 0:
cnt = l.count('"', 0, idx)
if cnt%2 == 0: # single-line comment substring is outside
l = l[:idx] # of the JSON body, ignore comment part
# remove whitespaces preceding stripped comments (if any)
l = l.rstrip()
json_string += l
return json_string
#---------------------------------------------------------------------------
# Overriding 'decode' method defined in JSONDecoder parent class
#---------------------------------------------------------------------------
def decode(self, s):
objs = []
json_string = self._strip_comments(s)
try:
idx = 0
js_len = len(json_string)
while idx < js_len:
obj, idx = self.raw_decode(json_string, idx)
objs.append(obj)
except(Exception) as e:
print "\n!!! JSON decode failed\n"
print "Decode string:\n%s\n" % json_string
print "Failure Reason: %s\n" % e
return objs
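#-------------------------------------------------------------------------------
# Illustrative (hypothetical) usage of 'ConcatJSONObjects': parse a string
# holding two commented JSON objects into a list of dictionaries. This helper
# is not used by the tool itself.
#-------------------------------------------------------------------------------
def _concat_json_example():
    sample = ('// first entry\n'
              '{"table_id": 0, "priority": 100}\n'
              '/* second entry,\n'
              '   spanning a multi-line comment */\n'
              '{"table_id": 0, "priority": 200}\n')
    return json.loads(sample, cls=ConcatJSONObjects)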
#-------------------------------------------------------------------------------
# Class 'TopologyInfo'
#-------------------------------------------------------------------------------
class TopologyInfo():
""" Methods to retrieve and display network topology information """
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def __init__(self, ctrl):
assert(isinstance(ctrl, Controller))
self.ctrl = ctrl
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def show_common(self, verbose=False):
ctrl = self.ctrl
result = ctrl.build_inventory_object()
status = result.get_status()
if(status.eq(STATUS.OK) == True):
inventory = result.get_data()
assert(isinstance(inventory, Inventory))
elif(status.eq(STATUS.DATA_NOT_FOUND)):
print "\n".strip()
print " Requested data not found"
print "\n".strip()
exit(0)
else:
print ("\n")
print ("!!!Error, failed to obtain inventory info, reason: %s"
% status.brief().lower())
exit(1)
result = ctrl.get_topology_ids()
status = result.get_status()
if(status.eq(STATUS.OK) == True):
topology_ids = result.get_data()
assert(isinstance(topology_ids, list))
elif(status.eq(STATUS.DATA_NOT_FOUND)):
print "\n".strip()
print " Requested data not found"
print "\n".strip()
exit(0)
else:
print ("\n")
print ("!!!Error, failed to obtain topology info, reason: %s"
% status.brief().lower())
exit(1)
topologies = []
for topo_id in topology_ids:
result = ctrl.build_topology_object(topo_id)
status = result.get_status()
if(status.eq(STATUS.OK) == True):
topo = result.get_data()
topologies.append(topo)
assert(isinstance(topo, Topology))
else:
print ("\n")
print ("!!!Error, failed to parse '%s' topology info, reason: %s"
% (topo_id, status.brief().lower()))
exit(1)
for topo in topologies:
print "\n".strip()
print (" Network topology '%s'") % topo.get_id()
print "\n".strip()
flows_cnt = 0
sids = topo.get_switch_ids()
for sid in sids:
flows_cnt += inventory.get_openflow_node_flows_cnt(sid)
print (" Number of switches : %s" % topo.get_switches_cnt())
print (" Number of inter-switch links : %s" % topo.get_inter_switch_links_cnt())
print (" Number of hosts : %s" % topo.get_hosts_cnt())
print (" Number of flows : %s" % flows_cnt)
if (verbose):
print "\n".strip()
print (" Switches in topology")
s1 = 'IP Address'
s2 = 'OpenFlow Id'
s3 = 'Flows Cnt'
sym = '-'
print "\n".strip()
print " {0:<15} {1:<30} {2:<10}".format(s1, s2, s3)
print " {0:<15} {1:<30} {2:<10}".format(sym*15, sym*30, sym*10)
switch_ids = topo.get_switch_ids()
for switch_id in switch_ids:
inv_node = inventory.get_openflow_node(switch_id)
addr = inv_node.get_ip_address()
fcnt = inventory.get_openflow_node_flows_cnt(switch_id)
print " {0:<15} {1:<30} {2:<10}".format(addr, switch_id, fcnt)
print "\n".strip()
print (" Hosts in topology")
s4 = 'IP Address'
s5 = 'MAC Address'
print "\n".strip()
print " {0:<15} {1:<17}".format(s4, s5)
print " {0:<15} {1:<17}".format(sym*15, sym*17)
host_ids = topo.get_host_ids()
for host_id in host_ids:
topo_node = topo.get_node_by_id(host_id)
mac = topo_node.get_mac_address()
ipaddr = topo_node.get_ip_address_for_mac(mac)
print " {0:<15} {1:<17}".format(ipaddr, mac)
print"\n"
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def show_switch(self, switch_id, verbose=False):
ctrl = self.ctrl
switch_inv = None
switch_topo = None
result = ctrl.build_openflow_node_inventory_object(switch_id)
status = result.get_status()
if(status.eq(STATUS.OK) == True):
switch_inv = result.get_data()
assert(isinstance(switch_inv, OpenFlowCapableNode))
elif(status.eq(STATUS.DATA_NOT_FOUND)):
print "\n".strip()
print " Requested data not found"
print "\n".strip()
exit(0)
else:
print ("\n")
print ("!!!Error, failed to get inventory info for '%s' switch, reason: %s"
% (switch_id, status.brief().lower()))
exit(1)
topo_id = "flow:1"
result = ctrl.build_topology_object(topo_id)
status = result.get_status()
if(status.eq(STATUS.OK) == True):
topo = result.get_data()
assert(isinstance(topo, Topology))
switch_topo = topo.get_switch(switch_id)
assert(isinstance(switch_topo, Node))
assert(switch_topo.is_switch())
elif(status.eq(STATUS.DATA_NOT_FOUND)):
print "\n".strip()
print " Requested data not found"
print "\n".strip()
exit(0)
else:
print ("\n")
print ("!!!Error, failed to parse '%s' topology info, reason: %s"
% (topo_id, status.brief().lower()))
exit(1)
print "\n".strip()
print " Switch '%s'" % switch_id
print "\n".strip()
print " IP Address : %s" % switch_inv.get_ip_address()
print " Max tables : %s" % switch_inv.get_max_tables_info()
print " Number of flows : %s" % switch_inv.get_flows_cnt()
clist = switch_inv.get_capabilities()
g = 2
chunks=[clist[x:x+g] for x in xrange(0, len(clist), g)]
s = 'Capabilities'
print " %s :" % s,
for i in range(0, len(chunks)):
n = 0 if i == 0 else len(s) + 9
print "%s%s" % (" "*n, ", ".join(chunks[i]))
print "\n".strip()
s1 = "Port "
s2 = "OpenFlow Id"
sym = '-'
print " {0:<10} {1:<30}".format(s1, s2)
print " {0:<10} {1:<30}".format(sym*10, sym*30)
pids = switch_inv.get_port_ids()
for pid in pids:
pnum = switch_inv.get_port_number(pid)
print " {0:<10} {1:<30}".format(pnum, pid)
print "\n".strip()
if (verbose):
pnums = switch_topo.get_port_numbers()
for pnum in pnums:
if pnum == 'LOCAL':
continue
print " Port '%s' connected devices" % pnum
print "\n".strip()
peer_list = topo.get_peer_list_for_node_port_(switch_topo, pnum)
if len(peer_list):
for item in peer_list:
assert(isinstance(item, Node))
if(item.is_switch()):
print " Device Type : %s" % "switch"
print " OpenFlow Id : %s" % item.get_openflow_id()
elif (item.is_host()):
print " Device Type : %s" % "host"
mac_addr = item.get_mac_address()
print " MAC Address : %s" % mac_addr
ip_addr = item.get_ip_address_for_mac(mac_addr)
print " IP Address : %s" % ip_addr
else:
print " None"
print "\n".strip()
#-------------------------------------------------------------------------------
# Class 'InventoryInfo'
#-------------------------------------------------------------------------------
class InventoryInfo():
""" Methods to retrieve and display nodes inventory information """
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def __init__(self, ctrl):
self.ctrl = ctrl
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def show_common(self, verbose=False):
ctrl = self.ctrl
inv_obj = None
result = ctrl.build_inventory_object()
status = result.get_status()
if(status.eq(STATUS.OK) == True):
inv_obj = result.get_data()
assert(isinstance(inv_obj, Inventory))
elif(status.eq(STATUS.DATA_NOT_FOUND)):
print "\n".strip()
print " Requested data not found"
print "\n".strip()
exit(0)
else:
print ("\n")
print ("!!!Error, failed to obtain inventory info, reason: %s"
% status.brief().lower())
exit(0)
openflow_node_ids = inv_obj.get_openflow_node_ids()
openflow_nodes = []
flows_cnt = 0
for node_id in openflow_node_ids:
node = inv_obj.get_openflow_node(node_id)
assert(isinstance(node, OpenFlowCapableNode))
openflow_nodes.append(node)
flows_cnt += inv_obj.get_openflow_node_flows_cnt(node_id)
print "\n".strip()
print (" OpenFlow Inventory Information")
print "\n".strip()
print (" Number of switches : %s" % len(openflow_node_ids))
print (" Number of flows : %s" % flows_cnt)
if (verbose):
for node in openflow_nodes:
assert(isinstance(node, OpenFlowCapableNode))
print "\n".strip()
print " Switch '%s'\n" % node.get_id()
print " IP Address : %s" % node.get_ip_address()
print " Number of flows : %s" % node.get_flows_cnt()
print " Max tables : %s" % node.get_max_tables_info()
print " Max buffers : %s" % node.get_max_buffers_info()
s = 'Capabilities'
clist = node.get_capabilities()
if len(clist) > 0:
g = 2
chunks=[clist[x:x+g] for x in xrange(0, len(clist), g)]
print " %s :" % s,
for i in range(0, len(chunks)):
n = 0 if i == 0 else len(s) + 11
print "%s%s" % (" "*n, ", ".join(chunks[i]))
else:
print " %s : %s" % (s, None)
s1 = 'Table Id'
s2 = 'Flows Cnt'
print "\n".strip()
print " {0:<8} {1:<10}".format(s1, s2)
sym = '-'
print " {0:<8} {1:<10}".format(sym*len(s1), sym*len(s2))
flow_tables_cnt = node.get_flow_tables_cnt()
for table_id in range(0, flow_tables_cnt+1):
cnt = node.get_flows_in_table_cnt(table_id)
if (cnt != 0):
print " {0:<8} {1:<10}".format(table_id, cnt)
s1 = 'Port'
s2 = 'OpenFlow Id'
print "\n".strip()
print " {0:<8} {1:<16}".format(s1, s2)
print " {0:<8} {1:<30}".format(sym*8, sym*30)
port_ids = node.get_port_ids()
for port_id in port_ids:
port_obj = node.get_port_obj(port_id)
assert(isinstance(port_obj, OpenFlowPort))
pnum = port_obj.get_port_number()
print " {0:<8} {1:<30}".format(pnum, port_id)
else:
print "\n".strip()
print (" Switches")
s1 = 'IP Address'
s2 = 'OpenFlow Id'
s3 = 'Flows Cnt'
sym = '-'
print "\n".strip()
print " {0:<15} {1:<30} {2:<10}".format(s1, s2, s3)
print " {0:<15} {1:<30} {2:<10}".format(sym*15, sym*30, sym*10)
for node in openflow_nodes:
assert(isinstance(node, OpenFlowCapableNode))
addr = node.get_ip_address()
node_id = node.get_id()
flows_cnt = node.get_flows_cnt()
print " {0:<15} {1:<30} {2:<10}".format(addr, node_id, flows_cnt)
print "\n"
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def show_switch(self, switch_id, verbose=False):
ctrl = self.ctrl
switch_inv = None
result = ctrl.build_openflow_node_inventory_object(switch_id)
status = result.get_status()
if(status.eq(STATUS.OK) == True):
switch_inv = result.get_data()
assert(isinstance(switch_inv, OpenFlowCapableNode))
elif(status.eq(STATUS.DATA_NOT_FOUND)):
print "\n".strip()
print " Requested data not found"
print "\n".strip()
exit(0)
else:
print ("\n")
print ("!!!Error, failed to get inventory info for '%s' switch, reason: %s"
% (switch_id, status.brief().lower()))
exit(1)
if (verbose):
print "\n".strip()
print " Switch '%s'\n" % switch_inv.get_id()
print " Manufacturer : %s" % switch_inv.get_manufacturer_info()
print " Software : %s" % switch_inv.get_software_info()
print " Hardware : %s" % switch_inv.get_hardware_info()
print " Serial number : %s" % switch_inv.get_serial_number()
print " Description : %s" % switch_inv.get_description()
print "\n".strip()
print " OpenFlow Id : %s" % switch_inv.get_id()
print " IP Address : %s" % switch_inv.get_ip_address()
print " Number of flows : %s" % switch_inv.get_flows_cnt()
print " Max tables : %s" % switch_inv.get_max_tables_info()
print " Max buffers : %s" % switch_inv.get_max_buffers_info()
clist = switch_inv.get_capabilities()
g = 2
chunks=[clist[x:x+g] for x in xrange(0, len(clist), g)]
s = 'Capabilities'
print " %s :" % s,
for i in range(0, len(chunks)):
n = 0 if i == 0 else len(s) + 9
print "%s%s" % (" "*n, ", ".join(chunks[i]))
port_ids = switch_inv.get_port_ids()
for port_id in port_ids:
port_inv = switch_inv.get_port_obj(port_id)
assert(isinstance(port_inv, OpenFlowPort))
pnum = port_inv.get_port_number()
pname = port_inv.get_port_name()
pid = port_inv.get_port_id()
mac = port_inv.get_mac_address()
link_state = port_inv.get_link_state()
fwd_state = port_inv.get_forwarding_state()
pkts_rx = port_inv.get_packets_received()
pkts_tx = port_inv.get_packets_transmitted()
bytes_rx = port_inv.get_bytes_received()
bytes_tx = port_inv.get_bytes_transmitted()
print "\n".strip()
print " Port '{}'".format(pnum)
print "\n".strip()
print " OpenFlow Id : {}".format(pid)
print " Name : {}".format(pname)
print " MAC address : {}".format(mac)
print " Link state : {}".format(link_state)
print " Oper state : {}".format(fwd_state)
print " Pkts RX : {}".format(pkts_rx)
print " Pkts TX : {}".format(pkts_tx)
print " Bytes RX : {}".format(bytes_rx)
print " Bytes TX : {}".format(bytes_tx)
s = 'Current features'
cflist = port_inv.get_current_features()
if len(cflist) > 0:
g = 2
chunks=[cflist[x:x+g] for x in xrange(0, len(cflist), g)]
print " %s :" % s,
for i in range(0, len(chunks)):
n = 0 if i == 0 else len(s) + 8
print "%s%s" % (" "*n, ", ".join(chunks[i]))
else:
print " %s : %s" % (s, None)
else:
print "\n".strip()
print " Switch '%s'" % switch_id
print "\n".strip()
print " IP Address : %s" % switch_inv.get_ip_address()
print " Number of flows : %s" % switch_inv.get_flows_cnt()
print " Max tables : %s" % switch_inv.get_max_tables_info()
print " Max buffers : %s" % switch_inv.get_max_buffers_info()
clist = switch_inv.get_capabilities()
g = 2
chunks=[clist[x:x+g] for x in xrange(0, len(clist), g)]
s = 'Capabilities'
print " %s :" % s,
for i in range(0, len(chunks)):
n = 0 if i == 0 else len(s) + 9
print "%s%s" % (" "*n, ", ".join(chunks[i]))
s1 = 'Table Id'
s2 = 'Flows Cnt'
print "\n".strip()
print " {0:<8} {1:<10}".format(s1, s2)
sym = '-'
print " {0:<8} {1:<10}".format(sym*len(s1), sym*len(s2))
flow_tables_cnt = switch_inv.get_flow_tables_cnt()
for table_id in range(0, flow_tables_cnt+1):
cnt = switch_inv.get_flows_in_table_cnt(table_id)
if (cnt != 0):
print " {0:<8} {1:<10}".format(table_id, cnt)
s1 = 'Port'
s2 = 'OpenFlow Id'
print "\n".strip()
print " {0:<8} {1:<16}".format(s1, s2)
print " {0:<8} {1:<30}".format(sym*8, sym*30)
port_ids = switch_inv.get_port_ids()
for port_id in port_ids:
port_obj = switch_inv.get_port_obj(port_id)
assert(isinstance(port_obj, OpenFlowPort))
pnum = port_obj.get_port_number()
print " {0:<8} {1:<30}".format(pnum, port_id)
print "\n"
#-------------------------------------------------------------------------------
# Class 'FlowInfo'
#-------------------------------------------------------------------------------
class FlowInfo():
""" Methods to retrieve and display OpenFlow flows information """
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def __init__(self, ctrl, switch_id):
self.ctrl = ctrl
self.switchid = switch_id
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def show_table(self, table_id, oper, ofp):
flow_entries = []
ofswitch = OFSwitch(self.ctrl, self.switchid)
if oper:
result = ofswitch.get_operational_FlowEntries(table_id)
else:
result = ofswitch.get_configured_FlowEntries(table_id)
status = result.get_status()
if(status.eq(STATUS.OK) == True):
data = result.get_data()
flow_entries = sorted(data, key=lambda fe: fe.get_flow_priority())
elif(status.eq(STATUS.DATA_NOT_FOUND)):
print "\n".strip()
print " Requested data not found"
print "\n".strip()
exit(0)
else:
print ("\n")
print ("!!!Error, reason: %s" % status.brief().lower())
exit(0)
print "\n".strip()
s = "Device Operational" if oper else "Controller Cached"
print " Switch '%s' - %s Flows" % (self.switchid, s)
print "\n".strip()
if len(flow_entries) > 0:
for flow_entry in flow_entries:
assert(isinstance(flow_entry, FlowEntry))
if(ofp):
print " -- Flow id '%s'" % flow_entry.get_flow_id()
print " %s" % flow_entry.to_ofp_oxm_syntax()
else:
lines = flow_entry.to_yang_json(strip=True).split('\n')
for line in lines:
print " %s" % line
else:
print " No flows found"
print "\n".strip()
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def show_flow(self, table_id, flow_id, oper, ofp):
ofswitch = OFSwitch(self.ctrl, self.switchid)
flow_entry = None
if oper:
result = ofswitch.get_operational_FlowEntry(table_id, flow_id)
else:
result = ofswitch.get_configured_FlowEntry(table_id, flow_id)
status = result.get_status()
if(status.eq(STATUS.OK)):
flow_entry = result.get_data()
assert(isinstance(flow_entry, FlowEntry))
elif(status.eq(STATUS.DATA_NOT_FOUND)):
print "\n".strip()
print " Requested data not found"
print "\n".strip()
exit(0)
else:
print ("\n")
print ("!!!Error, reason: %s" % status.brief().lower())
exit(0)
print "\n".strip()
s = "Device Operational" if oper else "Controller Cached"
print " Switch '%s' - %s Flows" % (self.switchid, s)
print "\n".strip()
if(flow_entry != None):
if(ofp):
print " -- Flow id '%s'" % flow_entry.get_flow_id()
print " %s" % flow_entry.to_ofp_oxm_syntax()
else:
lines = flow_entry.to_yang_json(strip=True).split('\n')
for line in lines:
print " %s" % line
else:
print " Not found"
print "\n".strip()
#-------------------------------------------------------------------------------
# Class 'GroupInfo'
#-------------------------------------------------------------------------------
class GroupInfo():
""" Methods to retrieve and display OpenFlow groups information """
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def __init__(self, ctrl, switch_id):
self.ctrl = ctrl
self.switchid = switch_id
def show_table(self, config, description, stats, ofp):
groups = []
ofswitch = OFSwitch(self.ctrl, self.switchid)
s = ""
if description:
s = 'Device Operational Groups'
result = ofswitch.get_groups_description(decode_object=True)
elif stats:
s = 'Group Statistics'
result = ofswitch.get_groups_statistics(decode_object=True)
elif config:
s = 'Controller Cached Groups'
result = ofswitch.get_configured_groups(decode_object=True)
else:
assert(False)
status = result.get_status()
if(status.eq(STATUS.OK)):
data = result.get_data()
groups = sorted(data, key=lambda ge: ge.get_group_id())
elif(status.eq(STATUS.DATA_NOT_FOUND)):
print "\n".strip()
print " Requested data not found"
print "\n".strip()
exit(0)
else:
print ("\n")
print ("!!!Error, reason: %s" % status.brief().lower())
exit(0)
print "\n".strip()
print " Switch '%s' - %s" % (self.switchid, s)
print "\n".strip()
if len(groups) > 0:
for group in groups:
assert(isinstance(group, GroupEntry) or
isinstance(group, GroupDescription) or
isinstance(group, GroupStatistics))
if(ofp):
print " -- Group id '%s'" % group.get_group_id()
print " %s" % group.to_ofp_oxm_syntax()
else:
lines = group.to_yang_json(strip=True).split('\n')
for line in lines:
print " %s" % line
else:
print " No groups found"
print "\n".strip()
def show_group(self, group_id, config, description, stats, ofp):
ofswitch = OFSwitch(self.ctrl, self.switchid)
group_entry = None
s = ""
if description:
s = 'Description'
result = ofswitch.get_group_description(group_id,
decode_object=True)
elif stats:
s = 'Statistics'
result = ofswitch.get_group_statistics(group_id,
decode_object=True)
elif config:
s = 'Config'
result = ofswitch.get_configured_group(group_id,
decode_object=True)
else:
assert(False)
status = result.get_status()
if(status.eq(STATUS.OK)):
group_entry = result.get_data()
assert(isinstance(group_entry, GroupEntry) or
isinstance(group_entry, GroupDescription) or
isinstance(group_entry, GroupStatistics))
elif(status.eq(STATUS.DATA_NOT_FOUND)):
print "\n".strip()
print " Requested data not found"
print "\n".strip()
exit(0)
else:
print ("\n")
print ("!!!Error, reason: %s" % status.brief().lower())
exit(0)
print "\n".strip()
print " Group %s - Switch '%s'" % (s, self.switchid)
print "\n".strip()
if(group_entry != None):
if(ofp):
print "[GroupInfo] show_group - TBD"
# print " -- Flow id '%s'" % flow_entry.get_flow_id()
# print " %s" % flow_entry.to_ofp_oxm_syntax()
else:
lines = group_entry.to_yang_json(strip=True).split('\n')
for line in lines:
print " %s" % line
else:
print " Not found"
print "\n".strip()
def show_features(self):
ofswitch = OFSwitch(self.ctrl, self.switchid)
result = ofswitch.get_group_features(decode_object=True)
status = result.get_status()
if(status.eq(STATUS.OK)):
print "\n".strip()
print (" Group Features - Switch '%s'") % self.switchid
print "\n".strip()
group_features = result.get_data()
assert(isinstance(group_features, GroupFeatures))
s = 'Max groups'
alist = group_features.get_max_groups()
if alist:
q = 1 # number of list items to be in a single
# output string chunk
chunks=[alist[x:x+q] for x in xrange(0, len(alist), q)]
print " %s :" % s,
for i in range(0, len(chunks)):
n = 0 if i == 0 else len(s) + 9
print "%s%s" % (" "*n, ", ".join(map(str, chunks[i])))
else:
print " %s : %s" % (s, "n/a")
s = 'Group types'
alist = group_features.get_types()
if alist:
q = 1 # number of list items to be in a single
# output string chunk
chunks=[alist[x:x+q] for x in xrange(0, len(alist), q)]
print " %s :" % s,
for i in range(0, len(chunks)):
n = 0 if i == 0 else len(s) + 8
print "%s%s" % (" "*n, ", ".join(chunks[i]))
else:
print " %s : %s" % (s, "n/a")
s = 'Capabilities'
alist = group_features.get_capabilities()
if alist:
q = 1 # number of list items to be in a single
# output string chunk
chunks=[alist[x:x+q] for x in xrange(0, len(alist), q)]
print " %s :" % s,
for i in range(0, len(chunks)):
n = 0 if i == 0 else len(s) + 7
print "%s%s" % (" "*n, ", ".join(chunks[i]))
else:
print " %s : %s" % (s, "n/a")
s = 'Actions'
actions = group_features.get_actions()
if actions:
q = 4 # number of list items to be in a single
# output string chunk
print " %s :" % s,
for i, alist in enumerate(actions):
n = 0 if i == 0 else len(s) + 12
chunks=[alist[x:x+q] for x in xrange(0, len(alist), q)]
for j in range(0, len(chunks)):
n = 0 if i == 0 and j == 0 else len(s) + 12
print "%s%s" % (" "*n, ", ".join(chunks[j]))
print "\n".strip()
else:
print " %s : %s" % (s, "n/a")
print "\n".strip()
elif(status.eq(STATUS.DATA_NOT_FOUND)):
print "\n".strip()
print " Requested data not found"
print "\n".strip()
exit(0)
else:
print ("\n")
print ("!!!Error, reason: %s" % status.brief().lower())
exit(0)
def show_statistics(self):
print "[GroupInfo] show_statistics - TBD"
def show_description(self):
print "[GroupInfo] show_description - TBD"
#-------------------------------------------------------------------------------
# Class 'OFToolParser'
#-------------------------------------------------------------------------------
class OFToolParser(object):
""" CLI parser and commands executer """
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def __init__(self):
self.prog = 'oftool'
parser = argparse.ArgumentParser(
prog=self.prog,
description='Command line tool for interaction with OpenFlow Controller',
usage="%(prog)s [-h] [-C <path>] <command> [<args>]\n"
"(type '%(prog)s -h' for details)\n"
"\nAvailable commands are:\n"
"\n show-topo Show network topology information"
"\n show-inv Show inventory nodes information"
"\n show-flow Show OpenFlow flows information"
"\n clear-flow Delete OpenFlow flows"
"\n add-flow Add OpenFlow flows"
"\n show-group Show OpenFlow groups information"
"\n clear-group Delete OpenFlow groups"
"\n add-group Add OpenFlow groups"
"\n"
"\n '%(prog)s help <command>' provides details for a specific command")
parser.add_argument('-C', metavar="<path>",
dest='ctrl_cfg_file',
help="path to the controller's configuration file "
"(default is './ctrl.yml')",
default="./ctrl.yml")
parser.add_argument('command', help='command to be executed')
args, remaining_args = parser.parse_known_args()
# Get Controller's attributes from configuration file
self.ctrl_cfg = self.get_ctrl_cfg(args.ctrl_cfg_file)
if(self.ctrl_cfg == None):
print "\n".strip()
print ("Cannot find controller configuration file")
print "\n".strip()
exit(1)
        # Invoke the method matching the name of the sub-command argument
cmd_orig = args.command
cmd = cmd_orig.replace('-', '_')
if hasattr(self, cmd):
getattr(self, cmd)(remaining_args)
else:
print "\n".strip()
print ("Error, unrecognized command '%s'" % cmd_orig)
print "\n".strip()
parser.print_help()
exit(1)
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def show_topo(self, options):
parser = argparse.ArgumentParser(
prog=self.prog,
# description='Show network topology information',
usage="%(prog)s show-topo [-s=SWITCHID|--switch=SWITCHID]"
" [-v|--verbose]"
"\n\n"
"Show OpenFlow network topology information in the"
" operational inventory store\n\n"
"Options:\n"
" -s, --switch switch identifier\n"
" -v, --verbose detailed output\n")
parser.add_argument("-v", '--verbose', action="store_true",
help="output details level")
parser.add_argument('-s', '--switch', metavar = "SWITCHID")
parser.add_argument('-U', action="store_true", dest="usage",
help=argparse.SUPPRESS)
args = parser.parse_args(options)
if(args.usage):
parser.print_usage()
print "\n".strip()
return
print "\n".strip()
print " [Controller '%s']" % self.ctrl_cfg.to_string()
ctrl = Controller(self.ctrl_cfg.ip_addr, self.ctrl_cfg.tcp_port,
self.ctrl_cfg.admin_name, self.ctrl_cfg.admin_pswd)
topo = TopologyInfo(ctrl)
if(args.switch):
topo.show_switch(args.switch, args.verbose)
else:
topo.show_common(args.verbose)
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def show_inv(self, options):
parser = argparse.ArgumentParser(
prog=self.prog,
# description="Show OpenFlow nodes information in the controller's inventory store",
usage="%(prog)s show-inv [-s=SWITCHID|--switch=SWITCHID]"
" [-v|--verbose]"
"\n\n"
"Show OpenFlow nodes information in the"
" operational inventory store\n\n"
"Options:\n"
" -s, --switch switch identifier\n"
" -v, --verbose detailed output\n")
parser.add_argument("-v", '--verbose', action="store_true",
help="output details level")
parser.add_argument('-s', '--switch', metavar = "SWITCHID")
parser.add_argument('-U', action="store_true", dest="usage",
help=argparse.SUPPRESS)
args = parser.parse_args(options)
if(args.usage):
parser.print_usage()
print "\n".strip()
return
print "\n".strip()
print " [Controller '%s']" % self.ctrl_cfg.to_string()
ctrl = Controller(self.ctrl_cfg.ip_addr, self.ctrl_cfg.tcp_port,
self.ctrl_cfg.admin_name, self.ctrl_cfg.admin_pswd)
inv = InventoryInfo(ctrl)
if(args.switch):
inv.show_switch(args.switch, args.verbose)
else:
inv.show_common(args.verbose)
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def show_flow(self, options):
parser = argparse.ArgumentParser(
prog=self.prog,
# description='Show OpenFlow flows information',
usage="%(prog)s show-flow -s=SWITCHID|--switch=SWITCHID\n"
" -t=TABLEID|--table=TABLEID\n"
" [-f=FLOWID|--flow=FLOWID]\n"
" [--config|--operational]\n"
" [--json|--ofp]"
"\n\n"
"Show OpenFlow flows information\n\n"
"\n\n"
"Options:\n"
" -s, --switch switch identifier\n"
" -t, --table flow table id\n"
" -f, --flow flow entry id\n"
" --config controller cached flows (default)\n"
" --operational device operational flows\n"
" --json display data in JSON format (default)\n"
" --ofp display data in OpenFlow protocol format\n"
)
parser.add_argument('-s', '--switch', metavar = "SWITCHID")
parser.add_argument('-t', '--table', metavar = "TABLEID",
type=self.positive_int)
parser.add_argument('-f', '--flow', metavar = "FLOWID")
group1 = parser.add_mutually_exclusive_group()
group1.add_argument('--config', action='store_true', default=True)
        group1.add_argument('--operational', action='store_true', dest='oper')
group2 = parser.add_mutually_exclusive_group()
group2.add_argument('--json', action='store_true', default=True)
group2.add_argument('--ofp', action='store_true')
parser.add_argument('-U', action="store_true", dest="usage",
help=argparse.SUPPRESS)
args = parser.parse_args(options)
if(args.usage):
parser.print_usage()
print "\n".strip()
return
if (args.switch == None):
msg = "option -s (or --switch) is required"
parser.error(msg)
if (args.table == None):
msg = "option -t (or --table) is required"
parser.error(msg)
print "\n".strip()
print " [Controller '%s']" % self.ctrl_cfg.to_string()
ctrl = Controller(self.ctrl_cfg.ip_addr, self.ctrl_cfg.tcp_port,
self.ctrl_cfg.admin_name, self.ctrl_cfg.admin_pswd)
flow = FlowInfo(ctrl, args.switch)
if (args.flow != None):
flow.show_flow(args.table, args.flow, args.oper, args.ofp)
else:
flow.show_table(args.table, args.oper, args.ofp)
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def clear_flow(self, options):
parser = argparse.ArgumentParser(
prog=self.prog,
description='Clear OpenFlow flows',
usage="%(prog)s clear-flow -s=SWITCHID|--switch=SWICTHID\n"
" -t=TABLEID|--table=TABLEID\n"
" [-f=FLOWID|--flow=FLOWID]\n"
"\n\n"
"Remove one or set of flows from the Controller\n\n"
"\n\n"
"Options:\n"
" -s, --switch switch identifier\n"
" -t, --table flow table id\n"
" -f, --flow flow id\n"
)
parser.add_argument('-s', '--switch', metavar = "SWITCHID")
parser.add_argument('-t', '--table', metavar = "TABLEID",
type=self.positive_int)
parser.add_argument('-f', '--flow', metavar = "FLOWID")
parser.add_argument('-U', action="store_true", dest="usage",
help=argparse.SUPPRESS)
args = parser.parse_args(options)
if(args.usage):
parser.print_usage()
print "\n".strip()
return
if (args.switch == None):
msg = "option -s (or --switch) is required"
parser.error(msg)
if (args.table == None):
msg = "option -t (or --table) is required"
parser.error(msg)
print "\n".strip()
print " [Controller '%s']" % self.ctrl_cfg.to_string()
ctrl = Controller(self.ctrl_cfg.ip_addr, self.ctrl_cfg.tcp_port,
self.ctrl_cfg.admin_name, self.ctrl_cfg.admin_pswd)
ofswitch = OFSwitch(ctrl, args.switch)
if(args.flow != None):
result = ofswitch.delete_flow(args.table, args.flow)
else:
result = ofswitch.delete_flows(args.table)
status = result.get_status()
print "\n".strip()
print "%s" % status.detailed()
print "\n".strip()
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def add_flow(self, options):
parser = argparse.ArgumentParser(
prog=self.prog,
description="Add flow entries to the Controller's cache",
usage="%(prog)s add-flow -s=SWITCHID|--switch=SWICTHID\n"
" -f <path>|--file <path>\n"
" [--dry-run]\n"
"\n\n"
"Add flow entries to the Controller's cache\n\n"
"\n\n"
"Options:\n"
" -s, --switch switch identifier\n"
" -f, --file path to the file containing flow entries\n"
" (default is './flow.json')\n"
" -dry-run show content of flow(s) to be created"
)
parser.add_argument('-s', '--switch', metavar = "SWITCHID")
parser.add_argument('-f', '--file', metavar="<path>",
dest='flow_file',
help="path to the file containing flow entries "
"(default is './flow.json')",
default="./flow.json")
parser.add_argument('-U', action="store_true", dest="usage",
help=argparse.SUPPRESS)
parser.add_argument('--dry-run', action="store_true",
dest='dry_run', default=False)
args = parser.parse_args(options)
if(args.usage):
parser.print_usage()
print "\n".strip()
return
if(args.dry_run):
flows = self.read_data(args.flow_file)
if flows:
for flow in flows:
print json.dumps(flow, indent=4)
return
if (args.switch == None):
msg = "option -s (or --switch) is required"
parser.error(msg)
flows = self.read_data(args.flow_file)
if not flows:
print "Failed to execute command, exit"
exit(1)
print "\n".strip()
print " [Controller '%s']" % self.ctrl_cfg.to_string()
print "\n".strip()
ctrl = Controller(self.ctrl_cfg.ip_addr, self.ctrl_cfg.tcp_port,
self.ctrl_cfg.admin_name, self.ctrl_cfg.admin_pswd)
ofswitch = OFSwitch(ctrl, args.switch)
try:
for flow in flows:
fid = flow['id']
tid = flow['table_id']
js = json.dumps(flow, default=lambda o: o.__dict__)
result = ofswitch.add_modify_flow_json(table_id=tid, flow_id=fid,flow_json=js)
status = result.get_status()
if(status.eq(STATUS.OK)):
print "Flow id '%s', success" % fid
else:
print "Flow id '%s', failure, reason: %s" % (fid, status.detailed())
print "\n".strip()
except(Exception) as e:
msg = "Error: %s" % repr(e)
dbg_print(msg)
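    #---------------------------------------------------------------------------
    # Illustrative sketch of a './flow.json' entry consumed by add_flow() above.
    # Only the 'id' and 'table_id' keys are read by this tool; any remaining
    # fields are placeholders standing in for the controller's flow schema.
    # Comments are allowed because the file is parsed with ConcatJSONObjects.
    #
    #   {
    #       "id": "100",
    #       "table_id": 0,
    #       // additional flow body fields go here
    #   }
    #---------------------------------------------------------------------------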
def show_group(self, options):
parser = argparse.ArgumentParser(
prog=self.prog,
# description='Show OpenFlow groups information',
usage="%(prog)s show-group -s=SWITCHID|--switch=SWITCHID\n"
" [--features]\n"
" [-g=GROUPID|--group=GROUPID]\n"
# " [--config|--operational]\n"
" [--config|--operational|--statistics]\n"
" [--json|--ofp]"
"\n\n"
"Show OpenFlow groups information\n\n"
"\n\n"
"Options:\n"
" -s, --switch switch identifier\n"
" --features capabilities of groups on the switch\n"
" -g, --group group identifier (integer)\n"
" --config controller cached groups (default)\n"
# " --operational device operational groups\n"
# " --description set of groups on the switch\n"
# " --description operational groups on the switch\n"
" --operational operational groups on the switch\n"
# " --statistics statistics for one or more groups\n"
" --statistics statistics for groups on the switch\n"
" --json display data in JSON format (default)\n"
" --ofp display data in OpenFlow protocol format\n"
)
parser.add_argument('-s', '--switch', metavar = "SWITCHID")
parser.add_argument('-g', '--group', metavar = "GROUPID")
group1 = parser.add_mutually_exclusive_group()
group1.add_argument('--features', action='store_true')
group1.add_argument('--config', action='store_true', default=True)
        group1.add_argument('--operational', action='store_true', dest="description")
# group1.add_argument('--description', action='store_true')
group1.add_argument('--statistics', action='store_true')
group2 = parser.add_mutually_exclusive_group()
group2.add_argument('--json', action='store_true', default=True)
group2.add_argument('--ofp', action='store_true')
parser.add_argument('-U', action="store_true", dest="usage",
help=argparse.SUPPRESS)
args = parser.parse_args(options)
if(args.usage):
parser.print_usage()
print "\n".strip()
return
if (args.switch == None):
msg = "option -s (or --switch) is required"
parser.error(msg)
print "\n".strip()
print " [Controller '%s']" % self.ctrl_cfg.to_string()
ctrl = Controller(self.ctrl_cfg.ip_addr, self.ctrl_cfg.tcp_port,
self.ctrl_cfg.admin_name, self.ctrl_cfg.admin_pswd)
group = GroupInfo(ctrl, args.switch)
if(args.features):
group.show_features()
elif (args.group != None):
group.show_group(args.group,
args.config, args.description,
args.statistics, args.ofp)
else:
group.show_table(args.config, args.description,
args.statistics, args.ofp)
def clear_group(self, options):
parser = argparse.ArgumentParser(
prog=self.prog,
            description='Clear OpenFlow groups',
            usage="%(prog)s clear-group -s=SWITCHID|--switch=SWITCHID\n"
" [-g=GROUPID|--group=GROUPID]\n"
"\n\n"
"Remove one or all groups from the Controller\n\n"
"\n\n"
"Options:\n"
" -s, --switch switch identifier\n"
" -g, --group group identifier (integer)\n"
)
parser.add_argument('-s', '--switch', metavar = "SWITCHID")
parser.add_argument('-g', '--group', metavar = "GROUPID",
type=self.positive_int)
parser.add_argument('-U', action="store_true", dest="usage",
help=argparse.SUPPRESS)
args = parser.parse_args(options)
if(args.usage):
parser.print_usage()
print "\n".strip()
return
if (args.switch == None):
msg = "option -s (or --switch) is required"
parser.error(msg)
print "\n".strip()
print " [Controller '%s']" % self.ctrl_cfg.to_string()
ctrl = Controller(self.ctrl_cfg.ip_addr, self.ctrl_cfg.tcp_port,
self.ctrl_cfg.admin_name, self.ctrl_cfg.admin_pswd)
ofswitch = OFSwitch(ctrl, args.switch)
if(args.group != None):
result = ofswitch.delete_group(args.group)
status = result.get_status()
print "\n".strip()
if(status.eq(STATUS.OK)):
print (" Group '%s' successfully removed") % args.group
elif(status.eq(STATUS.DATA_NOT_FOUND)):
print " Requested data not found"
else:
print (" Failed to remove group '%s', reason '%s'") \
% (args.group, status.detailed())
print "\n".strip()
else:
result = ofswitch.get_group_ids(operational=False)
status = result.get_status()
print "\n".strip()
if(status.eq(STATUS.OK)):
group_ids = result.get_data()
for group in group_ids:
result = ofswitch.delete_group(group)
status = result.get_status()
if(status.eq(STATUS.OK)):
print (" Group '%s' successfully removed") % group
else:
print (" Failed to remove group '%s', reason '%s'") \
% (group,status.detailed())
else:
msg = " Failed to get list of groups to be deleted"
print ("%s, reason '%s'") % (msg, status.detailed())
print "\n".strip()
def add_group(self, options):
parser = argparse.ArgumentParser(
prog=self.prog,
description="Add flow entries to the Controller's cache",
usage="%(prog)s add-flow -s=SWITCHID|--switch=SWICTHID\n"
" -f <path>|--file <path>\n"
" [--dry-run]\n"
"\n\n"
"Add group table entries to the Controller's cache\n\n"
"\n\n"
"Options:\n"
" -s, --switch switch identifier\n"
" -f, --file path to the file containing group entries\n"
" (default is './group.json')\n"
" -dry-run show content of group(s) to be created"
)
parser.add_argument('-s', '--switch', metavar = "SWITCHID")
parser.add_argument('-f', '--file', metavar="<path>",
dest='group_file',
help="path to the file containing group entries "
"(default is './group.json')",
default="./group.json")
parser.add_argument('-U', action="store_true", dest="usage",
help=argparse.SUPPRESS)
parser.add_argument('--dry-run', action="store_true",
dest='dry_run', default=False)
args = parser.parse_args(options)
if(args.usage):
parser.print_usage()
print "\n".strip()
return
if(args.dry_run):
groups = self.read_data(args.group_file)
if groups:
for group in groups:
print json.dumps(group, indent=4)
return
if (args.switch == None):
msg = "option -s (or --switch) is required"
parser.error(msg)
groups = self.read_data(args.group_file)
if not groups:
print "Failed to execute command, exit"
exit(1)
print "\n".strip()
print " [Controller '%s']" % self.ctrl_cfg.to_string()
print "\n".strip()
ctrl = Controller(self.ctrl_cfg.ip_addr, self.ctrl_cfg.tcp_port,
self.ctrl_cfg.admin_name, self.ctrl_cfg.admin_pswd)
ofswitch = OFSwitch(ctrl, args.switch)
try:
for group in groups:
gid = group['group-id']
js = json.dumps(group, default=lambda o: o.__dict__)
result = ofswitch.add_modify_group_json(gid, js)
status = result.get_status()
if(status.eq(STATUS.OK)):
print "Group id '%s', success" % gid
else:
print ("Group id '%s', failure, reason: %s") \
% (gid, status.detailed())
print "\n".strip()
except(Exception) as e:
msg = "Error: %s" % repr(e)
dbg_print(msg)
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def help(self, options):
parser = argparse.ArgumentParser(add_help=False,
usage="oftool help <command>")
parser.add_argument('command')
args = parser.parse_args(options)
cmd = args.command.replace('-', '_')
if not hasattr(self, cmd):
            print 'Unrecognized command %s' % args.command
return
getattr(self, cmd)(['-U'])
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def get_ctrl_cfg(self, path):
try:
with open(path, 'r') as f:
                obj = yaml.safe_load(f)
d = {}
for k, v in obj.iteritems():
d[k] = v
addr = d['ipaddr']
port = d['port']
name = d['name']
pswd = d['pswd']
cfg = CtrlCfg(addr, port, name, pswd)
return cfg
except (IOError, KeyError) as e:
if isinstance(e, IOError):
print("Error: failed to read file '%s'" % path)
elif isinstance(e, KeyError):
print ("Error: unknown attribute %s in file '%s'" % (e, path))
return None
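    #---------------------------------------------------------------------------
    # Illustrative sketch of the './ctrl.yml' file read by get_ctrl_cfg() above
    # (the keys are inferred from the code; the values are placeholders):
    #
    #   ipaddr: 192.0.2.10
    #   port: 8181
    #   name: admin
    #   pswd: admin
    #---------------------------------------------------------------------------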
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def read_data(self, path):
try:
with open(path, 'r') as f:
objs = json.load(f, cls=ConcatJSONObjects,
object_pairs_hook=OrderedDict)
if not objs:
raise ValueError('no data')
return objs
except (Exception) as e:
if isinstance(e, IOError):
print("Error: failed to read file '%s'" % path)
elif isinstance(e, ValueError):
print "Error: failed to parse data in file '%s' [%s]" % (path, e)
else:
print "!!!Error: %s" % e
return None
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
    def positive_int(self, value):
        msg = "is not a valid positive integer"
        try:
            if int(value) < 0:
                raise ValueError
        except (ValueError, TypeError):
            raise argparse.ArgumentTypeError("'%s' %s" % (value, msg))
        return value
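#-------------------------------------------------------------------------------
# Illustrative invocations (a sketch based on the sub-commands registered in
# OFToolParser above; switch identifiers and file paths are placeholders):
#
#   oftool show-topo -v
#   oftool show-inv -s <SWITCHID> -v
#   oftool show-flow -s <SWITCHID> -t 0 --operational --ofp
#   oftool add-flow -s <SWITCHID> -f ./flow.json --dry-run
#   oftool clear-group -s <SWITCHID> -g 1
#-------------------------------------------------------------------------------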
if __name__ == '__main__':
OFToolParser()
|
bsd-3-clause
|
gclenaghan/scikit-learn
|
sklearn/datasets/kddcup99.py
|
4
|
12759
|
"""KDDCUP 99 dataset.
A classic dataset for anomaly detection.
The dataset page is available from UCI Machine Learning Repository
https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data.gz
"""
import sys
import errno
from gzip import GzipFile
from io import BytesIO
import logging
import os
from os.path import exists, join
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
import numpy as np
from .base import get_data_home
from .base import Bunch
from ..externals import joblib
from ..utils import check_random_state
from ..utils import shuffle as shuffle_method
URL10 = ('http://archive.ics.uci.edu/ml/'
'machine-learning-databases/kddcup99-mld/kddcup.data_10_percent.gz')
URL = ('http://archive.ics.uci.edu/ml/'
'machine-learning-databases/kddcup99-mld/kddcup.data.gz')
logger = logging.getLogger()
def fetch_kddcup99(subset=None, shuffle=False, random_state=None,
percent10=False):
"""Load and return the kddcup 99 dataset (regression).
The KDD Cup '99 dataset was created by processing the tcpdump portions
of the 1998 DARPA Intrusion Detection System (IDS) Evaluation dataset,
created by MIT Lincoln Lab [1] . The artificial data was generated using
a closed network and hand-injected attacks to produce a large number of
different types of attack with normal activity in the background.
    As the initial goal was to produce a large training set for supervised
    learning algorithms, there is a large proportion (80.1%) of abnormal data,
    which is unrealistic in the real world and inappropriate for unsupervised
    anomaly detection, which aims at detecting 'abnormal' data, i.e. data
    that is:
    1) qualitatively different from normal data, and
    2) only a small minority of the observations.
    We thus transform the KDD dataset into two different datasets: SA and SF.
    - SA is obtained by selecting all the normal data plus a small proportion
      of abnormal data, giving an anomaly proportion of 1%.
    - SF is obtained as in [2] by picking up only the data whose logged_in
      attribute is positive, thus focusing on the intrusion attack, which
      gives an attack proportion of 0.3%.
    - http and smtp are two subsets of SF corresponding to the third feature
      being equal to 'http' (resp. to 'smtp').
General KDD structure :
================ ==========================================
Samples total 4898431
Dimensionality 41
Features discrete (int) or continuous (float)
Targets str, 'normal.' or name of the anomaly type
================ ==========================================
SA structure :
================ ==========================================
Samples total 976158
Dimensionality 41
Features discrete (int) or continuous (float)
Targets str, 'normal.' or name of the anomaly type
================ ==========================================
SF structure :
================ ==========================================
Samples total 699691
Dimensionality 40
Features discrete (int) or continuous (float)
Targets str, 'normal.' or name of the anomaly type
================ ==========================================
http structure :
================ ==========================================
Samples total 619052
Dimensionality 39
Features discrete (int) or continuous (float)
Targets str, 'normal.' or name of the anomaly type
================ ==========================================
smtp structure :
================ ==========================================
Samples total 95373
Dimensionality 39
Features discrete (int) or continuous (float)
Targets str, 'normal.' or name of the anomaly type
================ ==========================================
Parameters
----------
subset : None, 'SA', 'SF', 'http', 'smtp'
To return the corresponding classical subsets of kddcup 99.
If None, return the entire kddcup 99 dataset.
random_state : int, RandomState instance or None, optional (default=None)
Random state for shuffling the dataset.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
shuffle : bool, default=False
Whether to shuffle dataset.
percent10 : bool, default=False
Whether to load only 10 percent of the data.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
        'data', the data to learn, and 'target', the label (attack type or
        'normal.') for each sample.
References
----------
.. [1] Analysis and Results of the 1999 DARPA Off-Line Intrusion
Detection Evaluation Richard Lippmann, Joshua W. Haines,
David J. Fried, Jonathan Korba, Kumar Das
.. [2] A Geometric Framework for Unsupervised Anomaly Detection: Detecting
Intrusions in Unlabeled Data (2002) by Eleazar Eskin, Andrew Arnold,
Michael Prerau, Leonid Portnoy, Sal Stolfo
"""
kddcup99 = _fetch_brute_kddcup99(shuffle=shuffle, percent10=percent10)
data = kddcup99.data
target = kddcup99.target
if subset == 'SA':
s = target == 'normal.'
t = np.logical_not(s)
normal_samples = data[s, :]
normal_targets = target[s]
abnormal_samples = data[t, :]
abnormal_targets = target[t]
n_samples_abnormal = abnormal_samples.shape[0]
# selected abnormal samples:
random_state = check_random_state(random_state)
r = random_state.randint(0, n_samples_abnormal, 3377)
abnormal_samples = abnormal_samples[r]
abnormal_targets = abnormal_targets[r]
data = np.r_[normal_samples, abnormal_samples]
target = np.r_[normal_targets, abnormal_targets]
if subset == 'SF' or subset == 'http' or subset == 'smtp':
# select all samples with positive logged_in attribute:
s = data[:, 11] == 1
data = np.c_[data[s, :11], data[s, 12:]]
target = target[s]
data[:, 0] = np.log((data[:, 0] + 0.1).astype(float))
data[:, 4] = np.log((data[:, 4] + 0.1).astype(float))
data[:, 5] = np.log((data[:, 5] + 0.1).astype(float))
if subset == 'http':
s = data[:, 2] == 'http'
data = data[s]
target = target[s]
data = np.c_[data[:, 0], data[:, 4], data[:, 5]]
if subset == 'smtp':
s = data[:, 2] == 'smtp'
data = data[s]
target = target[s]
data = np.c_[data[:, 0], data[:, 4], data[:, 5]]
if subset == 'SF':
data = np.c_[data[:, 0], data[:, 2], data[:, 4], data[:, 5]]
return Bunch(data=data, target=target)
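# Minimal usage sketch (illustrative helper, not part of the original module):
# fetch the 10% sample of the 'SA' subset and report the array shapes.
def _example_fetch_sa():
    # percent10 keeps the download small; random_state fixes the subsampling
    # of abnormal records so the result is reproducible.
    bunch = fetch_kddcup99(subset='SA', shuffle=True, random_state=0,
                           percent10=True)
    return bunch.data.shape, bunch.target.shape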
def _fetch_brute_kddcup99(subset=None, data_home=None,
download_if_missing=True, random_state=None,
shuffle=False, percent10=False):
"""Load the kddcup99 dataset, downloading it if necessary.
Parameters
----------
subset : None, 'SA', 'SF', 'http', 'smtp'
To return the corresponding classical subsets of kddcup 99.
If None, return the entire kddcup 99 dataset.
data_home : string, optional
Specify another download and cache folder for the datasets. By default
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : boolean, default=True
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
random_state : int, RandomState instance or None, optional (default=None)
Random state for shuffling the dataset.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
shuffle : bool, default=False
Whether to shuffle dataset.
percent10 : bool, default=False
Whether to load only 10 percent of the data.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (494021, 41)
Each row corresponds to the 41 features in the dataset.
dataset.target : numpy array of shape (494021,)
Each value corresponds to one of the 21 attack types or to the
label 'normal.'.
dataset.DESCR : string
Description of the kddcup99 dataset.
"""
data_home = get_data_home(data_home=data_home)
if sys.version_info[0] == 3:
        # The zlib compression format used by joblib is not compatible when
        # switching from Python 2 to Python 3, so use a separate folder
# under Python 3:
dir_suffix = "-py3"
else:
# Backward compat for Python 2 users
dir_suffix = ""
if percent10:
kddcup_dir = join(data_home, "kddcup99_10" + dir_suffix)
else:
kddcup_dir = join(data_home, "kddcup99" + dir_suffix)
samples_path = join(kddcup_dir, "samples")
targets_path = join(kddcup_dir, "targets")
available = exists(samples_path)
if download_if_missing and not available:
_mkdirp(kddcup_dir)
URL_ = URL10 if percent10 else URL
logger.warning("Downloading %s" % URL_)
f = BytesIO(urlopen(URL_).read())
dt = [('duration', int),
('protocol_type', 'S4'),
('service', 'S11'),
('flag', 'S6'),
('src_bytes', int),
('dst_bytes', int),
('land', int),
('wrong_fragment', int),
('urgent', int),
('hot', int),
('num_failed_logins', int),
('logged_in', int),
('num_compromised', int),
('root_shell', int),
('su_attempted', int),
('num_root', int),
('num_file_creations', int),
('num_shells', int),
('num_access_files', int),
('num_outbound_cmds', int),
('is_host_login', int),
('is_guest_login', int),
('count', int),
('srv_count', int),
('serror_rate', float),
('srv_serror_rate', float),
('rerror_rate', float),
('srv_rerror_rate', float),
('same_srv_rate', float),
('diff_srv_rate', float),
('srv_diff_host_rate', float),
('dst_host_count', int),
('dst_host_srv_count', int),
('dst_host_same_srv_rate', float),
('dst_host_diff_srv_rate', float),
('dst_host_same_src_port_rate', float),
('dst_host_srv_diff_host_rate', float),
('dst_host_serror_rate', float),
('dst_host_srv_serror_rate', float),
('dst_host_rerror_rate', float),
('dst_host_srv_rerror_rate', float),
('labels', 'S16')]
DT = np.dtype(dt)
file_ = GzipFile(fileobj=f, mode='r')
Xy = []
        for line in file_.readlines():
            # GzipFile yields bytes under Python 3; decode before splitting
            line = line.decode() if isinstance(line, bytes) else line
            Xy.append(line.replace('\n', '').split(','))
file_.close()
print('extraction done')
Xy = np.asarray(Xy, dtype=object)
for j in range(42):
Xy[:, j] = Xy[:, j].astype(DT[j])
X = Xy[:, :-1]
y = Xy[:, -1]
# XXX bug when compress!=0:
# (error: 'Incorrect data length while decompressing[...] the file
# could be corrupted.')
joblib.dump(X, samples_path, compress=0)
joblib.dump(y, targets_path, compress=0)
try:
X, y
except NameError:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
if shuffle:
X, y = shuffle_method(X, y, random_state=random_state)
return Bunch(data=X, target=y, DESCR=__doc__)
def _mkdirp(d):
"""Ensure directory d exists (like mkdir -p on Unix)
No guarantee that the directory is writable.
"""
try:
os.makedirs(d)
except OSError as e:
if e.errno != errno.EEXIST:
raise
|
bsd-3-clause
|
orione7/plugin.video.streamondemand-pureita
|
core/servertools.py
|
1
|
18391
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# streamondemand 5
# Copyright 2015 [email protected]
# http://www.mimediacenter.info/foro/viewforum.php?f=36
#
# Distributed under the terms of GNU General Public License v3 (GPLv3)
# http://www.gnu.org/licenses/gpl-3.0.html
# ------------------------------------------------------------
# This file is part of streamondemand 5.
#
# streamondemand 5 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# streamondemand 5 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with streamondemand 5. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------------------
# Server management
# ------------------------------------------------------------
import os
from core import config
from core import logger
from core import scrapertools
# Generic function to find videos in a page
def find_video_items(item=None, data=None, channel=""):
logger.info("streamondemand-pureita-master.core.servertools find_video_items")
# Descarga la página
if data is None:
from core import scrapertools
data = scrapertools.cache_page(item.url)
# logger.info(data)
    # Look for the links to the videos
from core.item import Item
listavideos = findvideos(data)
if item is None:
item = Item()
itemlist = []
for video in listavideos:
# scrapedtitle = " [ "+video[2]+" ] "
# DrZ3r0
scrapedtitle = item.title.strip() + " - " + video[0].strip()
scrapedurl = video[1]
server = video[2]
# DrZ3r0
thumbnail = item.thumbnail
if not thumbnail:
if get_server_parameters(server)["thumbnail"]:
thumbnail = get_server_parameters(server)["thumbnail"]
else:
thumbnail = "http://media.tvalacarta.info/servers/server_"+server+".png"
itemlist.append( Item(channel=item.channel, title=scrapedtitle, action="play", server=server, url=scrapedurl, thumbnail=thumbnail, fulltitle=item.fulltitle, show=item.show, plot=item.plot, parentContent=item, folder=False) )
return itemlist
def guess_server_thumbnail(title):
logger.info("streamondemand-pureita-master.core.servertools guess_server_thumbnail title=" + title)
lowcase_title = title.lower()
if "netu" in lowcase_title:
logger.info("streamondemand-pureita-master.core.servertools guess_server_thumbnail caso especial netutv")
return "http://media.tvalacarta.info/servers/server_netutv.png"
if "ul.to" in lowcase_title:
logger.info("streamondemand-pureita-master.core.servertools guess_server_thumbnail caso especial ul.to")
return "http://media.tvalacarta.info/servers/server_uploadedto.png"
if "waaw" in lowcase_title:
logger.info("streamondemand-pureita-master.core.servertools guess_server_thumbnail caso especial waaw")
return "http://media.tvalacarta.info/servers/server_waaw.png"
if "streamin" in lowcase_title:
logger.info("streamondemand-pureita-master.core.servertools guess_server_thumbnail caso especial streamin")
return "http://media.tvalacarta.info/servers/server_streaminto.png"
servers = get_servers_list()
for serverid in servers:
if serverid in lowcase_title:
logger.info("streamondemand-pureita-master.core.servertools guess_server_thumbnail encontrado " + serverid)
return "http://media.tvalacarta.info/servers/server_" + serverid + ".png"
return ""
def findvideosbyserver(data, serverid):
logger.info("streamondemand-pureita-master.core.servertools findvideosbyserver")
encontrados = set()
devuelve = []
try:
exec "from servers import " + serverid
exec "devuelve.extend(" + serverid + ".find_videos(data))"
except ImportError:
logger.info("Non esiste il connettore per #" + serverid + "#")
# import traceback
# logger.info(traceback.format_exc())
except:
logger.info("Errore del connettore #" + serverid + "#")
import traceback
logger.info(traceback.format_exc())
return devuelve
def findvideos(data, skip=False):
logger.info("streamondemand-pureita-master.core.servertools findvideos") # en #"+data+"#")
encontrados = set()
devuelve = []
    # Run findvideos on every server
server_list = get_servers_list()
for serverid in server_list:
try:
            # Replace the code with a "Plex compatible" alternative
# exec "from servers import "+serverid
# exec "devuelve.extend("+serverid+".find_videos(data))"
servers_module = __import__("servers." + serverid)
server_module = getattr(servers_module, serverid)
result = server_module.find_videos(data)
if result and skip: return result
devuelve.extend(result)
except ImportError:
logger.info("No existe conector para #" + serverid + "#")
# import traceback
# logger.info(traceback.format_exc())
except:
logger.info("Error en el conector #" + serverid + "#")
import traceback
logger.info(traceback.format_exc())
return devuelve
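# Illustrative sketch (not part of the original module): the minimal interface
# that findvideos() above expects from a connector in the 'servers' package.
# Each connector exposes find_videos(data) and returns (label, url, server_id)
# tuples, which find_video_items() unpacks as video[0], video[1] and video[2].
def _example_find_videos(data):
    # A real connector scrapes 'data' with regular expressions; the values
    # below are placeholders.
    return [("Example label", "http://example.com/video", "example")]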
def get_video_urls(server, url):
'''
servers_module = __import__("servers."+server)
server_module = getattr(servers_module,server)
return server_module.get_video_url( page_url=url)
'''
video_urls, puede, motivo = resolve_video_urls_for_playing(server, url)
return video_urls
def get_channel_module(channel_name):
if not "." in channel_name:
channel_module = __import__('channels.%s' % channel_name, None, None, ["channels.%s" % channel_name])
else:
channel_module = __import__(channel_name, None, None, [channel_name])
return channel_module
def get_server_from_url(url):
encontrado = findvideos(url, True)
if len(encontrado) > 0:
devuelve = encontrado[0][2]
else:
devuelve = "directo"
return devuelve
def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialogo=False):
logger.info("streamondemand-pureita-master.core.servertools resolve_video_urls_for_playing, server=" + server + ", url=" + url)
video_urls = []
torrent = False
server = server.lower()
    # If the video is "directo", there is nothing more to look up
if server == "directo" or server == "local":
logger.info("streamondemand-pureita-master.core.servertools server=directo, la url es la buena")
try:
import urlparse
parsed_url = urlparse.urlparse(url)
logger.info("parsed_url=" + str(parsed_url))
extension = parsed_url.path[-4:]
except:
extension = url[-4:]
video_urls = [["%s [%s]" % (extension, server), url]]
return video_urls, True, ""
    # Work out the video URLs
else:
        # Load the connector
try:
            # Show a progress dialog
if muestra_dialogo:
from platformcode import platformtools
progreso = platformtools.dialog_progress("StreamOnDemand PureITA", "Connessione con " + server)
server_parameters = get_server_parameters(server)
            # Count the available options, to compute the progress percentage
opciones = []
if server_parameters["free"] == "true":
opciones.append("free")
opciones.extend([premium for premium in server_parameters["premium"] if
config.get_setting(premium + "premium") == "true"])
logger.info("streamondemand-pureita-master.core.servertools opciones disponibles para " + server + ": " + str(
len(opciones)) + " " + str(opciones))
# Replaces the original code with a "Plex compatible" alternative
# exec "from servers import "+server+" as server_connector"
servers_module = __import__("servers." + server)
server_connector = getattr(servers_module, server)
logger.info("streamondemand-pureita-master.core.servertools servidor de " + server + " importado")
# If the connector has a function to check whether the video exists, check it now
if hasattr(server_connector, 'test_video_exists'):
logger.info("streamondemand-pureita-master.core.servertools invocando a " + server + ".test_video_exists")
puedes, motivo = server_connector.test_video_exists(page_url=url)
# If the function says it does not exist, stop here
if not puedes:
logger.info("streamondemand-pureita-master.core.servertools test_video_exists dice que el video no existe")
if muestra_dialogo: progreso.close()
return video_urls, puedes, motivo
else:
logger.info("streamondemand-pureita-master.core.servertools test_video_exists dice que el video SI existe")
# Get the free links
if server_parameters["free"] == "true":
if muestra_dialogo:
progreso.update((100 / len(opciones)) * opciones.index("free"), "Connessione con " + server)
logger.info("streamondemand-pureita-master.core.servertools invocando a " + server + ".get_video_url")
video_urls = server_connector.get_video_url(page_url=url, video_password=video_password)
# If no videos are found in free mode, the video does not exist
if len(video_urls) == 0:
if muestra_dialogo: progreso.close()
return video_urls, False, "Non trovo il video su " + server
# Get links for the different premium options
error_message = []
for premium in server_parameters["premium"]:
if config.get_setting(premium + "premium") == "true":
if muestra_dialogo:
progreso.update((100 / len(opciones)) * opciones.index(premium), "Connessione con " + premium)
exec "from servers import " + premium + " as premium_conector"
if premium == "realdebrid":
debrid_urls = premium_conector.get_video_url(page_url=url, premium=True,
video_password=video_password)
if not "REAL-DEBRID:" in debrid_urls[0][0]:
video_urls.extend(debrid_urls)
else:
error_message.append(debrid_urls[0][0])
elif premium == "alldebrid":
alldebrid_urls = premium_conector.get_video_url(page_url=url, premium=True,
user=config.get_setting(premium + "user"),
password=config.get_setting(
premium + "password"),
video_password=video_password)
if not "Alldebrid:" in alldebrid_urls[0][0]:
video_urls.extend(alldebrid_urls)
else:
error_message.append(alldebrid_urls[0][0])
else:
video_urls.extend(premium_conector.get_video_url(page_url=url, premium=True,
user=config.get_setting(premium + "user"),
password=config.get_setting(
premium + "password"),
video_password=video_password))
if not video_urls and error_message:
return video_urls, False, " || ".join(error_message)
if muestra_dialogo:
progreso.update(100, "Processo terminato")
# Close the progress dialog
if muestra_dialogo: progreso.close()
# If we got this far without any playable link, the video cannot be watched
if len(video_urls) == 0:
# What is the reason?
# 1) It does not exist -> already handled above
# 2) You are missing one of the compatible premium accounts
# List of the accounts that support this server
listapremium = []
for premium in server_parameters["premium"]:
listapremium.append(get_server_parameters(premium)["name"])
return video_urls, False, "Per il video su " + server + " è necessario<br/>un account " + " o ".join(
listapremium)
except:
if muestra_dialogo: progreso.close()
import traceback
logger.info(traceback.format_exc())
return video_urls, False, "Si è verificato un errore<br/>con il connettore " + server
return video_urls, True, ""
def is_server_enabled(server):
try:
server_parameters = get_server_parameters(server)
if server_parameters["active"] == "true":
if not config.get_setting("hidepremium") == "true":
return True
else:
if server_parameters["free"] == "true":
return True
if [premium for premium in server_parameters["premium"] if
config.get_setting(premium + "premium") == "true"]:
return True
else:
return False
else:
return False
except:
import traceback
logger.info(traceback.format_exc())
return False
def get_server_parameters(server):
server = scrapertools.find_single_match(server, '([^\.]+)')
try:
JSONFile = xml2dict(os.path.join(config.get_runtime_path(), "servers", server + ".xml"))["server"]
if type(JSONFile["premium"]) == dict: JSONFile["premium"] = JSONFile["premium"]["value"]
if JSONFile["premium"] == "": JSONFile["premium"] = []
if type(JSONFile["premium"]) == str and not JSONFile["premium"] == "": JSONFile["premium"] = [
JSONFile["premium"]]
return JSONFile
except:
logger.info("Error al cargar el servidor: " + server)
import traceback
logger.info(traceback.format_exc())
return {}
def get_servers_list():
logger.info("streamondemand-pureita-master.core.servertools get_servers_list")
ServersPath = os.path.join(config.get_runtime_path(), "servers")
ServerList = {}
for server in os.listdir(ServersPath):
if server.endswith(".xml"):
if is_server_enabled(server):
server_parameters = get_server_parameters(server)
ServerList[server_parameters["id"]] = server_parameters
return ServerList
def xml2dict(file=None, xmldata=None):
import re, sys, os
parse = globals().get(sys._getframe().f_code.co_name)
if xmldata is None and file is None: raise Exception("Non è possibile convertirlo!")
if xmldata is None:
if not os.path.exists(file): raise Exception("Il file non esiste!")
with open(file, "rb") as f:
xmldata = f.read()
matches = re.compile("<(?P<tag>[^>]+)>[\n]*[\s]*[\t]*(?P<value>.*?)[\n]*[\s]*[\t]*<\/(?P=tag)\s*>",
re.DOTALL).findall(xmldata)
return_dict = {}
for tag, value in matches:
# If the value contains child elements
if "<" in value and "</" in value:
if tag in return_dict:
if type(return_dict[tag]) == list:
return_dict[tag].append(parse(xmldata=value))
else:
return_dict[tag] = [return_dict[tag]]
return_dict[tag].append(parse(xmldata=value))
else:
return_dict[tag] = parse(xmldata=value)
else:
if tag in return_dict:
if type(return_dict[tag]) == list:
return_dict[tag].append(value)
else:
return_dict[tag] = [return_dict[tag]]
return_dict[tag].append(value)
else:
return_dict[tag] = value
return return_dict
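# Illustrative behaviour of xml2dict, based on the regex above (hypothetical,
# simplified XML; real server descriptors carry more fields):
#
#     xml2dict(xmldata="<server><id>foo</id><free>true</free></server>")
#     # -> {'server': {'id': 'foo', 'free': 'true'}}
#
# get_server_parameters() above then unwraps the outer "server" key.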
def get_server_remote_url(server_name):
server_parameters = get_server_parameters(server_name)
remote_server_url = server_parameters["update_url"] + server_name + ".py"
remote_version_url = server_parameters["update_url"] + server_name + ".xml"
logger.info("streamondemand-pureita-master.core.servertools remote_server_url=" + remote_server_url)
logger.info("streamondemand-pureita-master.core.servertools remote_version_url=" + remote_version_url)
return remote_server_url, remote_version_url
def get_server_local_path(server_name):
local_server_path = os.path.join(config.get_runtime_path(), 'servers', server_name + ".py")
local_version_path = os.path.join(config.get_runtime_path(), 'servers', server_name + ".xml")
local_compiled_path = os.path.join(config.get_runtime_path(), 'servers', server_name + ".pyo")
logger.info("streamondemand-pureita-master.core.servertools local_servers_path=" + local_server_path)
logger.info("streamondemand-pureita-master.core.servertools local_version_path=" + local_version_path)
logger.info("streamondemand-pureita-master.core.servertools local_compiled_path=" + local_compiled_path)
return local_server_path, local_version_path, local_compiled_path
|
gpl-3.0
|
skg-net/ansible
|
test/units/module_utils/facts/test_facts.py
|
38
|
22766
|
# This file is part of Ansible
# -*- coding: utf-8 -*-
#
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import os
import pytest
# for testing
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock, patch
from ansible.module_utils import facts
from ansible.module_utils.facts import hardware
from ansible.module_utils.facts import network
from ansible.module_utils.facts import virtual
class BaseTestFactsPlatform(unittest.TestCase):
platform_id = 'Generic'
fact_class = hardware.base.Hardware
collector_class = None
"""Verify that the automagic in Hardware.__new__ selects the right subclass."""
@patch('platform.system')
def test_new(self, mock_platform):
if not self.fact_class:
pytest.skip('This platform (%s) does not have a fact_class.' % self.platform_id)
mock_platform.return_value = self.platform_id
inst = self.fact_class(module=Mock(), load_on_init=False)
self.assertIsInstance(inst, self.fact_class)
self.assertEqual(inst.platform, self.platform_id)
def test_subclass(self):
if not self.fact_class:
pytest.skip('This platform (%s) does not have a fact_class.' % self.platform_id)
# 'Generic' will try to map to platform.system() that we are not mocking here
if self.platform_id == 'Generic':
return
inst = self.fact_class(module=Mock(), load_on_init=False)
self.assertIsInstance(inst, self.fact_class)
self.assertEqual(inst.platform, self.platform_id)
def test_collector(self):
if not self.collector_class:
pytest.skip('This test class needs to be updated to specify collector_class')
inst = self.collector_class()
self.assertIsInstance(inst, self.collector_class)
self.assertEqual(inst._platform, self.platform_id)
class TestLinuxFactsPlatform(BaseTestFactsPlatform):
platform_id = 'Linux'
fact_class = hardware.linux.LinuxHardware
collector_class = hardware.linux.LinuxHardwareCollector
class TestHurdFactsPlatform(BaseTestFactsPlatform):
platform_id = 'GNU'
fact_class = hardware.hurd.HurdHardware
collector_class = hardware.hurd.HurdHardwareCollector
class TestSunOSHardware(BaseTestFactsPlatform):
platform_id = 'SunOS'
fact_class = hardware.sunos.SunOSHardware
collector_class = hardware.sunos.SunOSHardwareCollector
class TestOpenBSDHardware(BaseTestFactsPlatform):
platform_id = 'OpenBSD'
fact_class = hardware.openbsd.OpenBSDHardware
collector_class = hardware.openbsd.OpenBSDHardwareCollector
class TestFreeBSDHardware(BaseTestFactsPlatform):
platform_id = 'FreeBSD'
fact_class = hardware.freebsd.FreeBSDHardware
collector_class = hardware.freebsd.FreeBSDHardwareCollector
class TestDragonFlyHardware(BaseTestFactsPlatform):
platform_id = 'DragonFly'
fact_class = None
collector_class = hardware.dragonfly.DragonFlyHardwareCollector
class TestNetBSDHardware(BaseTestFactsPlatform):
platform_id = 'NetBSD'
fact_class = hardware.netbsd.NetBSDHardware
collector_class = hardware.netbsd.NetBSDHardwareCollector
class TestAIXHardware(BaseTestFactsPlatform):
platform_id = 'AIX'
fact_class = hardware.aix.AIXHardware
collector_class = hardware.aix.AIXHardwareCollector
class TestHPUXHardware(BaseTestFactsPlatform):
platform_id = 'HP-UX'
fact_class = hardware.hpux.HPUXHardware
collector_class = hardware.hpux.HPUXHardwareCollector
class TestDarwinHardware(BaseTestFactsPlatform):
platform_id = 'Darwin'
fact_class = hardware.darwin.DarwinHardware
collector_class = hardware.darwin.DarwinHardwareCollector
class TestGenericNetwork(BaseTestFactsPlatform):
platform_id = 'Generic'
fact_class = network.base.Network
class TestHurdPfinetNetwork(BaseTestFactsPlatform):
platform_id = 'GNU'
fact_class = network.hurd.HurdPfinetNetwork
collector_class = network.hurd.HurdNetworkCollector
class TestLinuxNetwork(BaseTestFactsPlatform):
platform_id = 'Linux'
fact_class = network.linux.LinuxNetwork
collector_class = network.linux.LinuxNetworkCollector
class TestGenericBsdIfconfigNetwork(BaseTestFactsPlatform):
platform_id = 'Generic_BSD_Ifconfig'
fact_class = network.generic_bsd.GenericBsdIfconfigNetwork
collector_class = None
class TestHPUXNetwork(BaseTestFactsPlatform):
platform_id = 'HP-UX'
fact_class = network.hpux.HPUXNetwork
collector_class = network.hpux.HPUXNetworkCollector
class TestDarwinNetwork(BaseTestFactsPlatform):
platform_id = 'Darwin'
fact_class = network.darwin.DarwinNetwork
collector_class = network.darwin.DarwinNetworkCollector
class TestFreeBSDNetwork(BaseTestFactsPlatform):
platform_id = 'FreeBSD'
fact_class = network.freebsd.FreeBSDNetwork
collector_class = network.freebsd.FreeBSDNetworkCollector
class TestDragonFlyNetwork(BaseTestFactsPlatform):
platform_id = 'DragonFly'
fact_class = network.dragonfly.DragonFlyNetwork
collector_class = network.dragonfly.DragonFlyNetworkCollector
class TestAIXNetwork(BaseTestFactsPlatform):
platform_id = 'AIX'
fact_class = network.aix.AIXNetwork
collector_class = network.aix.AIXNetworkCollector
class TestNetBSDNetwork(BaseTestFactsPlatform):
platform_id = 'NetBSD'
fact_class = network.netbsd.NetBSDNetwork
collector_class = network.netbsd.NetBSDNetworkCollector
class TestOpenBSDNetwork(BaseTestFactsPlatform):
platform_id = 'OpenBSD'
fact_class = network.openbsd.OpenBSDNetwork
collector_class = network.openbsd.OpenBSDNetworkCollector
class TestSunOSNetwork(BaseTestFactsPlatform):
platform_id = 'SunOS'
fact_class = network.sunos.SunOSNetwork
collector_class = network.sunos.SunOSNetworkCollector
class TestLinuxVirtual(BaseTestFactsPlatform):
platform_id = 'Linux'
fact_class = virtual.linux.LinuxVirtual
collector_class = virtual.linux.LinuxVirtualCollector
class TestFreeBSDVirtual(BaseTestFactsPlatform):
platform_id = 'FreeBSD'
fact_class = virtual.freebsd.FreeBSDVirtual
collector_class = virtual.freebsd.FreeBSDVirtualCollector
class TestNetBSDVirtual(BaseTestFactsPlatform):
platform_id = 'NetBSD'
fact_class = virtual.netbsd.NetBSDVirtual
collector_class = virtual.netbsd.NetBSDVirtualCollector
class TestOpenBSDVirtual(BaseTestFactsPlatform):
platform_id = 'OpenBSD'
fact_class = virtual.openbsd.OpenBSDVirtual
collector_class = virtual.openbsd.OpenBSDVirtualCollector
class TestHPUXVirtual(BaseTestFactsPlatform):
platform_id = 'HP-UX'
fact_class = virtual.hpux.HPUXVirtual
collector_class = virtual.hpux.HPUXVirtualCollector
class TestSunOSVirtual(BaseTestFactsPlatform):
platform_id = 'SunOS'
fact_class = virtual.sunos.SunOSVirtual
collector_class = virtual.sunos.SunOSVirtualCollector
LSBLK_OUTPUT = b"""
/dev/sda
/dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0
/dev/sda2 66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK
/dev/mapper/fedora_dhcp129--186-swap eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d
/dev/mapper/fedora_dhcp129--186-root d34cf5e3-3449-4a6c-8179-a1feb2bca6ce
/dev/mapper/fedora_dhcp129--186-home 2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d
/dev/sr0
/dev/loop0 0f031512-ab15-497d-9abd-3a512b4a9390
/dev/loop1 7c1b0f30-cf34-459f-9a70-2612f82b870a
/dev/loop9 0f031512-ab15-497d-9abd-3a512b4a9390
/dev/loop9 7c1b4444-cf34-459f-9a70-2612f82b870a
/dev/mapper/docker-253:1-1050967-pool
/dev/loop2
/dev/mapper/docker-253:1-1050967-pool
"""
LSBLK_OUTPUT_2 = b"""
/dev/sda
/dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0
/dev/sda2 66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK
/dev/mapper/fedora_dhcp129--186-swap eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d
/dev/mapper/fedora_dhcp129--186-root d34cf5e3-3449-4a6c-8179-a1feb2bca6ce
/dev/mapper/fedora_dhcp129--186-home 2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d
/dev/mapper/an-example-mapper with a space in the name 84639acb-013f-4d2f-9392-526a572b4373
/dev/sr0
/dev/loop0 0f031512-ab15-497d-9abd-3a512b4a9390
"""
LSBLK_UUIDS = {'/dev/sda1': '66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK'}
UDEVADM_UUID = 'N/A'
MTAB = """
sysfs /sys sysfs rw,seclabel,nosuid,nodev,noexec,relatime 0 0
proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
devtmpfs /dev devtmpfs rw,seclabel,nosuid,size=8044400k,nr_inodes=2011100,mode=755 0 0
securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
tmpfs /dev/shm tmpfs rw,seclabel,nosuid,nodev 0 0
devpts /dev/pts devpts rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
tmpfs /run tmpfs rw,seclabel,nosuid,nodev,mode=755 0 0
tmpfs /sys/fs/cgroup tmpfs ro,seclabel,nosuid,nodev,noexec,mode=755 0 0
cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 0 0
pstore /sys/fs/pstore pstore rw,seclabel,nosuid,nodev,noexec,relatime 0 0
cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0
cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0
cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0
cgroup /sys/fs/cgroup/pids cgroup rw,nosuid,nodev,noexec,relatime,pids 0 0
cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0
cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0
cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0
cgroup /sys/fs/cgroup/hugetlb cgroup rw,nosuid,nodev,noexec,relatime,hugetlb 0 0
cgroup /sys/fs/cgroup/perf_event cgroup rw,nosuid,nodev,noexec,relatime,perf_event 0 0
cgroup /sys/fs/cgroup/net_cls,net_prio cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio 0 0
configfs /sys/kernel/config configfs rw,relatime 0 0
/dev/mapper/fedora_dhcp129--186-root / ext4 rw,seclabel,relatime,data=ordered 0 0
selinuxfs /sys/fs/selinux selinuxfs rw,relatime 0 0
systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct 0 0
debugfs /sys/kernel/debug debugfs rw,seclabel,relatime 0 0
hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0
tmpfs /tmp tmpfs rw,seclabel 0 0
mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0
/dev/loop0 /var/lib/machines btrfs rw,seclabel,relatime,space_cache,subvolid=5,subvol=/ 0 0
/dev/sda1 /boot ext4 rw,seclabel,relatime,data=ordered 0 0
/dev/mapper/fedora_dhcp129--186-home /home ext4 rw,seclabel,relatime,data=ordered 0 0
tmpfs /run/user/1000 tmpfs rw,seclabel,nosuid,nodev,relatime,size=1611044k,mode=700,uid=1000,gid=1000 0 0
gvfsd-fuse /run/user/1000/gvfs fuse.gvfsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
fusectl /sys/fs/fuse/connections fusectl rw,relatime 0 0
grimlock.g.a: /home/adrian/sshfs-grimlock fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
grimlock.g.a:test_path/path_with'single_quotes /home/adrian/sshfs-grimlock-single-quote fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
grimlock.g.a:path_with'single_quotes /home/adrian/sshfs-grimlock-single-quote-2 fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
grimlock.g.a:/mnt/data/foto's /home/adrian/fotos fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
"""
MTAB_ENTRIES = [
[
'sysfs',
'/sys',
'sysfs',
'rw,seclabel,nosuid,nodev,noexec,relatime',
'0',
'0'
],
['proc', '/proc', 'proc', 'rw,nosuid,nodev,noexec,relatime', '0', '0'],
[
'devtmpfs',
'/dev',
'devtmpfs',
'rw,seclabel,nosuid,size=8044400k,nr_inodes=2011100,mode=755',
'0',
'0'
],
[
'securityfs',
'/sys/kernel/security',
'securityfs',
'rw,nosuid,nodev,noexec,relatime',
'0',
'0'
],
['tmpfs', '/dev/shm', 'tmpfs', 'rw,seclabel,nosuid,nodev', '0', '0'],
[
'devpts',
'/dev/pts',
'devpts',
'rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000',
'0',
'0'
],
['tmpfs', '/run', 'tmpfs', 'rw,seclabel,nosuid,nodev,mode=755', '0', '0'],
[
'tmpfs',
'/sys/fs/cgroup',
'tmpfs',
'ro,seclabel,nosuid,nodev,noexec,mode=755',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/systemd',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd',
'0',
'0'
],
[
'pstore',
'/sys/fs/pstore',
'pstore',
'rw,seclabel,nosuid,nodev,noexec,relatime',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/devices',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,devices',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/freezer',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,freezer',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/memory',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,memory',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/pids',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,pids',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/blkio',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,blkio',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/cpuset',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,cpuset',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/cpu,cpuacct',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,cpu,cpuacct',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/hugetlb',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,hugetlb',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/perf_event',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,perf_event',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/net_cls,net_prio',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,net_cls,net_prio',
'0',
'0'
],
['configfs', '/sys/kernel/config', 'configfs', 'rw,relatime', '0', '0'],
[
'/dev/mapper/fedora_dhcp129--186-root',
'/',
'ext4',
'rw,seclabel,relatime,data=ordered',
'0',
'0'
],
['selinuxfs', '/sys/fs/selinux', 'selinuxfs', 'rw,relatime', '0', '0'],
[
'systemd-1',
'/proc/sys/fs/binfmt_misc',
'autofs',
'rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct',
'0',
'0'
],
['debugfs', '/sys/kernel/debug', 'debugfs', 'rw,seclabel,relatime', '0', '0'],
[
'hugetlbfs',
'/dev/hugepages',
'hugetlbfs',
'rw,seclabel,relatime',
'0',
'0'
],
['tmpfs', '/tmp', 'tmpfs', 'rw,seclabel', '0', '0'],
['mqueue', '/dev/mqueue', 'mqueue', 'rw,seclabel,relatime', '0', '0'],
[
'/dev/loop0',
'/var/lib/machines',
'btrfs',
'rw,seclabel,relatime,space_cache,subvolid=5,subvol=/',
'0',
'0'
],
['/dev/sda1', '/boot', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0'],
# A 'none' fstype
['/dev/sdz3', '/not/a/real/device', 'none', 'rw,seclabel,relatime,data=ordered', '0', '0'],
# lets assume this is a bindmount
['/dev/sdz4', '/not/a/real/bind_mount', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0'],
[
'/dev/mapper/fedora_dhcp129--186-home',
'/home',
'ext4',
'rw,seclabel,relatime,data=ordered',
'0',
'0'
],
[
'tmpfs',
'/run/user/1000',
'tmpfs',
'rw,seclabel,nosuid,nodev,relatime,size=1611044k,mode=700,uid=1000,gid=1000',
'0',
'0'
],
[
'gvfsd-fuse',
'/run/user/1000/gvfs',
'fuse.gvfsd-fuse',
'rw,nosuid,nodev,relatime,user_id=1000,group_id=1000',
'0',
'0'
],
['fusectl', '/sys/fs/fuse/connections', 'fusectl', 'rw,relatime', '0', '0']]
BIND_MOUNTS = ['/not/a/real/bind_mount']
with open(os.path.join(os.path.dirname(__file__), 'fixtures/findmount_output.txt')) as f:
FINDMNT_OUTPUT = f.read()
class TestFactsLinuxHardwareGetMountFacts(unittest.TestCase):
# FIXME: mock.patch instead
def setUp(self):
# The @timeout decorator tracebacks if GATHER_TIMEOUT is None (the default until get_all_facts sets it via a global)
facts.GATHER_TIMEOUT = 10
def tearDown(self):
facts.GATHER_TIMEOUT = None
# The Hardware subclasses freak out if instantiated directly, so
# mock platform.system and inst Hardware() so we get a LinuxHardware()
# we can test.
@patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._mtab_entries', return_value=MTAB_ENTRIES)
@patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._find_bind_mounts', return_value=BIND_MOUNTS)
@patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._lsblk_uuid', return_value=LSBLK_UUIDS)
@patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._udevadm_uuid', return_value=UDEVADM_UUID)
def test_get_mount_facts(self,
mock_lsblk_uuid,
mock_find_bind_mounts,
mock_mtab_entries,
mock_udevadm_uuid):
module = Mock()
# Returns a LinuxHardware-ish
lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
# get_mount_facts() returns a dict with a 'mounts' list
mount_facts = lh.get_mount_facts()
self.assertIsInstance(mount_facts, dict)
self.assertIn('mounts', mount_facts)
self.assertIsInstance(mount_facts['mounts'], list)
self.assertIsInstance(mount_facts['mounts'][0], dict)
@patch('ansible.module_utils.facts.hardware.linux.get_file_content', return_value=MTAB)
def test_get_mtab_entries(self, mock_get_file_content):
module = Mock()
lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
mtab_entries = lh._mtab_entries()
self.assertIsInstance(mtab_entries, list)
self.assertIsInstance(mtab_entries[0], list)
self.assertEqual(len(mtab_entries), 38)
@patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_findmnt', return_value=(0, FINDMNT_OUTPUT, ''))
def test_find_bind_mounts(self, mock_run_findmnt):
module = Mock()
lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
bind_mounts = lh._find_bind_mounts()
# If bind_mounts becomes another seq type, feel free to change
self.assertIsInstance(bind_mounts, set)
self.assertEqual(len(bind_mounts), 1)
self.assertIn('/not/a/real/bind_mount', bind_mounts)
@patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_findmnt', return_value=(37, '', ''))
def test_find_bind_mounts_non_zero(self, mock_run_findmnt):
module = Mock()
lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
bind_mounts = lh._find_bind_mounts()
self.assertIsInstance(bind_mounts, set)
self.assertEqual(len(bind_mounts), 0)
def test_find_bind_mounts_no_findmnts(self):
module = Mock()
module.get_bin_path = Mock(return_value=None)
lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
bind_mounts = lh._find_bind_mounts()
self.assertIsInstance(bind_mounts, set)
self.assertEqual(len(bind_mounts), 0)
@patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(0, LSBLK_OUTPUT, ''))
def test_lsblk_uuid(self, mock_run_lsblk):
module = Mock()
lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
lsblk_uuids = lh._lsblk_uuid()
self.assertIsInstance(lsblk_uuids, dict)
self.assertIn(b'/dev/loop9', lsblk_uuids)
self.assertIn(b'/dev/sda1', lsblk_uuids)
self.assertEqual(lsblk_uuids[b'/dev/sda1'], b'32caaec3-ef40-4691-a3b6-438c3f9bc1c0')
@patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(37, LSBLK_OUTPUT, ''))
def test_lsblk_uuid_non_zero(self, mock_run_lsblk):
module = Mock()
lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
lsblk_uuids = lh._lsblk_uuid()
self.assertIsInstance(lsblk_uuids, dict)
self.assertEqual(len(lsblk_uuids), 0)
def test_lsblk_uuid_no_lsblk(self):
module = Mock()
module.get_bin_path = Mock(return_value=None)
lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
lsblk_uuids = lh._lsblk_uuid()
self.assertIsInstance(lsblk_uuids, dict)
self.assertEqual(len(lsblk_uuids), 0)
@patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(0, LSBLK_OUTPUT_2, ''))
def test_lsblk_uuid_dev_with_space_in_name(self, mock_run_lsblk):
module = Mock()
lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
lsblk_uuids = lh._lsblk_uuid()
self.assertIsInstance(lsblk_uuids, dict)
self.assertIn(b'/dev/loop0', lsblk_uuids)
self.assertIn(b'/dev/sda1', lsblk_uuids)
self.assertEqual(lsblk_uuids[b'/dev/mapper/an-example-mapper with a space in the name'], b'84639acb-013f-4d2f-9392-526a572b4373')
self.assertEqual(lsblk_uuids[b'/dev/sda1'], b'32caaec3-ef40-4691-a3b6-438c3f9bc1c0')
|
gpl-3.0
|
sdague/home-assistant
|
tests/components/totalconnect/test_config_flow.py
|
6
|
3882
|
"""Tests for the iCloud config flow."""
from homeassistant import data_entry_flow
from homeassistant.components.totalconnect.const import DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT, SOURCE_USER
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from tests.async_mock import patch
from tests.common import MockConfigEntry
USERNAME = "[email protected]"
PASSWORD = "password"
async def test_user(hass):
"""Test user config."""
# no data provided so show the form
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
# now data is provided, so check if login is correct and create the entry
with patch(
"homeassistant.components.totalconnect.config_flow.TotalConnectClient.TotalConnectClient"
) as client_mock:
client_mock.return_value.is_valid_credentials.return_value = True
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
async def test_import(hass):
"""Test import step with good username and password."""
with patch(
"homeassistant.components.totalconnect.config_flow.TotalConnectClient.TotalConnectClient"
) as client_mock:
client_mock.return_value.is_valid_credentials.return_value = True
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
async def test_abort_if_already_setup(hass):
"""Test abort if the account is already setup."""
MockConfigEntry(
domain=DOMAIN,
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
unique_id=USERNAME,
).add_to_hass(hass)
# Should fail, same USERNAME (import)
with patch(
"homeassistant.components.totalconnect.config_flow.TotalConnectClient.TotalConnectClient"
) as client_mock:
client_mock.return_value.is_valid_credentials.return_value = True
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
# Should fail, same USERNAME (flow)
with patch(
"homeassistant.components.totalconnect.config_flow.TotalConnectClient.TotalConnectClient"
) as client_mock:
client_mock.return_value.is_valid_credentials.return_value = True
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_login_failed(hass):
"""Test when we have errors during login."""
with patch(
"homeassistant.components.totalconnect.config_flow.TotalConnectClient.TotalConnectClient"
) as client_mock:
client_mock.return_value.is_valid_credentials.return_value = False
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "invalid_auth"}
|
apache-2.0
|
dehivix/compilerURG
|
unergParse.py
|
1
|
8812
|
from ply import *
import unergLex
tokens = unergLex.tokens
precedence = (
('left', 'PLUS','MINUS'),
('left', 'TIMES','DIVIDE'),
('left', 'POWER'),
('right','UMINUS')
)
#### A BASIC program is a series of statements. We represent the program as a
#### dictionary of tuples indexed by line number.
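#### For illustration only (hypothetical source text): feeding the two lines
#### "10 GOTO 20" and "20 END" through this grammar would produce roughly
#### {10: ('GOTO', 20), 20: ('END',)}.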
def p_program(p):
'''program : program statement
| statement'''
if len(p) == 2 and p[1]:
p[0] = { }
line,stat = p[1]
p[0][line] = stat
elif len(p) ==3:
p[0] = p[1]
if not p[0]: p[0] = { }
if p[2]:
line,stat = p[2]
p[0][line] = stat
#### This catch-all rule is used for any catastrophic errors. In this case,
#### we simply return nothing
def p_program_error(p):
'''program : error'''
p[0] = None
p.parser.error = 1
#### Format of all BASIC statements.
def p_statement(p):
'''statement : INTEGER command NEWLINE'''
if isinstance(p[2],str):
print("%s %s %s" % (p[2],"AT LINE", p[1]))
p[0] = None
p.parser.error = 1
else:
lineno = int(p[1])
p[0] = (lineno,p[2])
#### Interactive statements.
def p_statement_interactive(p):
'''statement : RUN NEWLINE
| LIST NEWLINE
| NEW NEWLINE'''
p[0] = (0, (p[1],0))
#### Blank line number
def p_statement_blank(p):
'''statement : INTEGER NEWLINE'''
p[0] = (0,('BLANK',int(p[1])))
#### Error handling for malformed statements
def p_statement_bad(p):
'''statement : INTEGER error NEWLINE'''
print("MALFORMED STATEMENT AT LINE %s" % p[1])
p[0] = None
p.parser.error = 1
#### Blank line
def p_statement_newline(p):
'''statement : NEWLINE'''
p[0] = None
#### LET statement
def p_command_let(p):
'''command : LET variable EQUALS expr'''
p[0] = ('LET',p[2],p[4])
def p_command_let_bad(p):
'''command : LET variable EQUALS error'''
p[0] = "BAD EXPRESSION IN LET"
#### READ statement
def p_command_read(p):
'''command : READ varlist'''
p[0] = ('READ',p[2])
def p_command_read_bad(p):
'''command : READ error'''
p[0] = "MALFORMED VARIABLE LIST IN READ"
#### DATA statement
def p_command_data(p):
'''command : DATA numlist'''
p[0] = ('DATA',p[2])
def p_command_data_bad(p):
'''command : DATA error'''
p[0] = "MALFORMED NUMBER LIST IN DATA"
#### PRINT statement
def p_command_print(p):
'''command : PRINT plist optend'''
p[0] = ('PRINT',p[2],p[3])
def p_command_print_bad(p):
'''command : PRINT error'''
p[0] = "MALFORMED PRINT STATEMENT"
#### Optional ending on PRINT. Either a comma (,) or semicolon (;)
def p_optend(p):
'''optend : COMMA
| SEMI
|'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = None
#### PRINT statement with no arguments
def p_command_print_empty(p):
'''command : PRINT'''
p[0] = ('PRINT',[],None)
#### GOTO statement
def p_command_goto(p):
'''command : GOTO INTEGER'''
p[0] = ('GOTO',int(p[2]))
def p_command_goto_bad(p):
'''command : GOTO error'''
p[0] = "INVALID LINE NUMBER IN GOTO"
#### IF-THEN statement
def p_command_if(p):
'''command : IF relexpr THEN INTEGER'''
p[0] = ('IF',p[2],int(p[4]))
def p_command_if_bad(p):
'''command : IF error THEN INTEGER'''
p[0] = "BAD RELATIONAL EXPRESSION"
def p_command_if_bad2(p):
'''command : IF relexpr THEN error'''
p[0] = "INVALID LINE NUMBER IN THEN"
#### FOR statement
def p_command_for(p):
'''command : FOR ID EQUALS expr TO expr optstep'''
p[0] = ('FOR',p[2],p[4],p[6],p[7])
def p_command_for_bad_initial(p):
'''command : FOR ID EQUALS error TO expr optstep'''
p[0] = "BAD INITIAL VALUE IN FOR STATEMENT"
def p_command_for_bad_final(p):
'''command : FOR ID EQUALS expr TO error optstep'''
p[0] = "BAD FINAL VALUE IN FOR STATEMENT"
def p_command_for_bad_step(p):
'''command : FOR ID EQUALS expr TO expr STEP error'''
p[0] = "MALFORMED STEP IN FOR STATEMENT"
#### Optional STEP qualifier on FOR statement
def p_optstep(p):
'''optstep : STEP expr
| empty'''
if len(p) == 3:
p[0] = p[2]
else:
p[0] = None
#### NEXT statement
def p_command_next(p):
'''command : NEXT ID'''
p[0] = ('NEXT',p[2])
def p_command_next_bad(p):
'''command : NEXT error'''
p[0] = "MALFORMED NEXT"
#### END statement
def p_command_end(p):
'''command : END'''
p[0] = ('END',)
#### REM statement
def p_command_rem(p):
'''command : REM'''
p[0] = ('REM',p[1])
#### STOP statement
def p_command_stop(p):
'''command : STOP'''
p[0] = ('STOP',)
#### DEF statement
def p_command_def(p):
'''command : DEF ID LPAREN ID RPAREN EQUALS expr'''
p[0] = ('FUNC',p[2],p[4],p[7])
def p_command_def_bad_rhs(p):
'''command : DEF ID LPAREN ID RPAREN EQUALS error'''
p[0] = "BAD EXPRESSION IN DEF STATEMENT"
def p_command_def_bad_arg(p):
'''command : DEF ID LPAREN error RPAREN EQUALS expr'''
p[0] = "BAD ARGUMENT IN DEF STATEMENT"
#### GOSUB statement
def p_command_gosub(p):
'''command : GOSUB INTEGER'''
p[0] = ('GOSUB',int(p[2]))
def p_command_gosub_bad(p):
'''command : GOSUB error'''
p[0] = "INVALID LINE NUMBER IN GOSUB"
#### RETURN statement
def p_command_return(p):
'''command : RETURN'''
p[0] = ('RETURN',)
#### DIM statement
def p_command_dim(p):
'''command : DIM dimlist'''
p[0] = ('DIM',p[2])
def p_command_dim_bad(p):
'''command : DIM error'''
p[0] = "MALFORMED VARIABLE LIST IN DIM"
#### List of variables supplied to DIM statement
def p_dimlist(p):
'''dimlist : dimlist COMMA dimitem
| dimitem'''
if len(p) == 4:
p[0] = p[1]
p[0].append(p[3])
else:
p[0] = [p[1]]
#### DIM items
def p_dimitem_single(p):
'''dimitem : ID LPAREN INTEGER RPAREN'''
p[0] = (p[1],eval(p[3]),0)
def p_dimitem_double(p):
'''dimitem : ID LPAREN INTEGER COMMA INTEGER RPAREN'''
p[0] = (p[1],eval(p[3]),eval(p[5]))
#### Arithmetic expressions
def p_expr_binary(p):
'''expr : expr PLUS expr
| expr MINUS expr
| expr TIMES expr
| expr DIVIDE expr
| expr POWER expr'''
p[0] = ('BINOP',p[2],p[1],p[3])
def p_expr_number(p):
'''expr : INTEGER
| FLOAT'''
p[0] = ('NUM',eval(p[1]))
def p_expr_variable(p):
'''expr : variable'''
p[0] = ('VAR',p[1])
def p_expr_group(p):
'''expr : LPAREN expr RPAREN'''
p[0] = ('GROUP',p[2])
def p_expr_unary(p):
'''expr : MINUS expr %prec UMINUS'''
p[0] = ('UNARY','-',p[2])
#### Relational expressions
def p_relexpr(p):
'''relexpr : expr LT expr
| expr LE expr
| expr GT expr
| expr GE expr
| expr EQUALS expr
| expr NE expr'''
p[0] = ('RELOP',p[2],p[1],p[3])
#### Variables
def p_variable(p):
'''variable : ID
| ID LPAREN expr RPAREN
| ID LPAREN expr COMMA expr RPAREN'''
if len(p) == 2:
p[0] = (p[1],None,None)
elif len(p) == 5:
p[0] = (p[1],p[3],None)
else:
p[0] = (p[1],p[3],p[5])
#### Builds a list of variable targets as a Python list
def p_varlist(p):
'''varlist : varlist COMMA variable
| variable'''
if len(p) > 2:
p[0] = p[1]
p[0].append(p[3])
else:
p[0] = [p[1]]
#### Builds a list of numbers as a Python list
def p_numlist(p):
'''numlist : numlist COMMA number
| number'''
if len(p) > 2:
p[0] = p[1]
p[0].append(p[3])
else:
p[0] = [p[1]]
#### A number. May be an integer or a float
def p_number(p):
'''number : INTEGER
| FLOAT'''
p[0] = eval(p[1])
#### A signed number.
def p_number_signed(p):
'''number : MINUS INTEGER
| MINUS FLOAT'''
p[0] = eval("-"+p[2])
#### List of targets for a print statement
#### Returns a list of tuples (label,expr)
def p_plist(p):
'''plist : plist COMMA pitem
| pitem'''
if len(p) > 3:
p[0] = p[1]
p[0].append(p[3])
else:
p[0] = [p[1]]
def p_item_string(p):
'''pitem : STRING'''
p[0] = (p[1][1:-1],None)
def p_item_string_expr(p):
'''pitem : STRING expr'''
p[0] = (p[1][1:-1],p[2])
def p_item_expr(p):
'''pitem : expr'''
p[0] = ("",p[1])
#### Empty
def p_empty(p):
'''empty : '''
#### Catastrophic error handler
def p_error(p):
if not p:
print("SYNTAX ERROR AT EOF")
bparser = yacc.yacc()
def parse(data,debug=0):
bparser.error = 0
p = bparser.parse(data,debug=debug)
if bparser.error: return None
return p
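#### Minimal usage sketch (hypothetical program text; assumes unergLex provides
#### the BASIC-style tokens referenced above):
####
####     prog = parse("10 GOTO 20\n20 END\n")
####     # prog is a dict keyed by line number, or None if a syntax error was hit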
|
gpl-3.0
|
jeorgen/ngcccbase
|
ngcccbase/p2ptrade/protocol_objects.py
|
4
|
4722
|
import time
from coloredcoinlib import IncompatibleTypesError
from ngcccbase.txcons import RawTxSpec
from utils import make_random_id
from utils import CommonEqualityMixin
class EOffer(CommonEqualityMixin):
"""
A is the offer side's ColorValue
B is the replier side's ColorValue
"""
def __init__(self, oid, A, B):
self.oid = oid or make_random_id()
self.A = A
self.B = B
self.expires = None
def expired(self):
return self.expired_shift(0)
def expired_shift(self, shift):
return (not self.expires) or (self.expires < (time.time() + shift))
def refresh(self, delta):
self.expires = time.time() + delta
def get_data(self):
return {"oid": self.oid,
"A": self.A,
"B": self.B}
def matches(self, offer):
"""A <=x=> B"""
return self.A == offer.B and offer.A == self.B
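# Illustration (hypothetical color values): an offer with A = 100 "red" and
# B = 1 "btc" matches a counter-offer whose A is 1 "btc" and whose B is
# 100 "red" -- i.e. the two sides are simply swapped.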
def is_same_as_mine(self, my_offer):
return self.A == my_offer.A and self.B == my_offer.B
@classmethod
def from_data(cls, data):
x = cls(data["oid"], data["A"], data["B"])
return x
class MyEOffer(EOffer):
def __init__(self, oid, A, B):
super(MyEOffer, self).__init__(oid, A, B)
self.auto_post = True
class ETxSpec(CommonEqualityMixin):
def __init__(self, inputs, targets, my_utxo_list):
self.inputs = inputs
self.targets = targets
self.my_utxo_list = my_utxo_list
def get_data(self):
return {"inputs": self.inputs,
"targets": self.targets}
@classmethod
def from_data(cls, data):
return cls(data['inputs'], data['targets'], None)
class EProposal(CommonEqualityMixin):
def __init__(self, pid, ewctrl, offer):
self.pid = pid
self.ewctrl = ewctrl
self.offer = offer
def get_data(self):
return {"pid": self.pid, "offer": self.offer.get_data()}
class MyEProposal(EProposal):
def __init__(self, ewctrl, orig_offer, my_offer):
super(MyEProposal, self).__init__(make_random_id(),
ewctrl, orig_offer)
self.my_offer = my_offer
if not orig_offer.matches(my_offer):
raise Exception("Offers are incongruent!")
self.etx_spec = ewctrl.make_etx_spec(self.offer.B, self.offer.A)
self.etx_data = None
def get_data(self):
res = super(MyEProposal, self).get_data()
if self.etx_data:
res["etx_data"] = self.etx_data
else:
res["etx_spec"] = self.etx_spec.get_data()
return res
def process_reply(self, reply_ep):
rtxs = RawTxSpec.from_tx_data(self.ewctrl.model,
reply_ep.etx_data.decode('hex'))
if self.ewctrl.check_tx(rtxs, self.etx_spec):
rtxs.sign(self.etx_spec.my_utxo_list)
self.ewctrl.publish_tx(rtxs, self.my_offer)
self.etx_data = rtxs.get_hex_tx_data()
else:
raise Exception('P2ptrade reply tx check failed!')
class MyReplyEProposal(EProposal):
def __init__(self, ewctrl, foreign_ep, my_offer):
super(MyReplyEProposal, self).__init__(foreign_ep.pid,
ewctrl,
foreign_ep.offer)
self.my_offer = my_offer
self.tx = self.ewctrl.make_reply_tx(foreign_ep.etx_spec,
my_offer.A,
my_offer.B)
def get_data(self):
data = super(MyReplyEProposal, self).get_data()
data['etx_data'] = self.tx.get_hex_tx_data()
return data
def process_reply(self, reply_ep):
# FIXME: how is it ever valid to call this function???
rtxs = RawTxSpec.from_tx_data(self.ewctrl.model,
reply_ep.etx_data.decode('hex'))
self.ewctrl.publish_tx(rtxs, self.my_offer) # TODO: ???
class ForeignEProposal(EProposal):
def __init__(self, ewctrl, ep_data):
offer = EOffer.from_data(ep_data['offer'])
super(ForeignEProposal, self).__init__(ep_data['pid'], ewctrl, offer)
self.etx_spec = None
if 'etx_spec' in ep_data:
self.etx_spec = ETxSpec.from_data(ep_data['etx_spec'])
self.etx_data = ep_data.get('etx_data', None)
def accept(self, my_offer):
if not self.offer.is_same_as_mine(my_offer):
raise Exception("Incompatible offer!") # pragma: no cover
if not self.etx_spec:
raise Exception("Need etx_spec!") # pragma: no cover
return MyReplyEProposal(self.ewctrl, self, my_offer)
|
mit
|
ajanson/SCIRun
|
src/Externals/libxml2/check-relaxng-test-suite2.py
|
17
|
10537
|
#!/usr/bin/python
import sys
import time
import os
import string
import StringIO
sys.path.insert(0, "python")
import libxml2
# Memory debug specific
libxml2.debugMemory(1)
debug = 0
quiet = 1
#
# the testsuite description
#
CONF="test/relaxng/testsuite.xml"
LOG="check-relaxng-test-suite2.log"
log = open(LOG, "w")
nb_schemas_tests = 0
nb_schemas_success = 0
nb_schemas_failed = 0
nb_instances_tests = 0
nb_instances_success = 0
nb_instances_failed = 0
libxml2.lineNumbersDefault(1)
#
# Resolver callback
#
resources = {}
def resolver(URL, ID, ctxt):
global resources
if resources.has_key(URL):
return(StringIO.StringIO(resources[URL]))
log.write("Resolver failure: asked %s\n" % (URL))
log.write("resources: %s\n" % (resources))
return None
#
# Load the previous results
#
#results = {}
#previous = {}
#
#try:
# res = libxml2.parseFile(RES)
#except:
# log.write("Could not parse %s" % (RES))
#
# handle a valid instance
#
def handle_valid(node, schema):
global log
global nb_instances_success
global nb_instances_failed
instance = node.prop("dtd")
if instance == None:
instance = ""
child = node.children
while child != None:
if child.type != 'text':
instance = instance + child.serialize()
child = child.next
# mem = libxml2.debugMemory(1);
try:
doc = libxml2.parseDoc(instance)
except:
doc = None
if doc == None:
log.write("\nFailed to parse correct instance:\n-----\n")
log.write(instance)
log.write("\n-----\n")
nb_instances_failed = nb_instances_failed + 1
return
if debug:
print "instance line %d" % (node.lineNo())
try:
ctxt = schema.relaxNGNewValidCtxt()
ret = doc.relaxNGValidateDoc(ctxt)
del ctxt
except:
ret = -1
doc.freeDoc()
# if mem != libxml2.debugMemory(1):
# print "validating instance %d line %d leaks" % (
# nb_instances_tests, node.lineNo())
if ret != 0:
log.write("\nFailed to validate correct instance:\n-----\n")
log.write(instance)
log.write("\n-----\n")
nb_instances_failed = nb_instances_failed + 1
else:
nb_instances_success = nb_instances_success + 1
#
# handle an invalid instance
#
def handle_invalid(node, schema):
global log
global nb_instances_success
global nb_instances_failed
instance = node.prop("dtd")
if instance == None:
instance = ""
child = node.children
while child != None:
if child.type != 'text':
instance = instance + child.serialize()
child = child.next
# mem = libxml2.debugMemory(1);
try:
doc = libxml2.parseDoc(instance)
except:
doc = None
if doc == None:
log.write("\nStrange: failed to parse incorrect instance:\n-----\n")
log.write(instance)
log.write("\n-----\n")
return
if debug:
print "instance line %d" % (node.lineNo())
try:
ctxt = schema.relaxNGNewValidCtxt()
ret = doc.relaxNGValidateDoc(ctxt)
del ctxt
except:
ret = -1
doc.freeDoc()
# mem2 = libxml2.debugMemory(1)
# if mem != mem2:
# print "validating instance %d line %d leaks %d bytes" % (
# nb_instances_tests, node.lineNo(), mem2 - mem)
if ret == 0:
log.write("\nFailed to detect validation problem in instance:\n-----\n")
log.write(instance)
log.write("\n-----\n")
nb_instances_failed = nb_instances_failed + 1
else:
nb_instances_success = nb_instances_success + 1
#
# handle correct and incorrect schema tests
#
def handle_correct(node):
global log
global nb_schemas_success
global nb_schemas_failed
schema = ""
child = node.children
while child != None:
if child.type != 'text':
schema = schema + child.serialize()
child = child.next
try:
rngp = libxml2.relaxNGNewMemParserCtxt(schema, len(schema))
rngs = rngp.relaxNGParse()
except:
rngs = None
if rngs == None:
log.write("\nFailed to compile correct schema:\n-----\n")
log.write(schema)
log.write("\n-----\n")
nb_schemas_failed = nb_schemas_failed + 1
else:
nb_schemas_success = nb_schemas_success + 1
return rngs
def handle_incorrect(node):
global log
global nb_schemas_success
global nb_schemas_failed
schema = ""
child = node.children
while child != None:
if child.type != 'text':
schema = schema + child.serialize()
child = child.next
try:
rngp = libxml2.relaxNGNewMemParserCtxt(schema, len(schema))
rngs = rngp.relaxNGParse()
except:
rngs = None
if rngs != None:
log.write("\nFailed to detect schema error in:\n-----\n")
log.write(schema)
log.write("\n-----\n")
nb_schemas_failed = nb_schemas_failed + 1
else:
# log.write("\nSuccess detecting schema error in:\n-----\n")
# log.write(schema)
# log.write("\n-----\n")
nb_schemas_success = nb_schemas_success + 1
return None
#
# resource handling: keep a dictionary of URL->string mappings
#
def handle_resource(node, dir):
global resources
try:
name = node.prop('name')
except:
name = None
if name == None or name == '':
log.write("resource has no name")
return;
if dir != None:
# name = libxml2.buildURI(name, dir)
name = dir + '/' + name
res = ""
child = node.children
while child != None:
if child.type != 'text':
res = res + child.serialize()
child = child.next
resources[name] = res
#
# dir handling: pseudo directory resources
#
def handle_dir(node, dir):
try:
name = node.prop('name')
except:
name = None
if name == None or name == '':
log.write("resource has no name")
return;
if dir != None:
# name = libxml2.buildURI(name, dir)
name = dir + '/' + name
dirs = node.xpathEval('dir')
for dir in dirs:
handle_dir(dir, name)
res = node.xpathEval('resource')
for r in res:
handle_resource(r, name)
#
# handle a testCase element
#
def handle_testCase(node):
global nb_schemas_tests
global nb_instances_tests
global resources
sections = node.xpathEval('string(section)')
log.write("\n ======== test %d line %d section %s ==========\n" % (
nb_schemas_tests, node.lineNo(), sections))
resources = {}
if debug:
print "test %d line %d" % (nb_schemas_tests, node.lineNo())
dirs = node.xpathEval('dir')
for dir in dirs:
handle_dir(dir, None)
res = node.xpathEval('resource')
for r in res:
handle_resource(r, None)
tsts = node.xpathEval('incorrect')
if tsts != []:
if len(tsts) != 1:
print "warning test line %d has more than one <incorrect> example" %(node.lineNo())
schema = handle_incorrect(tsts[0])
else:
tsts = node.xpathEval('correct')
if tsts != []:
if len(tsts) != 1:
print "warning test line %d has more than one <correct> example"% (node.lineNo())
schema = handle_correct(tsts[0])
else:
print "warning <testCase> line %d has no <correct> nor <incorrect> child" % (node.lineNo())
nb_schemas_tests = nb_schemas_tests + 1;
valids = node.xpathEval('valid')
invalids = node.xpathEval('invalid')
nb_instances_tests = nb_instances_tests + len(valids) + len(invalids)
if schema != None:
for valid in valids:
handle_valid(valid, schema)
for invalid in invalids:
handle_invalid(invalid, schema)
#
# handle a testSuite element
#
def handle_testSuite(node, level = 0):
global nb_schemas_tests, nb_schemas_success, nb_schemas_failed
global nb_instances_tests, nb_instances_success, nb_instances_failed
if level >= 1:
old_schemas_tests = nb_schemas_tests
old_schemas_success = nb_schemas_success
old_schemas_failed = nb_schemas_failed
old_instances_tests = nb_instances_tests
old_instances_success = nb_instances_success
old_instances_failed = nb_instances_failed
docs = node.xpathEval('documentation')
authors = node.xpathEval('author')
if docs != []:
msg = ""
for doc in docs:
msg = msg + doc.content + " "
if authors != []:
msg = msg + "written by "
for author in authors:
msg = msg + author.content + " "
if quiet == 0:
print msg
sections = node.xpathEval('section')
if sections != [] and level <= 0:
msg = ""
for section in sections:
msg = msg + section.content + " "
if quiet == 0:
print "Tests for section %s" % (msg)
for test in node.xpathEval('testCase'):
handle_testCase(test)
for test in node.xpathEval('testSuite'):
handle_testSuite(test, level + 1)
if level >= 1 and sections != []:
msg = ""
for section in sections:
msg = msg + section.content + " "
print "Result of tests for section %s" % (msg)
if nb_schemas_tests != old_schemas_tests:
print "found %d test schemas: %d success %d failures" % (
nb_schemas_tests - old_schemas_tests,
nb_schemas_success - old_schemas_success,
nb_schemas_failed - old_schemas_failed)
if nb_instances_tests != old_instances_tests:
print "found %d test instances: %d success %d failures" % (
nb_instances_tests - old_instances_tests,
nb_instances_success - old_instances_success,
nb_instances_failed - old_instances_failed)
#
# Parse the conf file
#
libxml2.substituteEntitiesDefault(1);
testsuite = libxml2.parseFile(CONF)
#
# Error and warning callbacks
#
def callback(ctx, str):
global log
log.write("%s%s" % (ctx, str))
libxml2.registerErrorHandler(callback, "")
libxml2.setEntityLoader(resolver)
root = testsuite.getRootElement()
if root.name != 'testSuite':
print "%s doesn't start with a testSuite element, aborting" % (CONF)
sys.exit(1)
if quiet == 0:
print "Running Relax NG testsuite"
handle_testSuite(root)
if quiet == 0:
print "\nTOTAL:\n"
if quiet == 0 or nb_schemas_failed != 0:
print "found %d test schemas: %d success %d failures" % (
nb_schemas_tests, nb_schemas_success, nb_schemas_failed)
if quiet == 0 or nb_instances_failed != 0:
print "found %d test instances: %d success %d failures" % (
nb_instances_tests, nb_instances_success, nb_instances_failed)
testsuite.freeDoc()
# Memory debug specific
libxml2.relaxNGCleanupTypes()
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
if quiet == 0:
print "OK"
else:
print "Memory leak %d bytes" % (libxml2.debugMemory(1))
libxml2.dumpMemory()
|
mit
|
coldzhang/cpp_features
|
coroutine/unit_test/gtest_unit/gtest/xcode/Scripts/versiongenerate.py
|
3088
|
4536
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version informtion for use the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included in during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are faults of the script,
not problems with the Autoconf):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
surrounded by square brackets, "[" and "]" (e.g. [1.0.1]). The first
segment represents the major version, the second represents the minor
version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
import sys
import re
# Read the command line argument (the output directory for Version.h)
if (len(sys.argv) < 3):
print "Usage: versiongenerate.py input_dir output_dir"
sys.exit(1)
else:
input_dir = sys.argv[1]
output_dir = sys.argv[2]
# Read the first 1024 characters of the configure.ac file
config_file = open("%s/configure.ac" % input_dir, 'r')
buffer_size = 1024
opening_string = config_file.read(buffer_size)
config_file.close()
# Extract the version string from the AC_INIT macro
# The following init_expression means:
# Extract three integers separated by periods and surrounded by square
# brackets (e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
# (*? is the non-greedy flag) since that would pull in everything between
# the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
re.DOTALL)
version_values = version_expression.search(opening_string)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)
# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not restricted to C-syntax nor are we using include guards.
//
#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s
""" % (major_version, minor_version, major_version, minor_version, fix_version)
version_file = open("%s/Version.h" % output_dir, 'w')
version_file.write(file_data)
version_file.close()
|
lgpl-3.0
|
trec-kba/streamcorpus-pipeline
|
examples/john_smith_chunk_writer.py
|
1
|
2313
|
'''Example of how to transform a corpus into the streamcorpus format
and save it as a chunk file.
~/streamcorpus-pipeline$ python examples/john_smith_chunk_writer.py data/john-smith/original foo.sc
~/streamcorpus-pipeline$ streamcorpus_dump --count foo.sc
197 0 foo.sc
Copyright 2012-2014 Diffeo, Inc.
'''
import argparse
import logging
import os
## this assumes that streamcorpus has been installed; it is in pypi
import streamcorpus
logger = logging.getLogger(__name__)
def paths(input_dir):
'yield all file paths under input_dir'
for root, dirs, fnames in os.walk(input_dir):
for i_fname in fnames:
i_path = os.path.join(root, i_fname)
yield i_path
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'input_dir',
help='path to a directory containing files to put in a streamcorpus.Chunk file')
parser.add_argument(
'output_path',
help='file path to create the Chunk file')
args = parser.parse_args()
o_fh = open(args.output_path, 'wb')
o_chunk = streamcorpus.Chunk(file_obj=o_fh, mode='wb')
## NB: can also pass a path into Chunk. If it ends in .xz, then
    ## it will handle file compression for you.
#o_chunk = streamcorpus.Chunk('output.sc.xz', mode='wb')
for i_path in paths(args.input_dir):
### In the example code below, strings that start with
### 'headers.' should be replaced by code that pulls the
### indicated information from scrapy
## Every StreamItem has a stream_time property. It usually
## comes from the document creation time. This may be either
## a unix-time number or a string like
creation_time = '2000-01-01T12:34:00.000123Z' ## should come from headers.last_modified
stream_item = streamcorpus.make_stream_item(
creation_time,
'headers.absolute URL')
## These docs came from the authors of the paper cited above.
stream_item.source = 'scrapy'
i_file = open(i_path)
stream_item.body.raw = i_file.read()
stream_item.body.media_type = 'headers.mime_type'
stream_item.body.encoding = 'headers.encoding'
o_chunk.add(stream_item)
o_chunk.close()
if __name__ == '__main__':
main()
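# Editor's sketch (not part of the original example): reading the chunk back.
# This assumes streamcorpus.Chunk can also be opened for reading with
# mode='rb', which is how tools like streamcorpus_dump iterate over items.
def read_back(chunk_path):
    'yield the stream_id of every StreamItem stored in chunk_path'
    for si in streamcorpus.Chunk(chunk_path, mode='rb'):
        yield si.stream_id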
|
mit
|
ampax/edx-platform
|
common/lib/xmodule/xmodule/modulestore/draft_and_published.py
|
71
|
5876
|
"""
This module provides an abstraction for Module Stores that support Draft and Published branches.
"""
import threading
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
from . import ModuleStoreEnum, BulkOperationsMixin
# Things w/ these categories should never be marked as version=DRAFT
DIRECT_ONLY_CATEGORIES = ['course', 'chapter', 'sequential', 'about', 'static_tab', 'course_info']
class BranchSettingMixin(object):
"""
A mixin to manage a module store's branch setting.
The order of override is (from higher precedence to lower):
1. thread-specific setting temporarily set using the branch_setting contextmanager
2. the return value of the branch_setting_func passed into this mixin's init method
3. the default branch setting being ModuleStoreEnum.Branch.published_only
"""
def __init__(self, *args, **kwargs):
"""
:param branch_setting_func: a function that returns the default branch setting for this object.
If not specified, ModuleStoreEnum.Branch.published_only is used as the default setting.
"""
self.default_branch_setting_func = kwargs.pop(
'branch_setting_func',
lambda: ModuleStoreEnum.Branch.published_only
)
super(BranchSettingMixin, self).__init__(*args, **kwargs)
# cache the branch setting on a local thread to support a multi-threaded environment
self.thread_cache = threading.local()
@contextmanager
def branch_setting(self, branch_setting, course_id=None): # pylint: disable=unused-argument
"""
A context manager for temporarily setting a store's branch value on the current thread.
"""
previous_thread_branch_setting = getattr(self.thread_cache, 'branch_setting', None)
try:
self.thread_cache.branch_setting = branch_setting
yield
finally:
self.thread_cache.branch_setting = previous_thread_branch_setting
def get_branch_setting(self, course_id=None): # pylint: disable=unused-argument
"""
Returns the current branch_setting on the store.
Returns the thread-local setting, if set.
Otherwise, returns the default value of the setting function set during the store's initialization.
"""
# first check the thread-local cache
thread_local_branch_setting = getattr(self.thread_cache, 'branch_setting', None)
if thread_local_branch_setting:
return thread_local_branch_setting
else:
# return the default value
return self.default_branch_setting_func()
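# Editor's usage sketch (not part of the original module): temporarily reading
# draft content on a store that mixes in BranchSettingMixin. `store`,
# `course_key` and get_items() are assumed/illustrative here.
def _example_branch_override(store, course_key):
    """Illustrative only: prefer draft content for the duration of the block."""
    with store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course_key):
        return store.get_items(course_key)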
class ModuleStoreDraftAndPublished(BranchSettingMixin, BulkOperationsMixin):
"""
A mixin for a read-write database backend that supports two branches, Draft and Published, with
    options to prefer Draft and fall back to Published.
"""
__metaclass__ = ABCMeta
@abstractmethod
def delete_item(self, location, user_id, revision=None, **kwargs):
raise NotImplementedError
@abstractmethod
def get_parent_location(self, location, revision=None, **kwargs):
raise NotImplementedError
@abstractmethod
def has_changes(self, xblock):
raise NotImplementedError
@abstractmethod
def publish(self, location, user_id):
raise NotImplementedError
@abstractmethod
def unpublish(self, location, user_id):
"""
Turn the published version into a draft, removing the published version.
Raises: InvalidVersionError if called on a DIRECT_ONLY_CATEGORY
"""
raise NotImplementedError
@abstractmethod
def revert_to_published(self, location, user_id):
raise NotImplementedError
@abstractmethod
def has_published_version(self, xblock):
raise NotImplementedError
@abstractmethod
def convert_to_draft(self, location, user_id):
raise NotImplementedError
@abstractmethod
def import_xblock(self, user_id, course_key, block_type, block_id, fields=None, runtime=None, **kwargs):
"""
Import the given xblock into the current branch setting: import completely overwrites any
existing block of the same id.
In ModuleStoreDraftAndPublished, importing a published block ensures that access from the draft
will get a block (either the one imported or a preexisting one). See xml_importer
"""
raise NotImplementedError
def _flag_publish_event(self, course_key):
"""
Wrapper around calls to fire the course_published signal
        Unless we're nested in an active bulk operation, this simply fires the signal;
        otherwise, a publish will be signalled at the end of the bulk operation.
Arguments:
course_key - course_key to which the signal applies
"""
if self.signal_handler:
bulk_record = self._get_bulk_ops_record(course_key) if isinstance(self, BulkOperationsMixin) else None
if bulk_record and bulk_record.active:
bulk_record.has_publish_item = True
else:
# We remove the branch, because publishing always means copying from draft to published
self.signal_handler.send("course_published", course_key=course_key.for_branch(None))
class UnsupportedRevisionError(ValueError):
"""
This error is raised if a method is called with an unsupported revision parameter.
"""
def __init__(self, allowed_revisions=None):
if not allowed_revisions:
allowed_revisions = [
None,
ModuleStoreEnum.RevisionOption.published_only,
ModuleStoreEnum.RevisionOption.draft_only
]
super(UnsupportedRevisionError, self).__init__('revision not one of {}'.format(allowed_revisions))
|
agpl-3.0
|
CongLi/avocado-vt
|
virttest/staging/backports/_itertools.py
|
11
|
1278
|
"""
This module contains some itertools functions people have been using in
avocado-vt that are not present in python 2.4, the minimum supported version.
"""
def product(*args, **kwds):
"""
(avocado-vt backport)
Cartesian product of input iterables. Equivalent to nested for-loops.
For example, product(A, B) returns the same as: ((x,y) for x in A for y in B).
The leftmost iterators are in the outermost for-loop, so the output tuples
cycle in a manner similar to an odometer (with the rightmost element changing
on every iteration).
To compute the product of an iterable with itself, specify the number
of repetitions with the optional repeat keyword argument. For example,
product(A, repeat=4) means the same as product(A, A, A, A).
product('ab', range(3)) --> ('a',0) ('a',1) ('a',2) ('b',0) ('b',1) ('b',2)
product((0,1), (0,1), (0,1)) --> (0,0,0) (0,0,1) (0,1,0) (0,1,1) (1,0,0) ...
"""
# product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
# product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
pools = map(tuple, args) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x + [y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
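if __name__ == '__main__':
    # Editor's sanity check (not part of the original backport): exercises the
    # repeat keyword and the odometer-style ordering described in the docstring.
    print list(product('ab', range(2)))    # [('a', 0), ('a', 1), ('b', 0), ('b', 1)]
    print list(product((0, 1), repeat=2))  # [(0, 0), (0, 1), (1, 0), (1, 1)]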
|
gpl-2.0
|
rs2/pandas
|
pandas/tests/window/moments/test_moments_ewm.py
|
1
|
8505
|
import numpy as np
from numpy.random import randn
import pytest
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
def check_ew(name=None, preserve_nan=False, series=None, frame=None, nan_locs=None):
series_result = getattr(series.ewm(com=10), name)()
assert isinstance(series_result, Series)
frame_result = getattr(frame.ewm(com=10), name)()
assert type(frame_result) == DataFrame
result = getattr(series.ewm(com=10), name)()
if preserve_nan:
assert result[nan_locs].isna().all()
def test_ewma(series, frame, nan_locs):
check_ew(name="mean", frame=frame, series=series, nan_locs=nan_locs)
vals = pd.Series(np.zeros(1000))
vals[5] = 1
result = vals.ewm(span=100, adjust=False).mean().sum()
assert np.abs(result - 1) < 1e-2
@pytest.mark.parametrize("adjust", [True, False])
@pytest.mark.parametrize("ignore_na", [True, False])
def test_ewma_cases(adjust, ignore_na):
# try adjust/ignore_na args matrix
s = Series([1.0, 2.0, 4.0, 8.0])
if adjust:
expected = Series([1.0, 1.6, 2.736842, 4.923077])
else:
expected = Series([1.0, 1.333333, 2.222222, 4.148148])
result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean()
tm.assert_series_equal(result, expected)
def test_ewma_nan_handling():
s = Series([1.0] + [np.nan] * 5 + [1.0])
result = s.ewm(com=5).mean()
tm.assert_series_equal(result, Series([1.0] * len(s)))
s = Series([np.nan] * 2 + [1.0] + [np.nan] * 2 + [1.0])
result = s.ewm(com=5).mean()
tm.assert_series_equal(result, Series([np.nan] * 2 + [1.0] * 4))
# GH 7603
s0 = Series([np.nan, 1.0, 101.0])
s1 = Series([1.0, np.nan, 101.0])
s2 = Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan])
s3 = Series([1.0, np.nan, 101.0, 50.0])
com = 2.0
alpha = 1.0 / (1.0 + com)
def simple_wma(s, w):
return (s.multiply(w).cumsum() / w.cumsum()).fillna(method="ffill")
for (s, adjust, ignore_na, w) in [
(s0, True, False, [np.nan, (1.0 - alpha), 1.0]),
(s0, True, True, [np.nan, (1.0 - alpha), 1.0]),
(s0, False, False, [np.nan, (1.0 - alpha), alpha]),
(s0, False, True, [np.nan, (1.0 - alpha), alpha]),
(s1, True, False, [(1.0 - alpha) ** 2, np.nan, 1.0]),
(s1, True, True, [(1.0 - alpha), np.nan, 1.0]),
(s1, False, False, [(1.0 - alpha) ** 2, np.nan, alpha]),
(s1, False, True, [(1.0 - alpha), np.nan, alpha]),
(s2, True, False, [np.nan, (1.0 - alpha) ** 3, np.nan, np.nan, 1.0, np.nan]),
(s2, True, True, [np.nan, (1.0 - alpha), np.nan, np.nan, 1.0, np.nan]),
(
s2,
False,
False,
[np.nan, (1.0 - alpha) ** 3, np.nan, np.nan, alpha, np.nan],
),
(s2, False, True, [np.nan, (1.0 - alpha), np.nan, np.nan, alpha, np.nan]),
(s3, True, False, [(1.0 - alpha) ** 3, np.nan, (1.0 - alpha), 1.0]),
(s3, True, True, [(1.0 - alpha) ** 2, np.nan, (1.0 - alpha), 1.0]),
(
s3,
False,
False,
[
(1.0 - alpha) ** 3,
np.nan,
(1.0 - alpha) * alpha,
alpha * ((1.0 - alpha) ** 2 + alpha),
],
),
(s3, False, True, [(1.0 - alpha) ** 2, np.nan, (1.0 - alpha) * alpha, alpha]),
]:
expected = simple_wma(s, Series(w))
result = s.ewm(com=com, adjust=adjust, ignore_na=ignore_na).mean()
tm.assert_series_equal(result, expected)
if ignore_na is False:
# check that ignore_na defaults to False
result = s.ewm(com=com, adjust=adjust).mean()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("name", ["var", "vol"])
def test_ewmvar_ewmvol(series, frame, nan_locs, name):
check_ew(name=name, frame=frame, series=series, nan_locs=nan_locs)
def test_ewma_span_com_args(series):
A = series.ewm(com=9.5).mean()
B = series.ewm(span=20).mean()
tm.assert_almost_equal(A, B)
msg = "comass, span, halflife, and alpha are mutually exclusive"
with pytest.raises(ValueError, match=msg):
series.ewm(com=9.5, span=20)
msg = "Must pass one of comass, span, halflife, or alpha"
with pytest.raises(ValueError, match=msg):
series.ewm().mean()
def test_ewma_halflife_arg(series):
A = series.ewm(com=13.932726172912965).mean()
B = series.ewm(halflife=10.0).mean()
tm.assert_almost_equal(A, B)
msg = "comass, span, halflife, and alpha are mutually exclusive"
with pytest.raises(ValueError, match=msg):
series.ewm(span=20, halflife=50)
with pytest.raises(ValueError):
series.ewm(com=9.5, halflife=50)
with pytest.raises(ValueError):
series.ewm(com=9.5, span=20, halflife=50)
with pytest.raises(ValueError):
series.ewm()
def test_ewm_alpha(arr):
# GH 10789
s = Series(arr)
a = s.ewm(alpha=0.61722699889169674).mean()
b = s.ewm(com=0.62014947789973052).mean()
c = s.ewm(span=2.240298955799461).mean()
d = s.ewm(halflife=0.721792864318).mean()
tm.assert_series_equal(a, b)
tm.assert_series_equal(a, c)
tm.assert_series_equal(a, d)
def test_ewm_alpha_arg(series):
# GH 10789
s = series
msg = "Must pass one of comass, span, halflife, or alpha"
with pytest.raises(ValueError, match=msg):
s.ewm()
msg = "comass, span, halflife, and alpha are mutually exclusive"
with pytest.raises(ValueError, match=msg):
s.ewm(com=10.0, alpha=0.5)
with pytest.raises(ValueError, match=msg):
s.ewm(span=10.0, alpha=0.5)
with pytest.raises(ValueError, match=msg):
s.ewm(halflife=10.0, alpha=0.5)
def test_ewm_domain_checks(arr):
# GH 12492
s = Series(arr)
msg = "comass must satisfy: comass >= 0"
with pytest.raises(ValueError, match=msg):
s.ewm(com=-0.1)
s.ewm(com=0.0)
s.ewm(com=0.1)
msg = "span must satisfy: span >= 1"
with pytest.raises(ValueError, match=msg):
s.ewm(span=-0.1)
with pytest.raises(ValueError, match=msg):
s.ewm(span=0.0)
with pytest.raises(ValueError, match=msg):
s.ewm(span=0.9)
s.ewm(span=1.0)
s.ewm(span=1.1)
msg = "halflife must satisfy: halflife > 0"
with pytest.raises(ValueError, match=msg):
s.ewm(halflife=-0.1)
with pytest.raises(ValueError, match=msg):
s.ewm(halflife=0.0)
s.ewm(halflife=0.1)
msg = "alpha must satisfy: 0 < alpha <= 1"
with pytest.raises(ValueError, match=msg):
s.ewm(alpha=-0.1)
with pytest.raises(ValueError, match=msg):
s.ewm(alpha=0.0)
s.ewm(alpha=0.1)
s.ewm(alpha=1.0)
with pytest.raises(ValueError, match=msg):
s.ewm(alpha=1.1)
@pytest.mark.parametrize("method", ["mean", "vol", "var"])
def test_ew_empty_series(method):
vals = pd.Series([], dtype=np.float64)
ewm = vals.ewm(3)
result = getattr(ewm, method)()
tm.assert_almost_equal(result, vals)
@pytest.mark.parametrize("min_periods", [0, 1])
@pytest.mark.parametrize("name", ["mean", "var", "vol"])
def test_ew_min_periods(min_periods, name):
# excluding NaNs correctly
arr = randn(50)
arr[:10] = np.NaN
arr[-10:] = np.NaN
s = Series(arr)
# check min_periods
# GH 7898
result = getattr(s.ewm(com=50, min_periods=2), name)()
assert result[:11].isna().all()
assert not result[11:].isna().any()
result = getattr(s.ewm(com=50, min_periods=min_periods), name)()
if name == "mean":
assert result[:10].isna().all()
assert not result[10:].isna().any()
else:
# ewm.std, ewm.vol, ewm.var (with bias=False) require at least
# two values
assert result[:11].isna().all()
assert not result[11:].isna().any()
# check series of length 0
result = getattr(Series(dtype=object).ewm(com=50, min_periods=min_periods), name)()
tm.assert_series_equal(result, Series(dtype="float64"))
# check series of length 1
result = getattr(Series([1.0]).ewm(50, min_periods=min_periods), name)()
if name == "mean":
tm.assert_series_equal(result, Series([1.0]))
else:
# ewm.std, ewm.vol, ewm.var with bias=False require at least
# two values
tm.assert_series_equal(result, Series([np.NaN]))
# pass in ints
result2 = getattr(Series(np.arange(50)).ewm(span=10), name)()
assert result2.dtype == np.float_
|
bsd-3-clause
|
CyanogenMod/android_kernel_oppo_n3
|
tools/perf/scripts/python/failed-syscalls-by-pid.py
|
11180
|
2058
|
# failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
|
gpl-2.0
|
catapult-project/catapult-csm
|
third_party/gsutil/third_party/boto/boto/ec2/ec2object.py
|
150
|
5554
|
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an EC2 Object
"""
from boto.ec2.tag import TagSet
class EC2Object(object):
def __init__(self, connection=None):
self.connection = connection
if self.connection and hasattr(self.connection, 'region'):
self.region = connection.region
else:
self.region = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
setattr(self, name, value)
class TaggedEC2Object(EC2Object):
"""
Any EC2 resource that can be tagged should be represented
by a Python object that subclasses this class. This class
has the mechanism in place to handle the tagSet element in
the Describe* responses. If tags are found, it will create
a TagSet object and allow it to parse and collect the tags
into a dict that is stored in the "tags" attribute of the
object.
"""
def __init__(self, connection=None):
super(TaggedEC2Object, self).__init__(connection)
self.tags = TagSet()
def startElement(self, name, attrs, connection):
if name == 'tagSet':
return self.tags
else:
return None
def add_tag(self, key, value='', dry_run=False):
"""
Add a tag to this object. Tags are stored by AWS and can be used
to organize and filter resources. Adding a tag involves a round-trip
to the EC2 service.
:type key: str
:param key: The key or name of the tag being stored.
:type value: str
:param value: An optional value that can be stored with the tag.
If you want only the tag name and no value, the
value should be the empty string.
"""
self.add_tags({key: value}, dry_run)
def add_tags(self, tags, dry_run=False):
"""
Add tags to this object. Tags are stored by AWS and can be used
to organize and filter resources. Adding tags involves a round-trip
to the EC2 service.
:type tags: dict
:param tags: A dictionary of key-value pairs for the tags being stored.
If for some tags you want only the name and no value, the
corresponding value for that tag name should be an empty
string.
"""
status = self.connection.create_tags(
[self.id],
tags,
dry_run=dry_run
)
if self.tags is None:
self.tags = TagSet()
self.tags.update(tags)
def remove_tag(self, key, value=None, dry_run=False):
"""
Remove a tag from this object. Removing a tag involves a round-trip
to the EC2 service.
:type key: str
:param key: The key or name of the tag being stored.
:type value: str
:param value: An optional value that can be stored with the tag.
If a value is provided, it must match the value currently
stored in EC2. If not, the tag will not be removed. If
a value of None is provided, the tag will be
unconditionally deleted.
NOTE: There is an important distinction between a value
of '' and a value of None.
"""
self.remove_tags({key: value}, dry_run)
def remove_tags(self, tags, dry_run=False):
"""
Removes tags from this object. Removing tags involves a round-trip
to the EC2 service.
:type tags: dict
:param tags: A dictionary of key-value pairs for the tags being removed.
For each key, the provided value must match the value
currently stored in EC2. If not, that particular tag will
not be removed. However, if a value of None is provided,
the tag will be unconditionally deleted.
NOTE: There is an important distinction between a value of
'' and a value of None.
"""
status = self.connection.delete_tags(
[self.id],
tags,
dry_run=dry_run
)
for key, value in tags.items():
if key in self.tags:
if value is None or value == self.tags[key]:
del self.tags[key]
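# Editor's usage sketch (illustrative, not part of boto): `resource` stands for
# any TaggedEC2Object subclass instance (e.g. an Instance or Volume obtained
# from an EC2 connection); tag names and values below are hypothetical.
def _example_tag_usage(resource):
    resource.add_tag('Name', 'web-01')                 # one tag, one round-trip
    resource.add_tags({'env': 'staging', 'team': ''})  # several tags, one round-trip
    resource.remove_tag('team')                        # value=None: unconditional delete
    resource.remove_tag('env', 'production')           # kept: stored value is 'staging'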
|
bsd-3-clause
|
laslabs/odoo
|
addons/website_project_issue/tests/test_access_rights.py
|
45
|
6654
|
# -*- coding: utf-8 -*-
from openerp.addons.project.tests.test_access_rights import TestPortalProjectBase
from openerp.exceptions import AccessError
from openerp.tools import mute_logger
class TestPortalProjectBase(TestPortalProjectBase):
def setUp(self):
super(TestPortalProjectBase, self).setUp()
Issue = self.env['project.issue'].with_context({'mail_create_nolog': True})
self.issue_1 = Issue.create({
'name': 'Test1', 'user_id': False, 'project_id': self.project_pigs.id})
self.issue_2 = Issue.create({
'name': 'Test2', 'user_id': False, 'project_id': self.project_pigs.id})
self.issue_3 = Issue.create({
'name': 'Test3', 'user_id': False, 'project_id': self.project_pigs.id})
self.issue_4 = Issue.create({
'name': 'Test4', 'user_id': self.user_projectuser.id, 'project_id': self.project_pigs.id})
self.issue_5 = Issue.create({
'name': 'Test5', 'user_id': self.user_portal.id, 'project_id': self.project_pigs.id})
self.issue_6 = Issue.create({
'name': 'Test6', 'user_id': self.user_public.id, 'project_id': self.project_pigs.id})
class TestPortalIssue(TestPortalProjectBase):
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
def test_00_project_access_rights(self):
""" Test basic project access rights, for project and portal_project """
pigs_id = self.project_pigs.id
Issue = self.env['project.issue']
# ----------------------------------------
# CASE1: portal project
# ----------------------------------------
self.project_pigs.write({'privacy_visibility': 'portal'})
# Do: Alfred reads project -> ok (employee ok public)
# Test: all project issues visible
issues = Issue.sudo(self.user_projectuser.id).search([('project_id', '=', pigs_id)])
test_issue_ids = set([self.issue_1.id, self.issue_2.id, self.issue_3.id, self.issue_4.id, self.issue_5.id, self.issue_6.id])
self.assertEqual(set(issues.ids), test_issue_ids,
'access rights: project user cannot see all issues of a portal project')
# Do: Bert reads project -> crash, no group
# Test: no project issue searchable
self.assertRaises(AccessError, Issue.sudo(self.user_noone.id).search, [('project_id', '=', pigs_id)])
# Data: issue follower
self.issue_1.sudo(self.user_projectuser.id).message_subscribe_users(user_ids=[self.user_portal.id])
self.issue_3.sudo(self.user_projectuser.id).message_subscribe_users(user_ids=[self.user_portal.id])
# Do: Chell reads project -> ok (portal ok public)
# Test: only followed project issues visible + assigned
issues = Issue.sudo(self.user_portal.id).search([('project_id', '=', pigs_id)])
self.assertEqual(set(issues.ids), set([self.issue_1.id, self.issue_3.id, self.issue_5.id]),
'access rights: portal user should see the followed issues of a portal project')
# Data: issue follower cleaning
self.issue_1.sudo(self.user_projectuser.id).message_unsubscribe_users(user_ids=[self.user_portal.id])
self.issue_3.sudo(self.user_projectuser.id).message_unsubscribe_users(user_ids=[self.user_portal.id])
# ----------------------------------------
# CASE2: employee project
# ----------------------------------------
self.project_pigs.write({'privacy_visibility': 'employees'})
# Do: Alfred reads project -> ok (employee ok employee)
# Test: all project issues visible
issues = Issue.sudo(self.user_projectuser.id).search([('project_id', '=', pigs_id)])
self.assertEqual(set(issues.ids), set([self.issue_1.id, self.issue_2.id, self.issue_3.id,
self.issue_4.id, self.issue_5.id, self.issue_6.id]),
'access rights: project user cannot see all issues of an employees project')
# Do: Chell reads project -> ko (portal ko employee)
# Test: no project issue visible + assigned
issues = Issue.sudo(self.user_portal.id).search([('project_id', '=', pigs_id)])
self.assertFalse(issues.ids, 'access rights: portal user should not see issues of an employees project, even if assigned')
# ----------------------------------------
# CASE3: followers project
# ----------------------------------------
self.project_pigs.write({'privacy_visibility': 'followers'})
# Do: Alfred reads project -> ko (employee ko followers)
# Test: no project issue visible
issues = Issue.sudo(self.user_projectuser.id).search([('project_id', '=', pigs_id)])
self.assertEqual(set(issues.ids), set([self.issue_4.id]),
'access rights: employee user should not see issues of a not-followed followers project, only assigned')
# Do: Chell reads project -> ko (portal ko employee)
# Test: no project issue visible
issues = Issue.sudo(self.user_portal.id).search([('project_id', '=', pigs_id)])
self.assertEqual(set(issues.ids), set([self.issue_5.id]),
'access rights: portal user should not see issues of a not-followed followers project, only assigned')
# Data: subscribe Alfred, Chell and Donovan as follower
self.project_pigs.message_subscribe_users(user_ids=[self.user_projectuser.id, self.user_portal.id, self.user_public.id])
self.issue_1.sudo(self.user_projectmanager.id).message_subscribe_users(user_ids=[self.user_portal.id, self.user_projectuser.id])
self.issue_3.sudo(self.user_projectmanager.id).message_subscribe_users(user_ids=[self.user_portal.id, self.user_projectuser.id])
# Do: Alfred reads project -> ok (follower ok followers)
# Test: followed + assigned issues visible
issues = Issue.sudo(self.user_projectuser.id).search([('project_id', '=', pigs_id)])
self.assertEqual(set(issues.ids), set([self.issue_1.id, self.issue_3.id, self.issue_4.id]),
                         'access rights: employee user should see exactly the followed + assigned issues of a followers project')
# Do: Chell reads project -> ok (follower ok follower)
# Test: followed + assigned issues visible
issues = Issue.sudo(self.user_portal.id).search([('project_id', '=', pigs_id)])
self.assertEqual(set(issues.ids), set([self.issue_1.id, self.issue_3.id, self.issue_5.id]),
                         'access rights: portal user should see exactly the followed + assigned issues of a followers project')
|
agpl-3.0
|
Jgarcia-IAS/SITE
|
addons/account/project/report/quantity_cost_ledger.py
|
358
|
6204
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class account_analytic_quantity_cost_ledger(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(account_analytic_quantity_cost_ledger, self).__init__(cr, uid, name, context=context)
self.localcontext.update( {
'time': time,
'lines_g': self._lines_g,
'lines_a': self._lines_a,
'sum_quantity': self._sum_quantity,
'account_sum_quantity': self._account_sum_quantity,
})
def _lines_g(self, account_id, date1, date2, journals):
if not journals:
self.cr.execute("SELECT sum(aal.unit_amount) AS quantity, \
aa.code AS code, aa.name AS name, aa.id AS id \
FROM account_account AS aa, account_analytic_line AS aal \
WHERE (aal.account_id=%s) AND (aal.date>=%s) \
AND (aal.date<=%s) AND (aal.general_account_id=aa.id) \
AND aa.active \
GROUP BY aa.code, aa.name, aa.id ORDER BY aa.code",
(account_id, date1, date2))
else:
journal_ids = journals
self.cr.execute("SELECT sum(aal.unit_amount) AS quantity, \
aa.code AS code, aa.name AS name, aa.id AS id \
FROM account_account AS aa, account_analytic_line AS aal \
WHERE (aal.account_id=%s) AND (aal.date>=%s) \
AND (aal.date<=%s) AND (aal.general_account_id=aa.id) \
AND aa.active \
AND (aal.journal_id IN %s ) \
GROUP BY aa.code, aa.name, aa.id ORDER BY aa.code",
(account_id, date1, date2, tuple(journal_ids)))
res = self.cr.dictfetchall()
return res
def _lines_a(self, general_account_id, account_id, date1, date2, journals):
if not journals:
self.cr.execute("SELECT aal.name AS name, aal.code AS code, \
aal.unit_amount AS quantity, aal.date AS date, \
aaj.code AS cj \
FROM account_analytic_line AS aal, \
account_analytic_journal AS aaj \
WHERE (aal.general_account_id=%s) AND (aal.account_id=%s) \
AND (aal.date>=%s) AND (aal.date<=%s) \
AND (aal.journal_id=aaj.id) \
ORDER BY aal.date, aaj.code, aal.code",
(general_account_id, account_id, date1, date2))
else:
journal_ids = journals
self.cr.execute("SELECT aal.name AS name, aal.code AS code, \
aal.unit_amount AS quantity, aal.date AS date, \
aaj.code AS cj \
FROM account_analytic_line AS aal, \
account_analytic_journal AS aaj \
WHERE (aal.general_account_id=%s) AND (aal.account_id=%s) \
AND (aal.date>=%s) AND (aal.date<=%s) \
AND (aal.journal_id=aaj.id) AND (aaj.id IN %s) \
ORDER BY aal.date, aaj.code, aal.code",
(general_account_id, account_id, date1, date2,tuple(journal_ids)))
res = self.cr.dictfetchall()
return res
def _account_sum_quantity(self, account_id, date1, date2, journals):
if not journals:
self.cr.execute("SELECT sum(unit_amount) \
FROM account_analytic_line \
WHERE account_id=%s AND date>=%s AND date<=%s",
(account_id, date1, date2))
else:
journal_ids = journals
self.cr.execute("SELECT sum(unit_amount) \
FROM account_analytic_line \
WHERE account_id = %s AND date >= %s AND date <= %s \
AND journal_id IN %s",
(account_id, date1, date2, tuple(journal_ids),))
return self.cr.fetchone()[0] or 0.0
def _sum_quantity(self, accounts, date1, date2, journals):
ids = map(lambda x: x.id, accounts)
if not ids:
return 0.0
if not journals:
self.cr.execute("SELECT sum(unit_amount) \
FROM account_analytic_line \
WHERE account_id IN %s AND date>=%s AND date<=%s",
(tuple(ids), date1, date2,))
else:
journal_ids = journals
self.cr.execute("SELECT sum(unit_amount) \
FROM account_analytic_line \
WHERE account_id IN %s AND date >= %s AND date <= %s \
AND journal_id IN %s",(tuple(ids), date1, date2, tuple(journal_ids)))
return self.cr.fetchone()[0] or 0.0
class report_analyticcostledgerquantity(osv.AbstractModel):
_name = 'report.account.report_analyticcostledgerquantity'
_inherit = 'report.abstract_report'
_template = 'account.report_analyticcostledgerquantity'
_wrapped_report_class = account_analytic_quantity_cost_ledger
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
nooperpudd/trading-with-python
|
cookbook/getDataFromYahooFinance.py
|
77
|
1391
|
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 16 18:37:23 2011
@author: jev
"""
from urllib import urlretrieve
from urllib2 import urlopen
from pandas import Index, DataFrame
from datetime import datetime
import matplotlib.pyplot as plt
sDate = (2005,1,1)
eDate = (2011,10,1)
symbol = 'SPY'
fName = symbol+'.csv'
try: # try to load saved csv file, otherwise get from the net
fid = open(fName)
lines = fid.readlines()
fid.close()
print 'Loaded from ' , fName
except Exception as e:
print e
urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
format(symbol.upper(),sDate[1]-1,sDate[2],sDate[0],eDate[1]-1,eDate[2],eDate[0])
print 'Downloading from ', urlStr
urlretrieve(urlStr,symbol+'.csv')
lines = urlopen(urlStr).readlines()
dates = []
data = [[] for i in range(6)]
#high
# header : Date,Open,High,Low,Close,Volume,Adj Close
for line in lines[1:]:
fields = line.rstrip().split(',')
dates.append(datetime.strptime( fields[0],'%Y-%m-%d'))
for i,field in enumerate(fields[1:]):
data[i].append(float(field))
idx = Index(dates)
data = dict(zip(['open','high','low','close','volume','adj_close'],data))
# create a pandas dataframe structure
df = DataFrame(data,index=idx).sort()
df.plot(secondary_y=['volume'])
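# Editor's note (worked example of the URL template above): Yahoo's ichart
# parameters are a=start_month-1, b=start_day, c=start_year and
# d=end_month-1, e=end_day, f=end_year, so the defaults at the top of this
# script resolve to:
#   http://ichart.finance.yahoo.com/table.csv?s=SPY&a=0&b=1&c=2005&d=9&e=1&f=2011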
|
bsd-3-clause
|
wuzhihui1123/django-cms
|
cms/test_utils/project/pluginapp/plugins/meta/south_migrations/0001_initial.py
|
46
|
4092
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'TestPluginModel'
db.create_table(u'meta_testpluginmodel', (
(u'cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
))
db.send_create_signal(u'meta', ['TestPluginModel'])
# Adding model 'TestPluginModel2'
db.create_table('meta_testpluginmodel2', (
(u'cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
))
db.send_create_signal('meta', ['TestPluginModel2'])
# Adding model 'TestPluginModel4'
db.create_table('or_another_4', (
(u'cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
))
db.send_create_signal(u'meta', ['TestPluginModel4'])
def backwards(self, orm):
# Deleting model 'TestPluginModel'
db.delete_table(u'meta_testpluginmodel')
# Deleting model 'TestPluginModel2'
db.delete_table('meta_testpluginmodel2')
# Deleting model 'TestPluginModel4'
db.delete_table('or_another_4')
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
u'meta.testpluginmodel': {
'Meta': {'object_name': 'TestPluginModel', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'})
},
'meta.testpluginmodel2': {
'Meta': {'object_name': 'TestPluginModel2', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'})
},
u'meta.testpluginmodel4': {
'Meta': {'object_name': 'TestPluginModel4', 'db_table': "'or_another_4'", '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'})
}
}
complete_apps = ['meta']
|
bsd-3-clause
|
monoku/conekta-python
|
tests/test_charges.py
|
1
|
3186
|
#!/usr/bin/python
#coding: utf-8
#(c) 2013 Julian Ceballos <@jceb>
from . import BaseEndpointTestCase
class OrdersEndpointTestCase(BaseEndpointTestCase):
def test_card_charge_done(self):
self.client.api_key = '1tv5yJp3xnVZ7eK67m4h'
response = self.client.Charge.create(self.card_charge_object)
assert 'id' in response.parseJSON()
def test_cash_charge_done(self):
self.client.api_key = '1tv5yJp3xnVZ7eK67m4h'
response = self.client.Charge.create(self.cash_charge_object)
assert 'id' in response.parseJSON()
def test_bank_charge_done(self):
self.client.api_key = '1tv5yJp3xnVZ7eK67m4h'
response = self.client.Charge.create(self.bank_charge_object)
assert 'id' in response.parseJSON()
def test_card_charge_authentication_fail(self):
self.client.api_key = ''
response = self.client.Charge.create(self.card_charge_object)
assert 'authentication_error' == response.parseJSON()['error']['type']
def test_cash_charge_authentication_fail(self):
self.client.api_key = ''
response = self.client.Charge.create(self.cash_charge_object)
assert 'authentication_error' == response.parseJSON()['error']['type']
def test_bank_charge_authentication_fail(self):
self.client.api_key = ''
response = self.client.Charge.create(self.bank_charge_object)
assert 'authentication_error' == response.parseJSON()['error']['type']
class OrdersEndpointTestCaseWithCustomAPIKey(BaseEndpointTestCase):
""" The same tests as in OrdersEndpointTestCase but not
setting the API key globally rather than using it differently
for each call """
def test_card_charge_done(self):
self.client.api_key = ''
response = self.client.Charge.create(self.card_charge_object, other_api_key='1tv5yJp3xnVZ7eK67m4h')
assert 'id' in response.parseJSON()
def test_cash_charge_done(self):
self.client.api_key = ''
response = self.client.Charge.create(self.cash_charge_object, other_api_key='1tv5yJp3xnVZ7eK67m4h')
assert 'id' in response.parseJSON()
def test_bank_charge_done(self):
self.client.api_key = ''
response = self.client.Charge.create(self.bank_charge_object, other_api_key='1tv5yJp3xnVZ7eK67m4h')
assert 'id' in response.parseJSON()
def test_card_charge_authentication_fail(self):
self.client.api_key = '1tv5yJp3xnVZ7eK67m4h'
response = self.client.Charge.create(self.card_charge_object, other_api_key='')
assert 'authentication_error' == response.parseJSON()['error']['type']
def test_cash_charge_authentication_fail(self):
self.client.api_key = '1tv5yJp3xnVZ7eK67m4h'
response = self.client.Charge.create(self.cash_charge_object, other_api_key='')
assert 'authentication_error' == response.parseJSON()['error']['type']
def test_bank_charge_authentication_fail(self):
self.client.api_key = '1tv5yJp3xnVZ7eK67m4h'
response = self.client.Charge.create(self.bank_charge_object, other_api_key='')
assert 'authentication_error' == response.parseJSON()['error']['type']
|
mit
|
jburger424/MediaQueueHCI
|
m-q-env/lib/python3.4/site-packages/flask_script/commands.py
|
56
|
18544
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import,print_function
import os
import sys
import code
import warnings
import string
import inspect
import argparse
from flask import _request_ctx_stack
from .cli import prompt, prompt_pass, prompt_bool, prompt_choices
from ._compat import izip, text_type
class InvalidCommand(Exception):
"""\
This is a generic error for "bad" commands.
It is not used in Flask-Script itself, but you should throw
this error (or one derived from it) in your command handlers,
and your main code should display this error's message without
a stack trace.
This way, we maintain interoperability if some other plug-in code
supplies Flask-Script hooks.
"""
pass
class Group(object):
"""
Stores argument groups and mutually exclusive groups for
`ArgumentParser.add_argument_group <http://argparse.googlecode.com/svn/trunk/doc/other-methods.html#argument-groups>`
or `ArgumentParser.add_mutually_exclusive_group <http://argparse.googlecode.com/svn/trunk/doc/other-methods.html#add_mutually_exclusive_group>`.
Note: The title and description params cannot be used with the exclusive
or required params.
:param options: A list of Option classes to add to this group
:param title: A string to use as the title of the argument group
:param description: A string to use as the description of the argument
group
:param exclusive: A boolean indicating if this is an argument group or a
mutually exclusive group
:param required: A boolean indicating if this mutually exclusive group
must have an option selected
"""
def __init__(self, *options, **kwargs):
self.option_list = options
self.title = kwargs.pop("title", None)
self.description = kwargs.pop("description", None)
self.exclusive = kwargs.pop("exclusive", None)
self.required = kwargs.pop("required", None)
if ((self.title or self.description) and
(self.required or self.exclusive)):
raise TypeError("title and/or description cannot be used with "
"required and/or exclusive.")
super(Group, self).__init__(**kwargs)
def get_options(self):
"""
By default, returns self.option_list. Override if you
need to do instance-specific configuration.
"""
return self.option_list
class Option(object):
"""
Stores positional and optional arguments for `ArgumentParser.add_argument
<http://argparse.googlecode.com/svn/trunk/doc/add_argument.html>`_.
:param name_or_flags: Either a name or a list of option strings,
e.g. foo or -f, --foo
:param action: The basic type of action to be taken when this argument
is encountered at the command-line.
:param nargs: The number of command-line arguments that should be consumed.
:param const: A constant value required by some action and nargs selections.
:param default: The value produced if the argument is absent from
the command-line.
:param type: The type to which the command-line arg should be converted.
:param choices: A container of the allowable values for the argument.
:param required: Whether or not the command-line option may be omitted
(optionals only).
:param help: A brief description of what the argument does.
:param metavar: A name for the argument in usage messages.
:param dest: The name of the attribute to be added to the object
returned by parse_args().
"""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
class Command(object):
"""
Base class for creating commands.
:param func: Initialize this command by introspecting the function.
"""
option_list = ()
help_args = None
def __init__(self, func=None):
if func is None:
if not self.option_list:
self.option_list = []
return
args, varargs, keywords, defaults = inspect.getargspec(func)
if inspect.ismethod(func):
args = args[1:]
options = []
# first arg is always "app" : ignore
defaults = defaults or []
kwargs = dict(izip(*[reversed(l) for l in (args, defaults)]))
for arg in args:
if arg in kwargs:
default = kwargs[arg]
if isinstance(default, bool):
options.append(Option('-%s' % arg[0],
'--%s' % arg,
action="store_true",
dest=arg,
required=False,
default=default))
else:
options.append(Option('-%s' % arg[0],
'--%s' % arg,
dest=arg,
type=text_type,
required=False,
default=default))
else:
options.append(Option(arg, type=text_type))
self.run = func
self.__doc__ = func.__doc__
self.option_list = options
@property
def description(self):
description = self.__doc__ or ''
return description.strip()
def add_option(self, option):
"""
Adds Option to option list.
"""
self.option_list.append(option)
def get_options(self):
"""
By default, returns self.option_list. Override if you
need to do instance-specific configuration.
"""
return self.option_list
def create_parser(self, *args, **kwargs):
func_stack = kwargs.pop('func_stack',())
parent = kwargs.pop('parent',None)
parser = argparse.ArgumentParser(*args, add_help=False, **kwargs)
help_args = self.help_args
while help_args is None and parent is not None:
help_args = parent.help_args
parent = getattr(parent,'parent',None)
if help_args:
from flask_script import add_help
add_help(parser,help_args)
for option in self.get_options():
if isinstance(option, Group):
if option.exclusive:
group = parser.add_mutually_exclusive_group(
required=option.required,
)
else:
group = parser.add_argument_group(
title=option.title,
description=option.description,
)
for opt in option.get_options():
group.add_argument(*opt.args, **opt.kwargs)
else:
parser.add_argument(*option.args, **option.kwargs)
parser.set_defaults(func_stack=func_stack+(self,))
self.parser = parser
self.parent = parent
return parser
def __call__(self, app=None, *args, **kwargs):
"""
Handles the command with the given app.
Default behaviour is to call ``self.run`` within a test request context.
"""
with app.test_request_context():
return self.run(*args, **kwargs)
def run(self):
"""
Runs a command. This must be implemented by the subclass. Should take
arguments as configured by the Command options.
"""
raise NotImplementedError
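# Editor's sketch of a minimal Command subclass (hypothetical name and option):
# the Options declared in option_list end up on the argparse parser built by
# create_parser(), and their parsed values are passed to run() by name.
class _HelloCommand(Command):
    "Prints a greeting (illustrative example, not part of Flask-Script)."
    option_list = (
        Option('-n', '--name', dest='name', default='world'),
    )
    def run(self, name):
        print('Hello, %s!' % name)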
class Shell(Command):
"""
Runs a Python shell inside Flask application context.
:param banner: banner appearing at top of shell when started
:param make_context: a callable returning a dict of variables
used in the shell namespace. By default
returns a dict consisting of just the app.
:param use_bpython: use BPython shell if available, ignore if not.
The BPython shell can be turned off in command
line by passing the **--no-bpython** flag.
:param use_ipython: use IPython shell if available, ignore if not.
The IPython shell can be turned off in command
line by passing the **--no-ipython** flag.
"""
banner = ''
help = description = 'Runs a Python shell inside Flask application context.'
def __init__(self, banner=None, make_context=None, use_ipython=True,
use_bpython=True):
self.banner = banner or self.banner
self.use_ipython = use_ipython
self.use_bpython = use_bpython
if make_context is None:
make_context = lambda: dict(app=_request_ctx_stack.top.app)
self.make_context = make_context
def get_options(self):
return (
Option('--no-ipython',
action="store_true",
dest='no_ipython',
default=not(self.use_ipython),
help="Do not use the BPython shell"),
Option('--no-bpython',
action="store_true",
dest='no_bpython',
default=not(self.use_bpython),
help="Do not use the IPython shell"),
)
def get_context(self):
"""
Returns a dict of context variables added to the shell namespace.
"""
return self.make_context()
def run(self, no_ipython, no_bpython):
"""
        Runs the shell. If no_bpython is False or use_bpython is True, then
        a BPython shell is run (if installed). Otherwise, if no_ipython is False
        or use_ipython is True, then an IPython shell is run (if installed).
"""
context = self.get_context()
if not no_bpython:
# Try BPython
try:
from bpython import embed
embed(banner=self.banner, locals_=context)
return
except ImportError:
pass
if not no_ipython:
# Try IPython
try:
try:
# 0.10.x
from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed(banner=self.banner)
ipshell(global_ns=dict(), local_ns=context)
except ImportError:
# 0.12+
from IPython import embed
embed(banner1=self.banner, user_ns=context)
return
except ImportError:
pass
# Use basic python shell
code.interact(self.banner, local=context)
class Server(Command):
"""
Runs the Flask development server i.e. app.run()
:param host: server host
:param port: server port
:param use_debugger: Flag whether to default to using the Werkzeug debugger.
                          This can be overridden on the command line
by passing the **-d** or **-D** flag.
Defaults to False, for security.
:param use_reloader: Flag whether to use the auto-reloader.
Default to True when debugging.
                          This can be overridden on the command line by
passing the **-r**/**-R** flag.
:param threaded: should the process handle each request in a separate
thread?
:param processes: number of processes to spawn
:param passthrough_errors: disable the error catching. This means that the server will die on errors but it can be useful to hook debuggers in (pdb etc.)
:param options: :func:`werkzeug.run_simple` options.
"""
help = description = 'Runs the Flask development server i.e. app.run()'
def __init__(self, host='127.0.0.1', port=5000, use_debugger=None,
use_reloader=None, threaded=False, processes=1,
passthrough_errors=False, **options):
self.port = port
self.host = host
self.use_debugger = use_debugger
self.use_reloader = use_reloader if use_reloader is not None else use_debugger
self.server_options = options
self.threaded = threaded
self.processes = processes
self.passthrough_errors = passthrough_errors
def get_options(self):
options = (
Option('-h', '--host',
dest='host',
default=self.host),
Option('-p', '--port',
dest='port',
type=int,
default=self.port),
Option('--threaded',
dest='threaded',
action='store_true',
default=self.threaded),
Option('--processes',
dest='processes',
type=int,
default=self.processes),
Option('--passthrough-errors',
action='store_true',
dest='passthrough_errors',
default=self.passthrough_errors),
Option('-d', '--debug',
action='store_true',
dest='use_debugger',
help='enable the Werkzeug debugger (DO NOT use in production code)',
default=self.use_debugger),
Option('-D', '--no-debug',
action='store_false',
dest='use_debugger',
help='disable the Werkzeug debugger',
default=self.use_debugger),
Option('-r', '--reload',
action='store_true',
dest='use_reloader',
help='monitor Python files for changes (not 100% safe for production use)',
default=self.use_reloader),
Option('-R', '--no-reload',
action='store_false',
dest='use_reloader',
help='do not monitor Python files for changes',
default=self.use_reloader),
)
return options
def __call__(self, app, host, port, use_debugger, use_reloader,
threaded, processes, passthrough_errors):
# we don't need to run the server in request context
# so just run it directly
if use_debugger is None:
use_debugger = app.debug
if use_debugger is None:
use_debugger = True
if sys.stderr.isatty():
print("Debugging is on. DANGER: Do not allow random users to connect to this server.", file=sys.stderr)
if use_reloader is None:
use_reloader = app.debug
app.run(host=host,
port=port,
debug=use_debugger,
use_debugger=use_debugger,
use_reloader=use_reloader,
threaded=threaded,
processes=processes,
passthrough_errors=passthrough_errors,
**self.server_options)
class Clean(Command):
"Remove *.pyc and *.pyo files recursively starting at current directory"
def run(self):
for dirpath, dirnames, filenames in os.walk('.'):
for filename in filenames:
if filename.endswith('.pyc') or filename.endswith('.pyo'):
full_pathname = os.path.join(dirpath, filename)
print('Removing %s' % full_pathname)
os.remove(full_pathname)
class ShowUrls(Command):
"""
Displays all of the url matching routes for the project
"""
def __init__(self, order='rule'):
self.order = order
def get_options(self):
return (
Option('url',
nargs='?',
help='Url to test (ex. /static/image.png)'),
Option('--order',
dest='order',
default=self.order,
help='Property on Rule to order by (default: %s)' % self.order)
)
def run(self, url, order):
from flask import current_app
from werkzeug.exceptions import NotFound, MethodNotAllowed
rows = []
column_length = 0
column_headers = ('Rule', 'Endpoint', 'Arguments')
if url:
try:
rule, arguments = current_app.url_map \
.bind('localhost') \
.match(url, return_rule=True)
rows.append((rule.rule, rule.endpoint, arguments))
column_length = 3
except (NotFound, MethodNotAllowed) as e:
rows.append(("<%s>" % e, None, None))
column_length = 1
else:
rules = sorted(current_app.url_map.iter_rules(), key=lambda rule: getattr(rule, order))
for rule in rules:
rows.append((rule.rule, rule.endpoint, None))
column_length = 2
str_template = ''
table_width = 0
if column_length >= 1:
max_rule_length = max(len(r[0]) for r in rows)
max_rule_length = max_rule_length if max_rule_length > 4 else 4
str_template += '%-' + str(max_rule_length) + 's'
table_width += max_rule_length
if column_length >= 2:
max_endpoint_length = max(len(str(r[1])) for r in rows)
# max_endpoint_length = max(rows, key=len)
max_endpoint_length = max_endpoint_length if max_endpoint_length > 8 else 8
str_template += ' %-' + str(max_endpoint_length) + 's'
table_width += 2 + max_endpoint_length
if column_length >= 3:
max_arguments_length = max(len(str(r[2])) for r in rows)
max_arguments_length = max_arguments_length if max_arguments_length > 9 else 9
str_template += ' %-' + str(max_arguments_length) + 's'
table_width += 2 + max_arguments_length
print(str_template % (column_headers[:column_length]))
print('-' * table_width)
for row in rows:
print(str_template % row[:column_length])
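# Editor's usage sketch (assumes Flask and flask_script.Manager, neither of
# which is imported by this module): wiring the commands above into a
# typical manage.py entry point.
#
#     from flask import Flask
#     from flask_script import Manager
#     from flask_script.commands import Clean, Server, Shell, ShowUrls
#
#     app = Flask(__name__)
#     manager = Manager(app)
#     manager.add_command('runserver', Server(host='0.0.0.0', port=8000))
#     manager.add_command('shell', Shell())
#     manager.add_command('clean', Clean())
#     manager.add_command('urls', ShowUrls())
#
#     if __name__ == '__main__':
#         manager.run()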
|
mit
|
EasyList-Lithuania/easylist_lithuania
|
tools/addChecksum.py
|
1
|
2767
|
#!/usr/bin/env python
# coding: utf-8
# This file is part of Adblock Plus <http://adblockplus.org/>,
# Copyright (C) 2006-2014 Eyeo GmbH
#
# Adblock Plus is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# Adblock Plus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Adblock Plus. If not, see <http://www.gnu.org/licenses/>.
#############################################################################
# This is a reference script to add checksums to downloadable #
# subscriptions. The checksum will be validated by Adblock Plus on download #
# and checksum mismatches (broken downloads) will be rejected. #
# #
# To add a checksum to a subscription file, run the script like this: #
# #
# python addChecksum.py < subscription.txt > subscriptionSigned.txt #
# #
# Note: your subscription file should be saved in UTF-8 encoding, otherwise #
# the operation will fail. #
# #
#############################################################################
import sys, re, codecs, hashlib, base64
checksumRegexp = re.compile(r'^\s*!\s*checksum[\s\-:]+([\w\+\/=]+).*\n', re.I | re.M)
def addChecksum(data):
checksum = calculateChecksum(data)
data = re.sub(checksumRegexp, '', data)
data = re.sub(r'(\r?\n)', r'\1! Checksum: %s\1' % checksum, data, 1)
return data
def calculateChecksum(data):
md5 = hashlib.md5()
md5.update(normalize(data).encode('utf-8'))
return base64.b64encode(md5.digest()).rstrip('=')
def normalize(data):
data = re.sub(r'\r', '', data)
data = re.sub(r'\n+', '\n', data)
data = re.sub(checksumRegexp, '', data)
return data
def readStream(stream):
reader = codecs.getreader('utf8')(stream)
try:
return reader.read()
except Exception, e:
raise Exception('Failed reading data, most likely not encoded as UTF-8:\n%s' % e)
if __name__ == '__main__':
if sys.platform == "win32":
import os, msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
data = addChecksum(readStream(sys.stdin))
sys.stdout.write(data.encode('utf-8'))
|
gpl-3.0
|
holmes/intellij-community
|
python/helpers/docutils/readers/__init__.py
|
70
|
3265
|
# $Id: __init__.py 5618 2008-07-28 08:37:32Z strank $
# Authors: David Goodger <[email protected]>; Ueli Schlaepfer
# Copyright: This module has been placed in the public domain.
"""
This package contains Docutils Reader modules.
"""
__docformat__ = 'reStructuredText'
from docutils import utils, parsers, Component
from docutils.transforms import universal
class Reader(Component):
"""
Abstract base class for docutils Readers.
Each reader module or package must export a subclass also called 'Reader'.
The two steps of a Reader's responsibility are `scan()` and
`parse()`. Call `read()` to process a document.
"""
component_type = 'reader'
config_section = 'readers'
def get_transforms(self):
return Component.get_transforms(self) + [
universal.Decorations,
universal.ExposeInternals,
universal.StripComments,]
def __init__(self, parser=None, parser_name=None):
"""
Initialize the Reader instance.
Several instance attributes are defined with dummy initial values.
Subclasses may use these attributes as they wish.
"""
self.parser = parser
"""A `parsers.Parser` instance shared by all doctrees. May be left
unspecified if the document source determines the parser."""
if parser is None and parser_name:
self.set_parser(parser_name)
self.source = None
"""`docutils.io` IO object, source of input data."""
self.input = None
"""Raw text input; either a single string or, for more complex cases,
a collection of strings."""
def set_parser(self, parser_name):
"""Set `self.parser` by name."""
parser_class = parsers.get_parser_class(parser_name)
self.parser = parser_class()
def read(self, source, parser, settings):
self.source = source
if not self.parser:
self.parser = parser
self.settings = settings
self.input = self.source.read()
self.parse()
return self.document
def parse(self):
"""Parse `self.input` into a document tree."""
self.document = document = self.new_document()
self.parser.parse(self.input, document)
document.current_source = document.current_line = None
def new_document(self):
"""Create and return a new empty document tree (root node)."""
document = utils.new_document(self.source.source_path, self.settings)
return document
class ReReader(Reader):
"""
A reader which rereads an existing document tree (e.g. a
deserializer).
Often used in conjunction with `writers.UnfilteredWriter`.
"""
def get_transforms(self):
# Do not add any transforms. They have already been applied
# by the reader which originally created the document.
return Component.get_transforms(self)
_reader_aliases = {}
def get_reader_class(reader_name):
"""Return the Reader class from the `reader_name` module."""
reader_name = reader_name.lower()
if reader_name in _reader_aliases:
reader_name = _reader_aliases[reader_name]
module = __import__(reader_name, globals(), locals())
return module.Reader
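# Illustrative usage sketch (not part of the original module); the component names
# below are the usual docutils ones and are assumptions here:
#
#   reader_class = get_reader_class('standalone')
#   reader = reader_class(parser_name='restructuredtext')
#   # reader.read(source, parser, settings) would then return a document tree.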
|
apache-2.0
|
cevaris/pants
|
tests/python/pants_test/build_graph/test_build_file_parser.py
|
3
|
14916
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from collections import namedtuple
from textwrap import dedent
from pants.base.build_file import BuildFile
from pants.base.file_system_project_tree import FileSystemProjectTree
from pants.build_graph.address import BuildFileAddress
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.build_graph.build_file_parser import BuildFileParser
from pants.build_graph.target import Target
from pants.util.strutil import ensure_binary
from pants_test.base_test import BaseTest
# TODO(Eric Ayers) Explicit unit tests are missing for registered_aliases, parse_spec,
# parse_build_file_family
class ErrorTarget(Target):
def __init__(self, *args, **kwargs):
assert False, "This fake target should never be initialized in this test!"
class BuildFileParserBasicsTest(BaseTest):
@property
def alias_groups(self):
return BuildFileAliases(targets={'jvm_binary': ErrorTarget,
'java_library': ErrorTarget})
def create_buildfile(self, path):
return BuildFile(FileSystemProjectTree(self.build_root), path)
def assert_parser_error(self, build_file, err_string):
with self.assertRaises(BuildFileParser.BuildFileParserError) as ctx:
self.build_file_parser.parse_build_file(build_file)
self.assertIn(err_string, str(ctx.exception))
def test_name_injection(self):
# Target with no name is fine: target gets name of directory.
self.add_to_build_file('foo/bar/BUILD', '\njava_library()\n')
build_file = self.create_buildfile('foo/bar/BUILD')
addresses = list(self.build_file_parser.parse_build_file(build_file).keys())
self.assertEqual(1, len(addresses))
self.assertEqual('bar', addresses[0].target_name)
# Two targets with no name in the same BUILD file cause an error.
self.add_to_build_file('foo/bar/BUILD', '\njvm_binary()\n')
self.assert_parser_error(build_file, "defines address 'bar' more than once")
# Test that omitting the name in a target at the build root is disallowed.
self.add_to_build_file('BUILD', '\njava_library()\n')
build_file = self.create_buildfile('BUILD')
self.assert_parser_error(build_file,
"Targets in root-level BUILD files must be named explicitly")
def test_addressable_exceptions(self):
self.add_to_build_file('b/BUILD', 'java_library(name="foo", "bad_arg")')
build_file_b = self.create_buildfile('b/BUILD')
self.assert_parser_error(build_file_b,
'non-keyword arg after keyword arg')
self.add_to_build_file('d/BUILD', dedent(
"""
java_library(
name="foo",
dependencies=[
object(),
]
)
"""
))
build_file_d = self.create_buildfile('d/BUILD')
self.assert_parser_error(build_file_d,
'dependencies passed to Target constructors must be strings')
def test_noop_parse(self):
self.add_to_build_file('BUILD', '')
build_file = self.create_buildfile('BUILD')
address_map = set(self.build_file_parser.parse_build_file(build_file))
self.assertEqual(len(address_map), 0)
def test_invalid_unicode_in_build_file(self):
"""Demonstrate that unicode characters causing parse errors raise real parse errors."""
self.add_to_build_file('BUILD', ensure_binary(dedent(
"""
jvm_binary(name = ‘hello’, # Parse error due to smart quotes (non ascii characters)
source = 'HelloWorld.java'
main = 'foo.HelloWorld',
)
"""
)))
build_file = self.create_buildfile('BUILD')
self.assert_parser_error(build_file, 'invalid syntax')
def test_unicode_string_in_build_file(self):
"""Demonstrates that a string containing unicode should work in a BUILD file."""
self.add_to_build_file('BUILD', ensure_binary(dedent(
"""
java_library(
name='foo',
sources=['א.java']
)
"""
)))
build_file = self.create_buildfile('BUILD')
self.build_file_parser.parse_build_file(build_file)
class BuildFileParserTargetTest(BaseTest):
@property
def alias_groups(self):
return BuildFileAliases(targets={'fake': ErrorTarget})
def create_buildfile(self, path):
return BuildFile(FileSystemProjectTree(self.build_root), path)
def test_trivial_target(self):
self.add_to_build_file('BUILD', 'fake(name="foozle")')
build_file = self.create_buildfile('BUILD')
address_map = self.build_file_parser.parse_build_file(build_file)
self.assertEqual(len(address_map), 1)
address, proxy = address_map.popitem()
self.assertEqual(address, BuildFileAddress(build_file, 'foozle'))
self.assertEqual(proxy.addressed_name, 'foozle')
self.assertEqual(proxy.addressed_type, ErrorTarget)
def test_sibling_build_files(self):
self.add_to_build_file('BUILD', dedent(
"""
fake(name="base",
dependencies=[
':foo',
])
"""))
self.add_to_build_file('BUILD.foo', dedent(
"""
fake(name="foo",
dependencies=[
':bat',
])
"""))
self.add_to_build_file('./BUILD.bar', dedent(
"""
fake(name="bat")
"""))
bar_build_file = self.create_buildfile('BUILD.bar')
base_build_file = self.create_buildfile('BUILD')
foo_build_file = self.create_buildfile('BUILD.foo')
address_map = self.build_file_parser.address_map_from_build_files(
BuildFile.get_build_files_family(FileSystemProjectTree(self.build_root), "."))
addresses = address_map.keys()
self.assertEqual({bar_build_file, base_build_file, foo_build_file},
set([address.build_file for address in addresses]))
self.assertEqual({'//:base', '//:foo', '//:bat'},
set([address.spec for address in addresses]))
def test_build_file_duplicates(self):
# This workspace has two targets in the same file with the same name.
self.add_to_build_file('BUILD', 'fake(name="foo")\n')
self.add_to_build_file('BUILD', 'fake(name="foo")\n')
with self.assertRaises(BuildFileParser.AddressableConflictException):
base_build_file = self.create_buildfile('BUILD')
self.build_file_parser.parse_build_file(base_build_file)
def test_sibling_build_files_duplicates(self):
# This workspace is malformed, you can't shadow a name in a sibling BUILD file
self.add_to_build_file('BUILD', dedent(
"""
fake(name="base",
dependencies=[
':foo',
])
"""))
self.add_to_build_file('BUILD.foo', dedent(
"""
fake(name="foo",
dependencies=[
':bat',
])
"""))
self.add_to_build_file('./BUILD.bar', dedent(
"""
fake(name="base")
"""))
with self.assertRaises(BuildFileParser.SiblingConflictException):
self.build_file_parser.address_map_from_build_files(
BuildFile.get_build_files_family(FileSystemProjectTree(self.build_root), '.'))
class BuildFileParserExposedObjectTest(BaseTest):
@property
def alias_groups(self):
return BuildFileAliases(objects={'fake_object': object()})
def test_exposed_object(self):
self.add_to_build_file('BUILD', """fake_object""")
build_file = BuildFile(FileSystemProjectTree(self.build_root), 'BUILD')
address_map = self.build_file_parser.parse_build_file(build_file)
self.assertEqual(len(address_map), 0)
class BuildFileParserExposedContextAwareObjectFactoryTest(BaseTest):
Jar = namedtuple('Jar', ['org', 'name', 'rev'])
Repository = namedtuple('Repository', ['name', 'url', 'push_db_basedir'])
Artifact = namedtuple('Artifact', ['org', 'name', 'repo'])
class JarLibrary(Target):
def __init__(self, jars=None, **kwargs):
super(BuildFileParserExposedContextAwareObjectFactoryTest.JarLibrary, self).__init__(**kwargs)
self.jars = jars or []
class JvmLibrary(Target):
def __init__(self, provides=None, **kwargs):
super(BuildFileParserExposedContextAwareObjectFactoryTest.JvmLibrary, self).__init__(**kwargs)
self.provides = provides
class JavaLibrary(JvmLibrary):
pass
class ScalaLibrary(JvmLibrary):
pass
@classmethod
def make_lib(cls, parse_context):
def real_make_lib(org, name, rev):
dep = parse_context.create_object('jar', org=org, name=name, rev=rev)
parse_context.create_object('jar_library', name=name, jars=[dep])
return real_make_lib
@classmethod
def create_java_libraries(cls, parse_context):
def real_create_java_libraries(base_name,
org='com.twitter',
provides_java_name=None,
provides_scala_name=None):
def provides_artifact(provides_name):
if provides_name is None:
return None
jvm_repo = cls.Repository(
name='maven-central',
url='http://maven.example.com',
push_db_basedir=os.path.join('build-support', 'ivy', 'pushdb'),
)
return parse_context.create_object('artifact',
org=org,
name=provides_name,
repo=jvm_repo)
parse_context.create_object('java_library',
name='{}-java'.format(base_name),
provides=provides_artifact(provides_java_name))
parse_context.create_object('scala_library',
name='{}-scala'.format(base_name),
provides=provides_artifact(provides_scala_name))
return real_create_java_libraries
def setUp(self):
super(BuildFileParserExposedContextAwareObjectFactoryTest, self).setUp()
self._paths = set()
def path_relative_util(self, parse_context):
def real_path_relative_util(path):
self._paths.add(os.path.join(parse_context.rel_path, path))
return real_path_relative_util
@property
def alias_groups(self):
return BuildFileAliases(
targets={
'jar_library': self.JarLibrary,
'java_library': self.JavaLibrary,
'scala_library': self.ScalaLibrary,
},
context_aware_object_factories={
'make_lib': self.make_lib,
'create_java_libraries': self.create_java_libraries,
'path_util': self.path_relative_util,
},
objects={
'artifact': self.Artifact,
'jar': self.Jar,
}
)
def test_context_aware_object_factories(self):
contents = dedent("""
create_java_libraries(base_name="create-java-libraries",
provides_java_name="test-java",
provides_scala_name="test-scala")
make_lib("com.foo.test", "does_not_exists", "1.0")
path_util("baz")
""")
self.create_file('3rdparty/BUILD', contents)
build_file = BuildFile(FileSystemProjectTree(self.build_root), '3rdparty/BUILD')
address_map = self.build_file_parser.parse_build_file(build_file)
registered_proxies = set(address_map.values())
self.assertEqual(len(registered_proxies), 3)
targets_created = {}
for target_proxy in registered_proxies:
targets_created[target_proxy.addressed_name] = target_proxy.addressed_type
self.assertEqual({'does_not_exists',
'create-java-libraries-scala',
'create-java-libraries-java'},
set(targets_created.keys()))
self.assertEqual(targets_created['does_not_exists'], self.JarLibrary)
self.assertEqual(targets_created['create-java-libraries-java'], self.JavaLibrary)
self.assertEqual(targets_created['create-java-libraries-scala'], self.ScalaLibrary)
self.assertEqual({'3rdparty/baz'}, self._paths)
def test_raises_parse_error(self):
self.add_to_build_file('BUILD', 'foo(name = = "baz")')
build_file = BuildFile(FileSystemProjectTree(self.build_root), 'BUILD')
with self.assertRaises(BuildFileParser.ParseError):
self.build_file_parser.parse_build_file(build_file)
# Test some corner cases for the context printing
# Error at beginning of BUILD file
build_file = self.add_to_build_file('begin/BUILD', dedent("""
*?&INVALID! = 'foo'
target(
name='bar',
dependencies= [
':baz',
],
)
"""))
with self.assertRaises(BuildFileParser.ParseError):
self.build_file_parser.parse_build_file(build_file)
# Error at end of BUILD file
build_file = self.add_to_build_file('end/BUILD', dedent("""
target(
name='bar',
dependencies= [
':baz',
],
)
*?&INVALID! = 'foo'
"""))
with self.assertRaises(BuildFileParser.ParseError):
self.build_file_parser.parse_build_file(build_file)
# Error in the middle of BUILD file > 6 lines
build_file = self.add_to_build_file('middle/BUILD', dedent("""
target(
name='bar',
*?&INVALID! = 'foo'
dependencies = [
':baz',
],
)
"""))
with self.assertRaises(BuildFileParser.ParseError):
self.build_file_parser.parse_build_file(build_file)
# Error in very short build file.
build_file = self.add_to_build_file('short/BUILD', dedent("""
target(name='bar', dependencies = [':baz'],) *?&INVALID! = 'foo'
"""))
with self.assertRaises(BuildFileParser.ParseError):
self.build_file_parser.parse_build_file(build_file)
def test_raises_execute_error(self):
self.add_to_build_file('BUILD', 'undefined_alias(name="baz")')
build_file = BuildFile(FileSystemProjectTree(self.build_root), 'BUILD')
with self.assertRaises(BuildFileParser.ExecuteError):
self.build_file_parser.parse_build_file(build_file)
  def test_build_file_parser_error_hierarchy(self):
"""Exception handling code depends on the fact that all explicit exceptions from BuildFileParser
are subclassed from the BuildFileParserError base class.
"""
def assert_build_file_parser_error(e):
self.assertIsInstance(e, BuildFileParser.BuildFileParserError)
assert_build_file_parser_error(BuildFileParser.BuildFileScanError())
assert_build_file_parser_error(BuildFileParser.AddressableConflictException())
assert_build_file_parser_error(BuildFileParser.SiblingConflictException())
assert_build_file_parser_error(BuildFileParser.ParseError())
assert_build_file_parser_error(BuildFileParser.ExecuteError())
|
apache-2.0
|
n0m4dz/odoo
|
addons/delivery/sale.py
|
42
|
4541
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class sale_order_line(osv.Model):
_inherit = 'sale.order.line'
_columns = {
'is_delivery': fields.boolean("Is a Delivery"),
}
_defaults = {
'is_delivery': False
}
class sale_order(osv.Model):
_inherit = 'sale.order'
_columns = {
'carrier_id': fields.many2one(
"delivery.carrier", string="Delivery Method",
help="Complete this field if you plan to invoice the shipping based on picking."),
}
def onchange_partner_id(self, cr, uid, ids, part, context=None):
result = super(sale_order, self).onchange_partner_id(cr, uid, ids, part, context=context)
if part:
dtype = self.pool.get('res.partner').browse(cr, uid, part, context=context).property_delivery_carrier.id
            # TDE NOTE: not sure the added 'if dtype' is valid
if dtype:
result['value']['carrier_id'] = dtype
return result
def _delivery_unset(self, cr, uid, ids, context=None):
sale_obj = self.pool['sale.order.line']
line_ids = sale_obj.search(cr, uid, [('order_id', 'in', ids), ('is_delivery', '=', True)],context=context)
sale_obj.unlink(cr, uid, line_ids, context=context)
def delivery_set(self, cr, uid, ids, context=None):
line_obj = self.pool.get('sale.order.line')
grid_obj = self.pool.get('delivery.grid')
carrier_obj = self.pool.get('delivery.carrier')
acc_fp_obj = self.pool.get('account.fiscal.position')
self._delivery_unset(cr, uid, ids, context=context)
currency_obj = self.pool.get('res.currency')
line_ids = []
for order in self.browse(cr, uid, ids, context=context):
grid_id = carrier_obj.grid_get(cr, uid, [order.carrier_id.id], order.partner_shipping_id.id)
if not grid_id:
raise osv.except_osv(_('No Grid Available!'), _('No grid matching for this carrier!'))
if order.state not in ('draft', 'sent'):
                raise osv.except_osv(_('Order not in Draft State!'), _('The order state has to be draft to add delivery lines.'))
grid = grid_obj.browse(cr, uid, grid_id, context=context)
taxes = grid.carrier_id.product_id.taxes_id.filtered(lambda t: t.company_id.id == order.company_id.id)
fpos = order.fiscal_position or False
taxes_ids = acc_fp_obj.map_tax(cr, uid, fpos, taxes)
price_unit = grid_obj.get_price(cr, uid, grid.id, order, time.strftime('%Y-%m-%d'), context)
if order.company_id.currency_id.id != order.pricelist_id.currency_id.id:
price_unit = currency_obj.compute(cr, uid, order.company_id.currency_id.id, order.pricelist_id.currency_id.id,
price_unit, context=dict(context or {}, date=order.date_order))
values = {
'order_id': order.id,
'name': grid.carrier_id.name,
'product_uom_qty': 1,
'product_uom': grid.carrier_id.product_id.uom_id.id,
'product_id': grid.carrier_id.product_id.id,
'price_unit': price_unit,
'tax_id': [(6, 0, taxes_ids)],
'is_delivery': True,
}
if order.order_line:
values['sequence'] = order.order_line[-1].sequence + 1
line_id = line_obj.create(cr, uid, values, context=context)
line_ids.append(line_id)
return line_ids
|
agpl-3.0
|
idegtiarov/ceilometer
|
ceilometer/storage/sqlalchemy/migrate_repo/versions/044_restore_long_uuid_data_types.py
|
11
|
1630
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
resource = Table('resource', meta, autoload=True)
resource.c.user_id.alter(type=String(255))
resource.c.project_id.alter(type=String(255))
resource.c.resource_id.alter(type=String(255))
resource.c.source_id.alter(type=String(255))
sample = Table('sample', meta, autoload=True)
sample.c.message_signature.alter(type=String(64))
sample.c.message_id.alter(type=String(128))
alarm = Table('alarm', meta, autoload=True)
alarm.c.alarm_id.alter(type=String(128))
alarm.c.user_id.alter(type=String(255))
alarm.c.project_id.alter(type=String(255))
alarm_history = Table('alarm_history', meta, autoload=True)
alarm_history.c.alarm_id.alter(type=String(128))
alarm_history.c.user_id.alter(type=String(255))
alarm_history.c.project_id.alter(type=String(255))
alarm_history.c.event_id.alter(type=String(128))
alarm_history.c.on_behalf_of.alter(type=String(255))
|
apache-2.0
|
marscher/cython
|
Cython/Build/BuildExecutable.py
|
27
|
4322
|
"""
Compile a Python script into an executable that embeds CPython and run it.
Requires CPython to be built as a shared library ('libpythonX.Y').
Basic usage:
python cythonrun somefile.py [ARGS]
"""
from __future__ import absolute_import
DEBUG = True
import sys
import os
from distutils import sysconfig
def get_config_var(name, default=''):
return sysconfig.get_config_var(name) or default
INCDIR = sysconfig.get_python_inc()
LIBDIR1 = get_config_var('LIBDIR')
LIBDIR2 = get_config_var('LIBPL')
PYLIB = get_config_var('LIBRARY')
PYLIB_DYN = get_config_var('LDLIBRARY')
if PYLIB_DYN == PYLIB:
# no shared library
PYLIB_DYN = ''
else:
PYLIB_DYN = os.path.splitext(PYLIB_DYN[3:])[0] # 'lib(XYZ).so' -> XYZ
CC = get_config_var('CC', os.environ.get('CC', ''))
CFLAGS = get_config_var('CFLAGS') + ' ' + os.environ.get('CFLAGS', '')
LINKCC = get_config_var('LINKCC', os.environ.get('LINKCC', CC))
LINKFORSHARED = get_config_var('LINKFORSHARED')
LIBS = get_config_var('LIBS')
SYSLIBS = get_config_var('SYSLIBS')
EXE_EXT = sysconfig.get_config_var('EXE')
def _debug(msg, *args):
if DEBUG:
if args:
msg = msg % args
sys.stderr.write(msg + '\n')
def dump_config():
_debug('INCDIR: %s', INCDIR)
_debug('LIBDIR1: %s', LIBDIR1)
_debug('LIBDIR2: %s', LIBDIR2)
_debug('PYLIB: %s', PYLIB)
_debug('PYLIB_DYN: %s', PYLIB_DYN)
_debug('CC: %s', CC)
_debug('CFLAGS: %s', CFLAGS)
_debug('LINKCC: %s', LINKCC)
_debug('LINKFORSHARED: %s', LINKFORSHARED)
_debug('LIBS: %s', LIBS)
_debug('SYSLIBS: %s', SYSLIBS)
_debug('EXE_EXT: %s', EXE_EXT)
def runcmd(cmd, shell=True):
if shell:
cmd = ' '.join(cmd)
_debug(cmd)
else:
_debug(' '.join(cmd))
try:
import subprocess
except ImportError: # Python 2.3 ...
returncode = os.system(cmd)
else:
returncode = subprocess.call(cmd, shell=shell)
if returncode:
sys.exit(returncode)
def clink(basename):
runcmd([LINKCC, '-o', basename + EXE_EXT, basename+'.o', '-L'+LIBDIR1, '-L'+LIBDIR2]
+ [PYLIB_DYN and ('-l'+PYLIB_DYN) or os.path.join(LIBDIR1, PYLIB)]
+ LIBS.split() + SYSLIBS.split() + LINKFORSHARED.split())
def ccompile(basename):
runcmd([CC, '-c', '-o', basename+'.o', basename+'.c', '-I' + INCDIR] + CFLAGS.split())
def cycompile(input_file, options=()):
from ..Compiler import Version, CmdLine, Main
options, sources = CmdLine.parse_command_line(list(options or ()) + ['--embed', input_file])
_debug('Using Cython %s to compile %s', Version.version, input_file)
result = Main.compile(sources, options)
if result.num_errors > 0:
sys.exit(1)
def exec_file(program_name, args=()):
runcmd([os.path.abspath(program_name)] + list(args), shell=False)
def build(input_file, compiler_args=(), force=False):
"""
Build an executable program from a Cython module.
Returns the name of the executable file.
"""
basename = os.path.splitext(input_file)[0]
exe_file = basename + EXE_EXT
if not force and os.path.abspath(exe_file) == os.path.abspath(input_file):
raise ValueError("Input and output file names are the same, refusing to overwrite")
if (not force and os.path.exists(exe_file) and os.path.exists(input_file)
and os.path.getmtime(input_file) <= os.path.getmtime(exe_file)):
_debug("File is up to date, not regenerating %s", exe_file)
return exe_file
cycompile(input_file, compiler_args)
ccompile(basename)
clink(basename)
return exe_file
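# Illustrative usage sketch (an assumption, not part of the original module);
# 'hello.py' and the argument list below are hypothetical:
#
#   exe = build('hello.py')            # cythonize, C-compile and link hello.py
#   exec_file(exe, ['--some-flag'])    # run the freshly built executable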
def build_and_run(args):
"""
    Build an executable program from a Cython module and run it.
    Arguments after the module name will be passed verbatim to the
program.
"""
cy_args = []
last_arg = None
for i, arg in enumerate(args):
if arg.startswith('-'):
cy_args.append(arg)
elif last_arg in ('-X', '--directive'):
cy_args.append(arg)
else:
input_file = arg
args = args[i+1:]
break
last_arg = arg
else:
raise ValueError('no input file provided')
program_name = build(input_file, cy_args)
exec_file(program_name, args)
if __name__ == '__main__':
build_and_run(sys.argv[1:])
|
apache-2.0
|
leiferikb/bitpop
|
src/tools/resources/find_unused_resources.py
|
105
|
6465
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script searches for unused art assets listed in a .grd file.
It uses git grep to look for references to the IDR resource id or the base
filename. If neither is found, the file is reported unused.
Requires a git checkout. Must be run from your checkout's "src" root.
Example:
cd /work/chrome/src
  tools/resources/find_unused_resources.py ash/resources/ash_resources.grd
"""
__author__ = '[email protected] (James Cook)'
import os
import re
import subprocess
import sys
def GetBaseResourceId(resource_id):
"""Removes common suffixes from a resource ID.
  Removes suffixes that may be added by macros like IMAGE_GRID or IMAGE_BORDER.
For example, converts IDR_FOO_LEFT and IDR_FOO_RIGHT to just IDR_FOO.
Args:
resource_id: String resource ID.
Returns:
A string with the base part of the resource ID.
"""
suffixes = [
'_TOP_LEFT', '_TOP', '_TOP_RIGHT',
'_LEFT', '_CENTER', '_RIGHT',
'_BOTTOM_LEFT', '_BOTTOM', '_BOTTOM_RIGHT',
'_TL', '_T', '_TR',
'_L', '_M', '_R',
'_BL', '_B', '_BR']
# Note: This does not check _HOVER, _PRESSED, _HOT, etc. as those are never
# used in macros.
for suffix in suffixes:
if resource_id.endswith(suffix):
resource_id = resource_id[:-len(suffix)]
return resource_id
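# Illustrative examples (not part of the original script): with the suffix list
# above, GetBaseResourceId('IDR_FOO_LEFT') and GetBaseResourceId('IDR_FOO_RIGHT')
# both return 'IDR_FOO', while an ID without a known suffix is returned unchanged.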
def FindFilesWithContents(string_a, string_b):
"""Returns list of paths of files that contain |string_a| or |string_b|.
Uses --name-only to print the file paths. The default behavior of git grep
is to OR together multiple patterns.
Args:
string_a: A string to search for (not a regular expression).
string_b: As above.
Returns:
A list of file paths as strings.
"""
matching_files = subprocess.check_output([
'git', 'grep', '--name-only', '--fixed-strings', '-e', string_a,
'-e', string_b])
files_list = matching_files.split('\n')
# The output ends in a newline, so slice that off.
files_list = files_list[:-1]
return files_list
def GetUnusedResources(grd_filepath):
"""Returns a list of resources that are unused in the code.
Prints status lines to the console because this function is quite slow.
Args:
grd_filepath: Path to a .grd file listing resources.
Returns:
A list of pairs of [resource_id, filepath] for the unused resources.
"""
unused_resources = []
grd_file = open(grd_filepath, 'r')
grd_data = grd_file.read()
print 'Checking:'
# Match the resource id and file path out of substrings like:
# ...name="IDR_FOO_123" file="common/foo.png"...
# by matching between the quotation marks.
pattern = re.compile(
r"""name="([^"]*)" # Match resource ID between quotes.
\s* # Run of whitespace, including newlines.
file="([^"]*)" # Match file path between quotes.""",
re.VERBOSE)
# Use finditer over the file contents because there may be newlines between
# the name and file attributes.
searched = set()
for result in pattern.finditer(grd_data):
# Extract the IDR resource id and file path.
resource_id = result.group(1)
filepath = result.group(2)
filename = os.path.basename(filepath)
base_resource_id = GetBaseResourceId(resource_id)
# Do not bother repeating searches.
key = (base_resource_id, filename)
if key in searched:
continue
searched.add(key)
# Print progress as we go along.
print resource_id
# Ensure the resource isn't used anywhere by checking both for the resource
# id (which should appear in C++ code) and the raw filename (in case the
# file is referenced in a script, test HTML file, etc.).
matching_files = FindFilesWithContents(base_resource_id, filename)
# Each file is matched once in the resource file itself. If there are no
# other matching files, it is unused.
if len(matching_files) == 1:
# Give the user some happy news.
print 'Unused!'
unused_resources.append([resource_id, filepath])
return unused_resources
def GetScaleDirectories(resources_path):
"""Returns a list of paths to per-scale-factor resource directories.
Assumes the directory names end in '_percent', for example,
ash/resources/default_200_percent or
chrome/app/theme/resources/touch_140_percent
Args:
resources_path: The base path of interest.
Returns:
A list of paths relative to the 'src' directory.
"""
file_list = os.listdir(resources_path)
scale_directories = []
for file_entry in file_list:
file_path = os.path.join(resources_path, file_entry)
if os.path.isdir(file_path) and file_path.endswith('_percent'):
scale_directories.append(file_path)
scale_directories.sort()
return scale_directories
def main():
# The script requires exactly one parameter, the .grd file path.
if len(sys.argv) != 2:
print 'Usage: tools/resources/find_unused_resources.py <path/to/grd>'
sys.exit(1)
grd_filepath = sys.argv[1]
# Try to ensure we are in a source checkout.
current_dir = os.getcwd()
if os.path.basename(current_dir) != 'src':
print 'Script must be run in your "src" directory.'
sys.exit(1)
# We require a git checkout to use git grep.
if not os.path.exists(current_dir + '/.git'):
print 'You must use a git checkout for this script to run.'
print current_dir + '/.git', 'not found.'
sys.exit(1)
# Look up the scale-factor directories.
resources_path = os.path.dirname(grd_filepath)
scale_directories = GetScaleDirectories(resources_path)
if not scale_directories:
print 'No scale directories (like "default_100_percent") found.'
sys.exit(1)
# |unused_resources| stores pairs of [resource_id, filepath] for resource ids
# that are not referenced in the code.
unused_resources = GetUnusedResources(grd_filepath)
if not unused_resources:
print 'All resources are used.'
sys.exit(0)
# Dump our output for the user.
print
print 'Unused resource ids:'
for resource_id, filepath in unused_resources:
print resource_id
# Print a list of 'git rm' command lines to remove unused assets.
print
print 'Unused files:'
for resource_id, filepath in unused_resources:
for directory in scale_directories:
print 'git rm ' + os.path.join(directory, filepath)
if __name__ == '__main__':
main()
|
gpl-3.0
|
MinFu/youtube-dl
|
youtube_dl/extractor/freesound.py
|
192
|
1392
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class FreesoundIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?freesound\.org/people/([^/]+)/sounds/(?P<id>[^/]+)'
_TEST = {
'url': 'http://www.freesound.org/people/miklovan/sounds/194503/',
'md5': '12280ceb42c81f19a515c745eae07650',
'info_dict': {
'id': '194503',
'ext': 'mp3',
'title': 'gulls in the city.wav',
'uploader': 'miklovan',
'description': 'the sounds of seagulls in the city',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
music_id = mobj.group('id')
webpage = self._download_webpage(url, music_id)
title = self._html_search_regex(
r'<div id="single_sample_header">.*?<a href="#">(.+?)</a>',
webpage, 'music title', flags=re.DOTALL)
description = self._html_search_regex(
r'<div id="sound_description">(.*?)</div>', webpage, 'description',
fatal=False, flags=re.DOTALL)
return {
'id': music_id,
'title': title,
'url': self._og_search_property('audio', webpage, 'music url'),
'uploader': self._og_search_property('audio:artist', webpage, 'music uploader'),
'description': description,
}
|
unlicense
|
jeezybrick/django
|
django/contrib/gis/geos/coordseq.py
|
374
|
5482
|
"""
This module houses the GEOSCoordSeq object, which is used internally
by GEOSGeometry to house the actual coordinates of the Point,
LineString, and LinearRing geometries.
"""
from ctypes import byref, c_double, c_uint
from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.libgeos import CS_PTR
from django.contrib.gis.shortcuts import numpy
from django.utils.six.moves import range
class GEOSCoordSeq(GEOSBase):
"The internal representation of a list of coordinates inside a Geometry."
ptr_type = CS_PTR
def __init__(self, ptr, z=False):
"Initializes from a GEOS pointer."
if not isinstance(ptr, CS_PTR):
raise TypeError('Coordinate sequence should initialize with a CS_PTR.')
self._ptr = ptr
self._z = z
def __iter__(self):
"Iterates over each point in the coordinate sequence."
for i in range(self.size):
yield self[i]
def __len__(self):
"Returns the number of points in the coordinate sequence."
return int(self.size)
def __str__(self):
"Returns the string representation of the coordinate sequence."
return str(self.tuple)
def __getitem__(self, index):
"Returns the coordinate sequence value at the given index."
coords = [self.getX(index), self.getY(index)]
if self.dims == 3 and self._z:
coords.append(self.getZ(index))
return tuple(coords)
def __setitem__(self, index, value):
"Sets the coordinate sequence value at the given index."
# Checking the input value
if isinstance(value, (list, tuple)):
pass
elif numpy and isinstance(value, numpy.ndarray):
pass
else:
raise TypeError('Must set coordinate with a sequence (list, tuple, or numpy array).')
# Checking the dims of the input
if self.dims == 3 and self._z:
n_args = 3
set_3d = True
else:
n_args = 2
set_3d = False
if len(value) != n_args:
raise TypeError('Dimension of value does not match.')
# Setting the X, Y, Z
self.setX(index, value[0])
self.setY(index, value[1])
if set_3d:
self.setZ(index, value[2])
# #### Internal Routines ####
def _checkindex(self, index):
"Checks the given index."
sz = self.size
if (sz < 1) or (index < 0) or (index >= sz):
raise IndexError('invalid GEOS Geometry index: %s' % str(index))
def _checkdim(self, dim):
"Checks the given dimension."
if dim < 0 or dim > 2:
raise GEOSException('invalid ordinate dimension "%d"' % dim)
# #### Ordinate getting and setting routines ####
def getOrdinate(self, dimension, index):
"Returns the value for the given dimension and index."
self._checkindex(index)
self._checkdim(dimension)
return capi.cs_getordinate(self.ptr, index, dimension, byref(c_double()))
def setOrdinate(self, dimension, index, value):
"Sets the value for the given dimension and index."
self._checkindex(index)
self._checkdim(dimension)
capi.cs_setordinate(self.ptr, index, dimension, value)
def getX(self, index):
"Get the X value at the index."
return self.getOrdinate(0, index)
def setX(self, index, value):
"Set X with the value at the given index."
self.setOrdinate(0, index, value)
def getY(self, index):
"Get the Y value at the given index."
return self.getOrdinate(1, index)
def setY(self, index, value):
"Set Y with the value at the given index."
self.setOrdinate(1, index, value)
def getZ(self, index):
"Get Z with the value at the given index."
return self.getOrdinate(2, index)
def setZ(self, index, value):
"Set Z with the value at the given index."
self.setOrdinate(2, index, value)
# ### Dimensions ###
@property
def size(self):
"Returns the size of this coordinate sequence."
return capi.cs_getsize(self.ptr, byref(c_uint()))
@property
def dims(self):
"Returns the dimensions of this coordinate sequence."
return capi.cs_getdims(self.ptr, byref(c_uint()))
@property
def hasz(self):
"""
Returns whether this coordinate sequence is 3D. This property value is
inherited from the parent Geometry.
"""
return self._z
# ### Other Methods ###
def clone(self):
"Clones this coordinate sequence."
return GEOSCoordSeq(capi.cs_clone(self.ptr), self.hasz)
@property
def kml(self):
"Returns the KML representation for the coordinates."
# Getting the substitution string depending on whether the coordinates have
# a Z dimension.
if self.hasz:
substr = '%s,%s,%s '
else:
substr = '%s,%s,0 '
return '<coordinates>%s</coordinates>' % \
''.join(substr % self[i] for i in range(len(self))).strip()
@property
def tuple(self):
"Returns a tuple version of this coordinate sequence."
n = self.size
if n == 1:
return self[0]
else:
return tuple(self[i] for i in range(n))
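# Illustrative sketch (an assumption, not part of this module): a GEOSCoordSeq is
# normally obtained from a geometry rather than constructed directly, e.g.
#
#   from django.contrib.gis.geos import LineString
#   cs = LineString((0, 0), (1, 1)).coord_seq
#   cs[0]      # -> (0.0, 0.0)
#   len(cs)    # -> 2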
|
bsd-3-clause
|
kant/inasafe
|
safe_extras/raven/utils/__init__.py
|
25
|
3408
|
"""
raven.utils
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import hashlib
import hmac
import logging
try:
import pkg_resources
except ImportError:
pkg_resources = None
import sys
logger = logging.getLogger('raven.errors')
def varmap(func, var, context=None, name=None):
"""
Executes ``func(key_name, value)`` on all values
    recursively discovering dict and list scoped
values.
"""
if context is None:
context = {}
objid = id(var)
if objid in context:
return func(name, '<...>')
context[objid] = 1
if isinstance(var, dict):
ret = dict((k, varmap(func, v, context, k)) for k, v in var.iteritems())
elif isinstance(var, (list, tuple)):
ret = [varmap(func, f, context, name) for f in var]
else:
ret = func(name, var)
del context[objid]
return ret
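# Illustrative example (not part of the original module): varmap() applies the
# callable to every leaf value while preserving the container shape, e.g.
#   varmap(lambda k, v: str(v).upper(), {'user': {'name': 'alice'}})
#   # -> {'user': {'name': 'ALICE'}}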
# We store a cache of module_name->version string to avoid
# continuous imports and lookups of modules
_VERSION_CACHE = {}
def get_version_from_app(module_name, app):
if hasattr(app, 'get_version'):
get_version = app.get_version
if callable(get_version):
version = get_version()
else:
version = get_version
elif hasattr(app, 'VERSION'):
version = app.VERSION
elif hasattr(app, '__version__'):
version = app.__version__
elif pkg_resources:
# pull version from pkg_resources if distro exists
try:
version = pkg_resources.get_distribution(module_name).version
except pkg_resources.DistributionNotFound:
return None
else:
return None
if isinstance(version, (list, tuple)):
version = '.'.join(str(o) for o in version)
return version
def get_versions(module_list=None):
if not module_list:
return {}
ext_module_list = set()
for m in module_list:
parts = m.split('.')
ext_module_list.update('.'.join(parts[:idx]) for idx in xrange(1, len(parts) + 1))
versions = {}
for module_name in ext_module_list:
if module_name not in _VERSION_CACHE:
try:
__import__(module_name)
except ImportError:
continue
try:
app = sys.modules[module_name]
except KeyError:
continue
try:
version = get_version_from_app(module_name, app)
except Exception, e:
logger.exception(e)
version = None
_VERSION_CACHE[module_name] = version
else:
version = _VERSION_CACHE[module_name]
if version is None:
continue
versions[module_name] = version
return versions
def get_signature(message, timestamp, key):
return hmac.new(str(key), '%s %s' % (timestamp, message), hashlib.sha1).hexdigest()
def get_auth_header(protocol, timestamp, client, api_key=None, signature=None, **kwargs):
header = [
('sentry_timestamp', timestamp),
('sentry_client', client),
('sentry_version', protocol),
]
if signature:
header.append(('sentry_signature', signature))
if api_key:
header.append(('sentry_key', api_key))
return 'Sentry %s' % ', '.join('%s=%s' % (k, v) for k, v in header)
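# Illustrative result (an assumption; the values are placeholders, not real
# credentials):
#   get_auth_header('4', 1400000000, 'raven-python/1.0', api_key='public')
#   # -> 'Sentry sentry_timestamp=1400000000, sentry_client=raven-python/1.0,
#   #     sentry_version=4, sentry_key=public'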
|
gpl-3.0
|
inspirehep/sqlalchemy
|
test/orm/inheritance/test_assorted_poly.py
|
28
|
59010
|
"""Miscellaneous inheritance-related tests, many very old.
These are generally tests derived from specific user issues.
"""
from sqlalchemy.testing import eq_
from sqlalchemy import *
from sqlalchemy import util
from sqlalchemy.orm import *
from sqlalchemy.orm.interfaces import MANYTOONE
from sqlalchemy.testing import AssertsExecutionResults
from sqlalchemy import testing
from sqlalchemy.testing.util import function_named
from sqlalchemy.testing import fixtures
from test.orm import _fixtures
from sqlalchemy.testing import eq_
from sqlalchemy.testing.schema import Table, Column
class AttrSettable(object):
def __init__(self, **kwargs):
[setattr(self, k, v) for k, v in kwargs.items()]
def __repr__(self):
return self.__class__.__name__ + "(%s)" % (hex(id(self)))
class RelationshipTest1(fixtures.MappedTest):
"""test self-referential relationships on polymorphic mappers"""
@classmethod
def define_tables(cls, metadata):
global people, managers
people = Table('people', metadata,
Column('person_id', Integer, Sequence('person_id_seq',
optional=True),
primary_key=True),
Column('manager_id', Integer,
ForeignKey('managers.person_id',
use_alter=True, name="mpid_fq")),
Column('name', String(50)),
Column('type', String(30)))
managers = Table('managers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('status', String(30)),
Column('manager_name', String(50))
)
def teardown(self):
people.update(values={people.c.manager_id:None}).execute()
super(RelationshipTest1, self).teardown()
def test_parent_refs_descendant(self):
class Person(AttrSettable):
pass
class Manager(Person):
pass
mapper(Person, people, properties={
'manager':relationship(Manager, primaryjoin=(
people.c.manager_id ==
managers.c.person_id),
uselist=False, post_update=True)
})
mapper(Manager, managers, inherits=Person,
inherit_condition=people.c.person_id==managers.c.person_id)
eq_(class_mapper(Person).get_property('manager').synchronize_pairs,
[(managers.c.person_id,people.c.manager_id)])
session = create_session()
p = Person(name='some person')
m = Manager(name='some manager')
p.manager = m
session.add(p)
session.flush()
session.expunge_all()
p = session.query(Person).get(p.person_id)
m = session.query(Manager).get(m.person_id)
assert p.manager is m
def test_descendant_refs_parent(self):
class Person(AttrSettable):
pass
class Manager(Person):
pass
mapper(Person, people)
mapper(Manager, managers, inherits=Person,
inherit_condition=people.c.person_id==
managers.c.person_id,
properties={
'employee':relationship(Person, primaryjoin=(
people.c.manager_id ==
managers.c.person_id),
foreign_keys=[people.c.manager_id],
uselist=False, post_update=True)
})
session = create_session()
p = Person(name='some person')
m = Manager(name='some manager')
m.employee = p
session.add(m)
session.flush()
session.expunge_all()
p = session.query(Person).get(p.person_id)
m = session.query(Manager).get(m.person_id)
assert m.employee is p
class RelationshipTest2(fixtures.MappedTest):
"""test self-referential relationships on polymorphic mappers"""
@classmethod
def define_tables(cls, metadata):
global people, managers, data
people = Table('people', metadata,
Column('person_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('name', String(50)),
Column('type', String(30)))
managers = Table('managers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('manager_id', Integer, ForeignKey('people.person_id')),
Column('status', String(30)),
)
data = Table('data', metadata,
Column('person_id', Integer, ForeignKey('managers.person_id'),
primary_key=True),
Column('data', String(30))
)
def test_relationshiponsubclass_j1_nodata(self):
self._do_test("join1", False)
def test_relationshiponsubclass_j2_nodata(self):
self._do_test("join2", False)
def test_relationshiponsubclass_j1_data(self):
self._do_test("join1", True)
def test_relationshiponsubclass_j2_data(self):
self._do_test("join2", True)
def test_relationshiponsubclass_j3_nodata(self):
self._do_test("join3", False)
def test_relationshiponsubclass_j3_data(self):
self._do_test("join3", True)
def _do_test(self, jointype="join1", usedata=False):
class Person(AttrSettable):
pass
class Manager(Person):
pass
if jointype == "join1":
poly_union = polymorphic_union({
'person':people.select(people.c.type=='person'),
'manager':join(people, managers,
people.c.person_id==managers.c.person_id)
}, None)
polymorphic_on=poly_union.c.type
elif jointype == "join2":
poly_union = polymorphic_union({
'person':people.select(people.c.type=='person'),
'manager':managers.join(people,
people.c.person_id==managers.c.person_id)
}, None)
polymorphic_on=poly_union.c.type
elif jointype == "join3":
poly_union = None
polymorphic_on = people.c.type
if usedata:
class Data(object):
def __init__(self, data):
self.data = data
mapper(Data, data)
mapper(Person, people,
with_polymorphic=('*', poly_union),
polymorphic_identity='person',
polymorphic_on=polymorphic_on)
if usedata:
mapper(Manager, managers,
inherits=Person,
inherit_condition=people.c.person_id==
managers.c.person_id,
polymorphic_identity='manager',
properties={
'colleague':relationship(
Person,
primaryjoin=managers.c.manager_id==
people.c.person_id,
lazy='select', uselist=False),
'data':relationship(Data, uselist=False)
}
)
else:
mapper(Manager, managers, inherits=Person,
inherit_condition=people.c.person_id==
managers.c.person_id,
polymorphic_identity='manager',
properties={
'colleague':relationship(Person,
primaryjoin=managers.c.manager_id==
people.c.person_id,
lazy='select', uselist=False)
}
)
sess = create_session()
p = Person(name='person1')
m = Manager(name='manager1')
m.colleague = p
if usedata:
m.data = Data('ms data')
sess.add(m)
sess.flush()
sess.expunge_all()
p = sess.query(Person).get(p.person_id)
m = sess.query(Manager).get(m.person_id)
assert m.colleague is p
if usedata:
assert m.data.data == 'ms data'
class RelationshipTest3(fixtures.MappedTest):
"""test self-referential relationships on polymorphic mappers"""
@classmethod
def define_tables(cls, metadata):
global people, managers, data
people = Table('people', metadata,
Column('person_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('colleague_id', Integer, ForeignKey('people.person_id')),
Column('name', String(50)),
Column('type', String(30)))
managers = Table('managers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('status', String(30)),
)
data = Table('data', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('data', String(30))
)
def _generate_test(jointype="join1", usedata=False):
def _do_test(self):
class Person(AttrSettable):
pass
class Manager(Person):
pass
if usedata:
class Data(object):
def __init__(self, data):
self.data = data
if jointype == "join1":
poly_union = polymorphic_union({
'manager':managers.join(people,
people.c.person_id==managers.c.person_id),
'person':people.select(people.c.type=='person')
}, None)
elif jointype =="join2":
poly_union = polymorphic_union({
'manager':join(people, managers,
people.c.person_id==managers.c.person_id),
'person':people.select(people.c.type=='person')
}, None)
elif jointype == 'join3':
poly_union = people.outerjoin(managers)
elif jointype == "join4":
poly_union=None
if usedata:
mapper(Data, data)
if usedata:
mapper(Person, people,
with_polymorphic=('*', poly_union),
polymorphic_identity='person',
polymorphic_on=people.c.type,
properties={
'colleagues':relationship(Person,
primaryjoin=people.c.colleague_id==
people.c.person_id,
remote_side=people.c.colleague_id,
uselist=True),
'data':relationship(Data, uselist=False)
}
)
else:
mapper(Person, people,
with_polymorphic=('*', poly_union),
polymorphic_identity='person',
polymorphic_on=people.c.type,
properties={
'colleagues':relationship(Person,
primaryjoin=people.c.colleague_id==people.c.person_id,
remote_side=people.c.colleague_id, uselist=True)
}
)
mapper(Manager, managers, inherits=Person,
inherit_condition=people.c.person_id==
managers.c.person_id,
polymorphic_identity='manager')
sess = create_session()
p = Person(name='person1')
p2 = Person(name='person2')
p3 = Person(name='person3')
m = Manager(name='manager1')
p.colleagues.append(p2)
m.colleagues.append(p3)
if usedata:
p.data = Data('ps data')
m.data = Data('ms data')
sess.add(m)
sess.add(p)
sess.flush()
sess.expunge_all()
p = sess.query(Person).get(p.person_id)
p2 = sess.query(Person).get(p2.person_id)
p3 = sess.query(Person).get(p3.person_id)
m = sess.query(Person).get(m.person_id)
assert len(p.colleagues) == 1
assert p.colleagues == [p2]
assert m.colleagues == [p3]
if usedata:
assert p.data.data == 'ps data'
assert m.data.data == 'ms data'
do_test = function_named(
_do_test, 'test_relationship_on_base_class_%s_%s' % (
            jointype, usedata and "data" or "nodata"))
return do_test
for jointype in ["join1", "join2", "join3", "join4"]:
for data in (True, False):
func = _generate_test(jointype, data)
setattr(RelationshipTest3, func.__name__, func)
del func
class RelationshipTest4(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
global people, engineers, managers, cars
people = Table('people', metadata,
Column('person_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('name', String(50)))
engineers = Table('engineers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('status', String(30)))
managers = Table('managers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('longer_status', String(70)))
cars = Table('cars', metadata,
Column('car_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('owner', Integer, ForeignKey('people.person_id')))
def test_many_to_one_polymorphic(self):
"""in this test, the polymorphic union is between two subclasses, but
does not include the base table by itself in the union. however, the
        primaryjoin condition is going to be against the base table, and it's a
many-to-one relationship (unlike the test in polymorph.py) so the
column in the base table is explicit. Can the ClauseAdapter figure out
how to alias the primaryjoin to the polymorphic union ?"""
# class definitions
class Person(object):
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __repr__(self):
return "Ordinary person %s" % self.name
class Engineer(Person):
def __repr__(self):
return "Engineer %s, status %s" % \
(self.name, self.status)
class Manager(Person):
def __repr__(self):
return "Manager %s, status %s" % \
(self.name, self.longer_status)
class Car(object):
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __repr__(self):
return "Car number %d" % self.car_id
# create a union that represents both types of joins.
employee_join = polymorphic_union(
{
'engineer':people.join(engineers),
'manager':people.join(managers),
}, "type", 'employee_join')
person_mapper = mapper(Person, people,
with_polymorphic=('*', employee_join),
polymorphic_on=employee_join.c.type,
polymorphic_identity='person')
engineer_mapper = mapper(Engineer, engineers,
inherits=person_mapper,
polymorphic_identity='engineer')
manager_mapper = mapper(Manager, managers,
inherits=person_mapper,
polymorphic_identity='manager')
car_mapper = mapper(Car, cars,
properties= {'employee':
relationship(person_mapper)})
session = create_session()
        # create managers named M1 through M4
for i in range(1,5):
session.add(Manager(name="M%d" % i,
longer_status="YYYYYYYYY"))
        # create engineers named E1 through E4
for i in range(1,5):
session.add(Engineer(name="E%d" % i,status="X"))
session.flush()
engineer4 = session.query(Engineer).\
filter(Engineer.name=="E4").first()
manager3 = session.query(Manager).\
filter(Manager.name=="M3").first()
car1 = Car(employee=engineer4)
session.add(car1)
car2 = Car(employee=manager3)
session.add(car2)
session.flush()
session.expunge_all()
def go():
testcar = session.query(Car).options(
joinedload('employee')
).get(car1.car_id)
assert str(testcar.employee) == "Engineer E4, status X"
self.assert_sql_count(testing.db, go, 1)
car1 = session.query(Car).get(car1.car_id)
usingGet = session.query(person_mapper).get(car1.owner)
usingProperty = car1.employee
assert str(engineer4) == "Engineer E4, status X"
assert str(usingGet) == "Engineer E4, status X"
assert str(usingProperty) == "Engineer E4, status X"
session.expunge_all()
# and now for the lightning round, eager !
def go():
testcar = session.query(Car).options(
joinedload('employee')
).get(car1.car_id)
assert str(testcar.employee) == "Engineer E4, status X"
self.assert_sql_count(testing.db, go, 1)
session.expunge_all()
s = session.query(Car)
c = s.join("employee").filter(Person.name=="E4")[0]
assert c.car_id==car1.car_id
class RelationshipTest5(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
global people, engineers, managers, cars
people = Table('people', metadata,
Column('person_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('name', String(50)),
Column('type', String(50)))
engineers = Table('engineers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('status', String(30)))
managers = Table('managers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('longer_status', String(70)))
cars = Table('cars', metadata,
Column('car_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('owner', Integer, ForeignKey('people.person_id')))
def test_eager_empty(self):
"""test parent object with child relationship to an inheriting mapper,
using eager loads, works when there are no child objects present"""
class Person(object):
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __repr__(self):
return "Ordinary person %s" % self.name
class Engineer(Person):
def __repr__(self):
return "Engineer %s, status %s" % \
(self.name, self.status)
class Manager(Person):
def __repr__(self):
return "Manager %s, status %s" % \
(self.name, self.longer_status)
class Car(object):
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __repr__(self):
return "Car number %d" % self.car_id
person_mapper = mapper(Person, people,
polymorphic_on=people.c.type,
polymorphic_identity='person')
engineer_mapper = mapper(Engineer, engineers,
inherits=person_mapper,
polymorphic_identity='engineer')
manager_mapper = mapper(Manager, managers,
inherits=person_mapper,
polymorphic_identity='manager')
car_mapper = mapper(Car, cars, properties= {
'manager':relationship(
manager_mapper, lazy='joined')})
sess = create_session()
car1 = Car()
car2 = Car()
car2.manager = Manager()
sess.add(car1)
sess.add(car2)
sess.flush()
sess.expunge_all()
carlist = sess.query(Car).all()
assert carlist[0].manager is None
assert carlist[1].manager.person_id == car2.manager.person_id
class RelationshipTest6(fixtures.MappedTest):
"""test self-referential relationships on a single joined-table
inheritance mapper"""
@classmethod
def define_tables(cls, metadata):
global people, managers, data
people = Table('people', metadata,
Column('person_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('name', String(50)),
)
managers = Table('managers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('colleague_id', Integer,
ForeignKey('managers.person_id')),
Column('status', String(30)),
)
def test_basic(self):
class Person(AttrSettable):
pass
class Manager(Person):
pass
mapper(Person, people)
mapper(Manager, managers, inherits=Person,
inherit_condition=people.c.person_id==\
managers.c.person_id,
properties={
'colleague':relationship(Manager,
primaryjoin=managers.c.colleague_id==\
managers.c.person_id,
lazy='select', uselist=False)
}
)
sess = create_session()
m = Manager(name='manager1')
        m2 = Manager(name='manager2')
m.colleague = m2
sess.add(m)
sess.flush()
sess.expunge_all()
m = sess.query(Manager).get(m.person_id)
m2 = sess.query(Manager).get(m2.person_id)
assert m.colleague is m2
class RelationshipTest7(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
global people, engineers, managers, cars, offroad_cars
cars = Table('cars', metadata,
Column('car_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('name', String(30)))
offroad_cars = Table('offroad_cars', metadata,
Column('car_id',Integer, ForeignKey('cars.car_id'),
nullable=False,primary_key=True))
people = Table('people', metadata,
Column('person_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('car_id', Integer, ForeignKey('cars.car_id'),
nullable=False),
Column('name', String(50)))
engineers = Table('engineers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('field', String(30)))
managers = Table('managers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('category', String(70)))
def test_manytoone_lazyload(self):
"""test that lazy load clause to a polymorphic child mapper generates
correctly [ticket:493]"""
class PersistentObject(object):
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
class Status(PersistentObject):
def __repr__(self):
return "Status %s" % self.name
class Person(PersistentObject):
def __repr__(self):
return "Ordinary person %s" % self.name
class Engineer(Person):
def __repr__(self):
return "Engineer %s, field %s" % (self.name,
self.field)
class Manager(Person):
def __repr__(self):
return "Manager %s, category %s" % (self.name,
self.category)
class Car(PersistentObject):
def __repr__(self):
return "Car number %d, name %s" % \
(self.car_id, self.name)
class Offraod_Car(Car):
def __repr__(self):
return "Offroad Car number %d, name %s" % \
(self.car_id,self.name)
employee_join = polymorphic_union(
{
'engineer':people.join(engineers),
'manager':people.join(managers),
}, "type", 'employee_join')
car_join = polymorphic_union(
{
'car' : cars.outerjoin(offroad_cars).\
select(offroad_cars.c.car_id == None).reduce_columns(),
'offroad' : cars.join(offroad_cars)
}, "type", 'car_join')
car_mapper = mapper(Car, cars,
with_polymorphic=('*', car_join) ,polymorphic_on=car_join.c.type,
polymorphic_identity='car',
)
offroad_car_mapper = mapper(Offraod_Car, offroad_cars,
inherits=car_mapper, polymorphic_identity='offroad')
person_mapper = mapper(Person, people,
with_polymorphic=('*', employee_join),
polymorphic_on=employee_join.c.type,
polymorphic_identity='person',
properties={
'car':relationship(car_mapper)
})
engineer_mapper = mapper(Engineer, engineers,
inherits=person_mapper,
polymorphic_identity='engineer')
manager_mapper = mapper(Manager, managers,
inherits=person_mapper,
polymorphic_identity='manager')
session = create_session()
basic_car=Car(name="basic")
offroad_car=Offraod_Car(name="offroad")
for i in range(1,4):
if i%2:
car=Car()
else:
car=Offraod_Car()
session.add(Manager(name="M%d" % i,
category="YYYYYYYYY",car=car))
session.add(Engineer(name="E%d" % i,field="X",car=car))
session.flush()
session.expunge_all()
r = session.query(Person).all()
for p in r:
assert p.car_id == p.car.car_id
class RelationshipTest8(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
global taggable, users
taggable = Table('taggable', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('type', String(30)),
Column('owner_id', Integer, ForeignKey('taggable.id')),
)
users = Table ('users', metadata,
Column('id', Integer, ForeignKey('taggable.id'),
primary_key=True),
Column('data', String(50)),
)
def test_selfref_onjoined(self):
class Taggable(fixtures.ComparableEntity):
pass
class User(Taggable):
pass
mapper( Taggable, taggable,
polymorphic_on=taggable.c.type,
polymorphic_identity='taggable',
properties = {
'owner' : relationship (User,
primaryjoin=taggable.c.owner_id ==taggable.c.id,
remote_side=taggable.c.id
),
})
mapper(User, users, inherits=Taggable,
polymorphic_identity='user',
inherit_condition=users.c.id == taggable.c.id,
)
u1 = User(data='u1')
t1 = Taggable(owner=u1)
sess = create_session()
sess.add(t1)
sess.flush()
sess.expunge_all()
eq_(
sess.query(Taggable).order_by(Taggable.id).all(),
[User(data='u1'), Taggable(owner=User(data='u1'))]
)
class GenerativeTest(fixtures.TestBase, AssertsExecutionResults):
@classmethod
def setup_class(cls):
# cars---owned by--- people (abstract) --- has a --- status
# | ^ ^ |
# | | | |
# | engineers managers |
# | |
# +--------------------------------------- has a ------+
global metadata, status, people, engineers, managers, cars
metadata = MetaData(testing.db)
# table definitions
status = Table('status', metadata,
Column('status_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('name', String(20)))
people = Table('people', metadata,
Column('person_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('status_id', Integer, ForeignKey('status.status_id'),
nullable=False),
Column('name', String(50)))
engineers = Table('engineers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('field', String(30)))
managers = Table('managers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('category', String(70)))
cars = Table('cars', metadata,
Column('car_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('status_id', Integer, ForeignKey('status.status_id'),
nullable=False),
Column('owner', Integer, ForeignKey('people.person_id'),
nullable=False))
metadata.create_all()
@classmethod
def teardown_class(cls):
metadata.drop_all()
def teardown(self):
clear_mappers()
for t in reversed(metadata.sorted_tables):
t.delete().execute()
def test_join_to(self):
# class definitions
class PersistentObject(object):
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
class Status(PersistentObject):
def __repr__(self):
return "Status %s" % self.name
class Person(PersistentObject):
def __repr__(self):
return "Ordinary person %s" % self.name
class Engineer(Person):
def __repr__(self):
return "Engineer %s, field %s, status %s" % (
self.name, self.field, self.status)
class Manager(Person):
def __repr__(self):
return "Manager %s, category %s, status %s" % (
self.name, self.category, self.status)
class Car(PersistentObject):
def __repr__(self):
return "Car number %d" % self.car_id
# create a union that represents both types of joins.
employee_join = polymorphic_union(
{
'engineer':people.join(engineers),
'manager':people.join(managers),
}, "type", 'employee_join')
status_mapper = mapper(Status, status)
person_mapper = mapper(Person, people,
with_polymorphic=('*', employee_join),
polymorphic_on=employee_join.c.type,
polymorphic_identity='person',
properties={'status':relationship(status_mapper)})
engineer_mapper = mapper(Engineer, engineers,
inherits=person_mapper,
polymorphic_identity='engineer')
manager_mapper = mapper(Manager, managers,
inherits=person_mapper,
polymorphic_identity='manager')
car_mapper = mapper(Car, cars, properties= {
'employee':relationship(person_mapper),
'status':relationship(status_mapper)})
session = create_session()
active = Status(name="active")
dead = Status(name="dead")
session.add(active)
session.add(dead)
session.flush()
# TODO: we haven't created assertions for all
# the data combinations created here
# creating 5 managers named from M1 to M5
# and 5 engineers named from E1 to E5
# M4, M5, E4 and E5 are dead
for i in range(1,5):
if i<4:
st=active
else:
st=dead
session.add(Manager(name="M%d" % i,
category="YYYYYYYYY",status=st))
session.add(Engineer(name="E%d" % i,field="X",status=st))
session.flush()
# get E4
engineer4 = session.query(engineer_mapper).\
filter_by(name="E4").one()
# create 2 cars for E4, one active and one dead
car1 = Car(employee=engineer4,status=active)
car2 = Car(employee=engineer4,status=dead)
session.add(car1)
session.add(car2)
session.flush()
# this particular adapt used to cause a recursion overflow;
# added here for testing
e = exists([Car.owner], Car.owner==employee_join.c.person_id)
Query(Person)._adapt_clause(employee_join, False, False)
r = session.query(Person).filter(Person.name.like('%2')).\
join('status').\
filter_by(name="active").\
order_by(Person.person_id)
eq_(str(list(r)), "[Manager M2, category YYYYYYYYY, status "
"Status active, Engineer E2, field X, "
"status Status active]")
r = session.query(Engineer).join('status').\
filter(Person.name.in_(
['E2', 'E3', 'E4', 'M4', 'M2', 'M1']) &
(status.c.name=="active")).order_by(Person.name)
eq_(str(list(r)), "[Engineer E2, field X, status Status "
"active, Engineer E3, field X, status "
"Status active]")
r = session.query(Person).filter(exists([1],
Car.owner==Person.person_id))
eq_(str(list(r)), "[Engineer E4, field X, status Status dead]")
class MultiLevelTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
global table_Employee, table_Engineer, table_Manager
table_Employee = Table( 'Employee', metadata,
Column( 'name', type_= String(100), ),
Column( 'id', primary_key= True, type_= Integer,
test_needs_autoincrement=True),
Column( 'atype', type_= String(100), ),
)
table_Engineer = Table( 'Engineer', metadata,
Column( 'machine', type_= String(100), ),
Column( 'id', Integer, ForeignKey( 'Employee.id', ),
primary_key= True),
)
table_Manager = Table( 'Manager', metadata,
Column( 'duties', type_= String(100), ),
Column( 'id', Integer, ForeignKey( 'Engineer.id', ),
primary_key= True, ),
)
def test_threelevels(self):
class Employee( object):
def set( me, **kargs):
for k,v in kargs.items(): setattr( me, k, v)
return me
def __str__(me):
return str(me.__class__.__name__)+':'+str(me.name)
__repr__ = __str__
class Engineer(Employee):
pass
class Manager(Engineer):
pass
pu_Employee = polymorphic_union( {
'Manager': table_Employee.join(
table_Engineer).join( table_Manager),
'Engineer': select([table_Employee,
table_Engineer.c.machine],
table_Employee.c.atype == 'Engineer',
from_obj=[
table_Employee.join(table_Engineer)]),
'Employee': table_Employee.select(
table_Employee.c.atype == 'Employee'),
}, None, 'pu_employee', )
mapper_Employee = mapper( Employee, table_Employee,
polymorphic_identity= 'Employee',
polymorphic_on= pu_Employee.c.atype,
with_polymorphic=('*', pu_Employee),
)
pu_Engineer = polymorphic_union( {
'Manager': table_Employee.join( table_Engineer).
join( table_Manager),
'Engineer': select([table_Employee,
table_Engineer.c.machine],
table_Employee.c.atype == 'Engineer',
from_obj=[
table_Employee.join(table_Engineer)
]),
}, None, 'pu_engineer', )
mapper_Engineer = mapper( Engineer, table_Engineer,
inherit_condition= table_Engineer.c.id == \
table_Employee.c.id,
inherits= mapper_Employee,
polymorphic_identity= 'Engineer',
polymorphic_on= pu_Engineer.c.atype,
with_polymorphic=('*', pu_Engineer),
)
mapper_Manager = mapper( Manager, table_Manager,
inherit_condition= table_Manager.c.id == \
table_Engineer.c.id,
inherits= mapper_Engineer,
polymorphic_identity= 'Manager',
)
a = Employee().set( name= 'one')
b = Engineer().set( egn= 'two', machine= 'any')
c = Manager().set( name= 'head', machine= 'fast',
duties= 'many')
session = create_session()
session.add(a)
session.add(b)
session.add(c)
session.flush()
assert set(session.query(Employee).all()) == set([a,b,c])
assert set(session.query( Engineer).all()) == set([b,c])
assert session.query( Manager).all() == [c]
class ManyToManyPolyTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
global base_item_table, item_table, base_item_collection_table, \
collection_table
base_item_table = Table(
'base_item', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('child_name', String(255), default=None))
item_table = Table(
'item', metadata,
Column('id', Integer, ForeignKey('base_item.id'),
primary_key=True),
Column('dummy', Integer, default=0))
base_item_collection_table = Table(
'base_item_collection', metadata,
Column('item_id', Integer, ForeignKey('base_item.id')),
Column('collection_id', Integer, ForeignKey('collection.id')))
collection_table = Table(
'collection', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('name', Unicode(255)))
def test_pjoin_compile(self):
"""test that remote_side columns in the secondary join table
arent attempted to be matched to the target polymorphic
selectable"""
class BaseItem(object): pass
class Item(BaseItem): pass
class Collection(object): pass
item_join = polymorphic_union( {
'BaseItem':base_item_table.select(
base_item_table.c.child_name=='BaseItem'),
'Item':base_item_table.join(item_table),
}, None, 'item_join')
mapper(
BaseItem, base_item_table,
with_polymorphic=('*', item_join),
polymorphic_on=base_item_table.c.child_name,
polymorphic_identity='BaseItem',
properties=dict(collections=relationship(Collection,
secondary=base_item_collection_table,
backref="items")))
mapper(
Item, item_table,
inherits=BaseItem,
polymorphic_identity='Item')
mapper(Collection, collection_table)
class_mapper(BaseItem)
class CustomPKTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
global t1, t2
t1 = Table('t1', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('type', String(30), nullable=False),
Column('data', String(30)))
# note that the primary key column in t2 is named differently
t2 = Table('t2', metadata,
Column('t2id', Integer, ForeignKey('t1.id'), primary_key=True),
Column('t2data', String(30)))
def test_custompk(self):
"""test that the primary_key attribute is propagated to the
polymorphic mapper"""
class T1(object):pass
class T2(T1):pass
# create a polymorphic union with the select against the base table first.
# with the join being second, the alias of the union will
# pick up two "primary key" columns. technically the alias should have a
# 2-col pk in any case but the leading select has a NULL for the "t2id" column
d = util.OrderedDict()
d['t1'] = t1.select(t1.c.type=='t1')
d['t2'] = t1.join(t2)
pjoin = polymorphic_union(d, None, 'pjoin')
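# (the resulting pjoin selectable exposes roughly the columns (id, type, data, t2id, t2data);
# the plain-t1 branch fills t2id/t2data with NULL, which is why the alias would otherwise
# appear to carry two primary-key columns)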
mapper(T1, t1, polymorphic_on=t1.c.type,
polymorphic_identity='t1',
with_polymorphic=('*', pjoin),
primary_key=[pjoin.c.id])
mapper(T2, t2, inherits=T1, polymorphic_identity='t2')
ot1 = T1()
ot2 = T2()
sess = create_session()
sess.add(ot1)
sess.add(ot2)
sess.flush()
sess.expunge_all()
# query using get(), using only one value.
# this requires that the select_table mapper
# has the same single-col primary key.
assert sess.query(T1).get(ot1.id).id == ot1.id
ot1 = sess.query(T1).get(ot1.id)
ot1.data = 'hi'
sess.flush()
def test_pk_collapses(self):
"""test that a composite primary key attribute formed by a join
is "collapsed" into its minimal columns"""
class T1(object):pass
class T2(T1):pass
# create a polymorphic union with the select against the base table first.
# with the join being second, the alias of the union will
# pick up two "primary key" columns. technically the alias should have a
# 2-col pk in any case but the leading select has a NULL for the "t2id" column
d = util.OrderedDict()
d['t1'] = t1.select(t1.c.type=='t1')
d['t2'] = t1.join(t2)
pjoin = polymorphic_union(d, None, 'pjoin')
mapper(T1, t1, polymorphic_on=t1.c.type,
polymorphic_identity='t1',
with_polymorphic=('*', pjoin))
mapper(T2, t2, inherits=T1, polymorphic_identity='t2')
assert len(class_mapper(T1).primary_key) == 1
ot1 = T1()
ot2 = T2()
sess = create_session()
sess.add(ot1)
sess.add(ot2)
sess.flush()
sess.expunge_all()
# query using get(), using only one value. this requires that the
# select_table mapper
# has the same single-col primary key.
assert sess.query(T1).get(ot1.id).id == ot1.id
ot1 = sess.query(T1).get(ot1.id)
ot1.data = 'hi'
sess.flush()
class InheritingEagerTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
global people, employees, tags, peopleTags
people = Table('people', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('_type', String(30), nullable=False),
)
employees = Table('employees', metadata,
Column('id', Integer, ForeignKey('people.id'),
primary_key=True),
)
tags = Table('tags', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('label', String(50), nullable=False),
)
peopleTags = Table('peopleTags', metadata,
Column('person_id', Integer,
ForeignKey('people.id')),
Column('tag_id', Integer,
ForeignKey('tags.id')),
)
def test_basic(self):
"""test that Query uses the full set of mapper._eager_loaders
when generating SQL"""
class Person(fixtures.ComparableEntity):
pass
class Employee(Person):
def __init__(self, name='bob'):
self.name = name
class Tag(fixtures.ComparableEntity):
def __init__(self, label):
self.label = label
mapper(Person, people, polymorphic_on=people.c._type,
polymorphic_identity='person', properties={
'tags': relationship(Tag,
secondary=peopleTags,
backref='people', lazy='joined')
})
mapper(Employee, employees, inherits=Person,
polymorphic_identity='employee')
mapper(Tag, tags)
session = create_session()
bob = Employee()
session.add(bob)
tag = Tag('crazy')
bob.tags.append(tag)
tag = Tag('funny')
bob.tags.append(tag)
session.flush()
session.expunge_all()
# query from Employee with limit, query needs to apply eager limiting subquery
instance = session.query(Employee).\
filter_by(id=1).limit(1).first()
assert len(instance.tags) == 2
class MissingPolymorphicOnTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
tablea = Table('tablea', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('adata', String(50)),
)
tableb = Table('tableb', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('aid', Integer, ForeignKey('tablea.id')),
Column('data', String(50)),
)
tablec = Table('tablec', metadata,
Column('id', Integer, ForeignKey('tablea.id'),
primary_key=True),
Column('cdata', String(50)),
)
tabled = Table('tabled', metadata,
Column('id', Integer, ForeignKey('tablec.id'),
primary_key=True),
Column('ddata', String(50)),
)
@classmethod
def setup_classes(cls):
class A(cls.Comparable):
pass
class B(cls.Comparable):
pass
class C(A):
pass
class D(C):
pass
def test_polyon_col_setsup(self):
tablea, tableb, tablec, tabled = self.tables.tablea, \
self.tables.tableb, self.tables.tablec, self.tables.tabled
A, B, C, D = self.classes.A, self.classes.B, self.classes.C, \
self.classes.D
poly_select = select(
[tablea, tableb.c.data.label('discriminator')],
from_obj=tablea.join(tableb)).alias('poly')
mapper(B, tableb)
mapper(A, tablea,
with_polymorphic=('*', poly_select),
polymorphic_on=poly_select.c.discriminator,
properties={
'b':relationship(B, uselist=False)
})
mapper(C, tablec, inherits=A,polymorphic_identity='c')
mapper(D, tabled, inherits=C, polymorphic_identity='d')
c = C(cdata='c1', adata='a1', b=B(data='c'))
d = D(cdata='c2', adata='a2', ddata='d2', b=B(data='d'))
sess = create_session()
sess.add(c)
sess.add(d)
sess.flush()
sess.expunge_all()
eq_(
sess.query(A).all(),
[
C(cdata='c1', adata='a1'),
D(cdata='c2', adata='a2', ddata='d2')
]
)
class JoinedInhAdjacencyTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('people', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('type', String(30)),
)
Table('users', metadata,
Column('id', Integer, ForeignKey('people.id'),
primary_key=True),
Column('supervisor_id', Integer, ForeignKey('people.id')),
)
Table('dudes', metadata,
Column('id', Integer, ForeignKey('users.id'),
primary_key=True),
)
@classmethod
def setup_classes(cls):
class Person(cls.Comparable):
pass
class User(Person):
pass
class Dude(User):
pass
def _roundtrip(self):
Person, User = self.classes.Person, self.classes.User
sess = Session()
u1 = User()
u2 = User()
u2.supervisor = u1
sess.add_all([u1, u2])
sess.commit()
assert u2.supervisor is u1
def _dude_roundtrip(self):
Dude, User = self.classes.Dude, self.classes.User
sess = Session()
u1 = User()
d1 = Dude()
d1.supervisor = u1
sess.add_all([u1, d1])
sess.commit()
assert d1.supervisor is u1
def test_joined_to_base(self):
people, users = self.tables.people, self.tables.users
Person, User = self.classes.Person, self.classes.User
mapper(Person, people,
polymorphic_on=people.c.type,
polymorphic_identity='person',
)
mapper(User, users, inherits=Person,
polymorphic_identity='user',
inherit_condition=(users.c.id == people.c.id),
properties = {
'supervisor': relationship(Person,
primaryjoin=users.c.supervisor_id==people.c.id,
),
}
)
assert User.supervisor.property.direction is MANYTOONE
self._roundtrip()
def test_joined_to_same_subclass(self):
people, users = self.tables.people, self.tables.users
Person, User = self.classes.Person, self.classes.User
mapper(Person, people,
polymorphic_on=people.c.type,
polymorphic_identity='person',
)
mapper(User, users, inherits=Person,
polymorphic_identity='user',
inherit_condition=(users.c.id == people.c.id),
properties = {
'supervisor': relationship(User,
primaryjoin=users.c.supervisor_id==people.c.id,
remote_side=people.c.id,
foreign_keys=[users.c.supervisor_id]
),
}
)
assert User.supervisor.property.direction is MANYTOONE
self._roundtrip()
def test_joined_subclass_to_superclass(self):
people, users, dudes = self.tables.people, self.tables.users, \
self.tables.dudes
Person, User, Dude = self.classes.Person, self.classes.User, \
self.classes.Dude
mapper(Person, people,
polymorphic_on=people.c.type,
polymorphic_identity='person',
)
mapper(User, users, inherits=Person,
polymorphic_identity='user',
inherit_condition=(users.c.id == people.c.id),
)
mapper(Dude, dudes, inherits=User,
polymorphic_identity='dude',
inherit_condition=(dudes.c.id==users.c.id),
properties={
'supervisor': relationship(User,
primaryjoin=users.c.supervisor_id==people.c.id,
remote_side=people.c.id,
foreign_keys=[users.c.supervisor_id]
),
}
)
assert Dude.supervisor.property.direction is MANYTOONE
self._dude_roundtrip()
class Ticket2419Test(fixtures.DeclarativeMappedTest):
"""Test [ticket:2419]'s test case."""
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
class B(Base):
__tablename__ = "b"
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
ds = relationship("D")
es = relationship("E")
class C(A):
__tablename__ = "c"
id = Column(Integer, ForeignKey('a.id'), primary_key=True)
b_id = Column(Integer, ForeignKey('b.id'))
b = relationship("B", primaryjoin=b_id==B.id)
class D(Base):
__tablename__ = "d"
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
b_id = Column(Integer, ForeignKey('b.id'))
class E(Base):
__tablename__ = 'e'
id = Column(Integer, primary_key=True,
test_needs_autoincrement=True)
b_id = Column(Integer, ForeignKey('b.id'))
@testing.fails_on("oracle",
"seems like oracle's query engine can't "
"handle this, not clear if there's an "
"expression-level bug on our end though")
def test_join_w_eager_w_any(self):
A, B, C, D, E = self.classes.A, self.classes.B, \
self.classes.C, self.classes.D, \
self.classes.E
s = Session(testing.db)
b = B(ds=[D()])
s.add_all([
C(
b=b
)
])
s.commit()
q = s.query(B, B.ds.any(D.id==1)).options(joinedload_all("es"))
q = q.join(C, C.b_id==B.id)
q = q.limit(5)
eq_(
q.all(),
[(b, True)]
)
class ColSubclassTest(fixtures.DeclarativeMappedTest, testing.AssertsCompiledSQL):
"""Test [ticket:2918]'s test case."""
run_create_tables = run_deletes = None
__dialect__ = 'default'
@classmethod
def setup_classes(cls):
from sqlalchemy.schema import Column
Base = cls.DeclarativeBasic
class A(Base):
__tablename__ = 'a'
id = Column(Integer, primary_key=True)
class MySpecialColumn(Column):
pass
class B(A):
__tablename__ = 'b'
id = Column(ForeignKey('a.id'), primary_key=True)
x = MySpecialColumn(String)
def test_polymorphic_adaptation(self):
A, B = self.classes.A, self.classes.B
s = Session()
self.assert_compile(
s.query(A).join(B).filter(B.x == 'test'),
"SELECT a.id AS a_id FROM a JOIN "
"(a AS a_1 JOIN b AS b_1 ON a_1.id = b_1.id) "
"ON a.id = b_1.id WHERE b_1.x = :x_1"
)
|
mit
|
keyboard-k/youtube-dl-pet
|
youtube_dl/extractor/ceskatelevize.py
|
17
|
6969
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_parse_unquote,
compat_urllib_parse_urlparse,
)
from ..utils import (
ExtractorError,
float_or_none,
sanitized_Request,
)
class CeskaTelevizeIE(InfoExtractor):
_VALID_URL = r'https?://www\.ceskatelevize\.cz/(porady|ivysilani)/(?:[^/]+/)*(?P<id>[^/#?]+)/*(?:[#?].*)?$'
_TESTS = [{
'url': 'http://www.ceskatelevize.cz/ivysilani/ivysilani/10441294653-hyde-park-civilizace/214411058091220',
'info_dict': {
'id': '61924494876951776',
'ext': 'mp4',
'title': 'Hyde Park Civilizace',
'description': 'md5:fe93f6eda372d150759d11644ebbfb4a',
'thumbnail': 're:^https?://.*\.jpg',
'duration': 3350,
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'http://www.ceskatelevize.cz/ivysilani/10532695142-prvni-republika/bonus/14716-zpevacka-z-duparny-bobina',
'info_dict': {
'id': '61924494876844374',
'ext': 'mp4',
'title': 'První republika: Zpěvačka z Dupárny Bobina',
'description': 'Sága mapující atmosféru první republiky od r. 1918 do r. 1945.',
'thumbnail': 're:^https?://.*\.jpg',
'duration': 88.4,
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
# video with 18+ caution trailer
'url': 'http://www.ceskatelevize.cz/porady/10520528904-queer/215562210900007-bogotart/',
'info_dict': {
'id': '215562210900007-bogotart',
'title': 'Queer: Bogotart',
'description': 'Alternativní průvodce současným queer světem',
},
'playlist': [{
'info_dict': {
'id': '61924494876844842',
'ext': 'mp4',
'title': 'Queer: Bogotart (Varování 18+)',
'duration': 10.2,
},
}, {
'info_dict': {
'id': '61924494877068022',
'ext': 'mp4',
'title': 'Queer: Bogotart (Queer)',
'thumbnail': 're:^https?://.*\.jpg',
'duration': 1558.3,
},
}],
'params': {
# m3u8 download
'skip_download': True,
},
}]
def _real_extract(self, url):
url = url.replace('/porady/', '/ivysilani/').replace('/video/', '')
mobj = re.match(self._VALID_URL, url)
playlist_id = mobj.group('id')
webpage = self._download_webpage(url, playlist_id)
NOT_AVAILABLE_STRING = 'This content is not available at your territory due to limited copyright.'
if '%s</p>' % NOT_AVAILABLE_STRING in webpage:
raise ExtractorError(NOT_AVAILABLE_STRING, expected=True)
typ = self._html_search_regex(
r'getPlaylistUrl\(\[\{"type":"(.+?)","id":".+?"\}\],', webpage, 'type')
episode_id = self._html_search_regex(
r'getPlaylistUrl\(\[\{"type":".+?","id":"(.+?)"\}\],', webpage, 'episode_id')
data = {
'playlist[0][type]': typ,
'playlist[0][id]': episode_id,
'requestUrl': compat_urllib_parse_urlparse(url).path,
'requestSource': 'iVysilani',
}
req = sanitized_Request(
'http://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist',
data=compat_urllib_parse.urlencode(data))
req.add_header('Content-type', 'application/x-www-form-urlencoded')
req.add_header('x-addr', '127.0.0.1')
req.add_header('X-Requested-With', 'XMLHttpRequest')
req.add_header('Referer', url)
playlistpage = self._download_json(req, playlist_id)
playlist_url = playlistpage['url']
if playlist_url == 'error_region':
raise ExtractorError(NOT_AVAILABLE_STRING, expected=True)
req = sanitized_Request(compat_urllib_parse_unquote(playlist_url))
req.add_header('Referer', url)
playlist_title = self._og_search_title(webpage)
playlist_description = self._og_search_description(webpage)
playlist = self._download_json(req, playlist_id)['playlist']
playlist_len = len(playlist)
entries = []
for item in playlist:
formats = []
for format_id, stream_url in item['streamUrls'].items():
formats.extend(self._extract_m3u8_formats(
stream_url, playlist_id, 'mp4', entry_protocol='m3u8_native'))
self._sort_formats(formats)
item_id = item.get('id') or item['assetId']
title = item['title']
duration = float_or_none(item.get('duration'))
thumbnail = item.get('previewImageUrl')
subtitles = {}
if item.get('type') == 'VOD':
subs = item.get('subtitles')
if subs:
subtitles = self.extract_subtitles(episode_id, subs)
entries.append({
'id': item_id,
'title': playlist_title if playlist_len == 1 else '%s (%s)' % (playlist_title, title),
'description': playlist_description if playlist_len == 1 else None,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
})
return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)
def _get_subtitles(self, episode_id, subs):
original_subtitles = self._download_webpage(
subs[0]['url'], episode_id, 'Downloading subtitles')
srt_subs = self._fix_subtitles(original_subtitles)
return {
'cs': [{
'ext': 'srt',
'data': srt_subs,
}]
}
@staticmethod
def _fix_subtitles(subtitles):
""" Convert millisecond-based subtitles to SRT """
def _msectotimecode(msec):
""" Helper utility to convert milliseconds to timecode """
components = []
for divider in [1000, 60, 60, 100]:
components.append(msec % divider)
msec //= divider
return "{3:02}:{2:02}:{1:02},{0:03}".format(*components)
def _fix_subtitle(subtitle):
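# a line of the form "<cue-number>;  <start-ms> <stop-ms>" (see the regex below) becomes
# the cue number followed by an SRT "start --> stop" timecode line; other lines pass through unchanged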
for line in subtitle.splitlines():
m = re.match(r"^\s*([0-9]+);\s*([0-9]+)\s+([0-9]+)\s*$", line)
if m:
yield m.group(1)
start, stop = (_msectotimecode(int(t)) for t in m.groups()[1:])
yield "{0} --> {1}".format(start, stop)
else:
yield line
return "\r\n".join(_fix_subtitle(subtitles))
|
unlicense
|
dmsimard/ansible
|
lib/ansible/cli/__init__.py
|
10
|
20965
|
# Copyright: (c) 2012-2014, Michael DeHaan <[email protected]>
# Copyright: (c) 2016, Toshio Kuratomi <[email protected]>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import os
import subprocess
import sys
from abc import ABCMeta, abstractmethod
from ansible.cli.arguments import option_helpers as opt_help
from ansible import constants as C
from ansible import context
from ansible.errors import AnsibleError
from ansible.inventory.manager import InventoryManager
from ansible.module_utils.six import with_metaclass, string_types
from ansible.module_utils._text import to_bytes, to_text
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.vault import PromptVaultSecret, get_file_vault_secret
from ansible.plugins.loader import add_all_plugin_dirs
from ansible.release import __version__
from ansible.utils.collection_loader import AnsibleCollectionConfig
from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path
from ansible.utils.display import Display
from ansible.utils.path import unfrackpath
from ansible.utils.unsafe_proxy import to_unsafe_text
from ansible.vars.manager import VariableManager
try:
import argcomplete
HAS_ARGCOMPLETE = True
except ImportError:
HAS_ARGCOMPLETE = False
display = Display()
class CLI(with_metaclass(ABCMeta, object)):
''' code behind bin/ansible* programs '''
PAGER = 'less'
# -F (quit-if-one-screen) -R (allow raw ansi control chars)
# -S (chop long lines) -X (disable termcap init and de-init)
LESS_OPTS = 'FRSX'
SKIP_INVENTORY_DEFAULTS = False
def __init__(self, args, callback=None):
"""
Base init method for all command line programs
"""
if not args:
raise ValueError('A non-empty list for args is required')
self.args = args
self.parser = None
self.callback = callback
if C.DEVEL_WARNING and __version__.endswith('dev0'):
display.warning(
'You are running the development version of Ansible. You should only run Ansible from "devel" if '
'you are modifying the Ansible engine, or trying out features under development. This is a rapidly '
'changing source of code and can become unstable at any point.'
)
@abstractmethod
def run(self):
"""Run the ansible command
Subclasses must implement this method. It does the actual work of
running an Ansible command.
"""
self.parse()
display.vv(to_text(opt_help.version(self.parser.prog)))
if C.CONFIG_FILE:
display.v(u"Using %s as config file" % to_text(C.CONFIG_FILE))
else:
display.v(u"No config file found; using defaults")
# warn about deprecated config options
for deprecated in C.config.DEPRECATED:
name = deprecated[0]
why = deprecated[1]['why']
if 'alternatives' in deprecated[1]:
alt = ', use %s instead' % deprecated[1]['alternatives']
else:
alt = ''
ver = deprecated[1].get('version')
date = deprecated[1].get('date')
collection_name = deprecated[1].get('collection_name')
display.deprecated("%s option, %s%s" % (name, why, alt),
version=ver, date=date, collection_name=collection_name)
@staticmethod
def split_vault_id(vault_id):
# return (before_@, after_@)
# if no @, return whole string as after_
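# e.g. 'dev@~/.vault_pass' -> ('dev', '~/.vault_pass'), '~/.vault_pass' -> (None, '~/.vault_pass')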
if '@' not in vault_id:
return (None, vault_id)
parts = vault_id.split('@', 1)
ret = tuple(parts)
return ret
@staticmethod
def build_vault_ids(vault_ids, vault_password_files=None,
ask_vault_pass=None, create_new_password=None,
auto_prompt=True):
vault_password_files = vault_password_files or []
vault_ids = vault_ids or []
# convert vault_password_files into vault_ids slugs
for password_file in vault_password_files:
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, password_file)
# note this makes --vault-id higher precedence than --vault-password-file
# if we want to intertwingle them in order probably need a cli callback to populate vault_ids
# used by --vault-id and --vault-password-file
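# e.g. a password file of '~/.vault_pass' becomes the slug u'default@~/.vault_pass'
# (assuming the stock DEFAULT_VAULT_IDENTITY of 'default')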
vault_ids.append(id_slug)
# if an action needs an encrypt password (create_new_password=True) and we don't
# have other secrets set up, then automatically add a password prompt as well.
# prompts can't/shouldn't work without a tty, so don't add prompt secrets
if ask_vault_pass or (not vault_ids and auto_prompt):
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, u'prompt_ask_vault_pass')
vault_ids.append(id_slug)
return vault_ids
# TODO: remove the now unused args
@staticmethod
def setup_vault_secrets(loader, vault_ids, vault_password_files=None,
ask_vault_pass=None, create_new_password=False,
auto_prompt=True):
# list of tuples
vault_secrets = []
# Depending on the vault_id value (including how --ask-vault-pass / --vault-password-file create a vault_id)
# we need to show different prompts. This is for compat with older Towers that expect a
# certain vault password prompt format, so the 'prompt_ask_vault_pass' vault_id gets the old format.
prompt_formats = {}
# If there are configured default vault identities, they are considered 'first'
# so we prepend them to vault_ids (from cli) here
vault_password_files = vault_password_files or []
if C.DEFAULT_VAULT_PASSWORD_FILE:
vault_password_files.append(C.DEFAULT_VAULT_PASSWORD_FILE)
if create_new_password:
prompt_formats['prompt'] = ['New vault password (%(vault_id)s): ',
'Confirm new vault password (%(vault_id)s): ']
# 2.3 format prompts for --ask-vault-pass
prompt_formats['prompt_ask_vault_pass'] = ['New Vault password: ',
'Confirm New Vault password: ']
else:
prompt_formats['prompt'] = ['Vault password (%(vault_id)s): ']
# The format when we use just --ask-vault-pass needs to match 'Vault password:\s*?$'
prompt_formats['prompt_ask_vault_pass'] = ['Vault password: ']
vault_ids = CLI.build_vault_ids(vault_ids,
vault_password_files,
ask_vault_pass,
create_new_password,
auto_prompt=auto_prompt)
for vault_id_slug in vault_ids:
vault_id_name, vault_id_value = CLI.split_vault_id(vault_id_slug)
if vault_id_value in ['prompt', 'prompt_ask_vault_pass']:
# --vault-id some_name@prompt_ask_vault_pass --vault-id other_name@prompt_ask_vault_pass will be a little
# confusing since it will use the old format without the vault id in the prompt
built_vault_id = vault_id_name or C.DEFAULT_VAULT_IDENTITY
# choose the prompt based on --vault-id=prompt or --ask-vault-pass. --ask-vault-pass
# always gets the old format for Tower compatibility.
# ie, we used --ask-vault-pass, so we need to use the old vault password prompt
# format since Tower needs to match on that format.
prompted_vault_secret = PromptVaultSecret(prompt_formats=prompt_formats[vault_id_value],
vault_id=built_vault_id)
# an empty or invalid password from the prompt will warn and continue to the next
# without erroring globally
try:
prompted_vault_secret.load()
except AnsibleError as exc:
display.warning('Error in vault password prompt (%s): %s' % (vault_id_name, exc))
raise
vault_secrets.append((built_vault_id, prompted_vault_secret))
# update loader with new secrets incrementally, so we can load a vault password
# that is encrypted with a vault secret provided earlier
loader.set_vault_secrets(vault_secrets)
continue
# assuming anything else is a password file
display.vvvvv('Reading vault password file: %s' % vault_id_value)
# read vault_pass from a file
file_vault_secret = get_file_vault_secret(filename=vault_id_value,
vault_id=vault_id_name,
loader=loader)
# an invalid password file will error globally
try:
file_vault_secret.load()
except AnsibleError as exc:
display.warning('Error in vault password file loading (%s): %s' % (vault_id_name, to_text(exc)))
raise
if vault_id_name:
vault_secrets.append((vault_id_name, file_vault_secret))
else:
vault_secrets.append((C.DEFAULT_VAULT_IDENTITY, file_vault_secret))
# update loader with as-yet-known vault secrets
loader.set_vault_secrets(vault_secrets)
return vault_secrets
@staticmethod
def ask_passwords():
''' prompt for connection and become passwords if needed '''
op = context.CLIARGS
sshpass = None
becomepass = None
become_prompt = ''
become_prompt_method = "BECOME" if C.AGNOSTIC_BECOME_PROMPT else op['become_method'].upper()
try:
if op['ask_pass']:
sshpass = getpass.getpass(prompt="SSH password: ")
become_prompt = "%s password[defaults to SSH password]: " % become_prompt_method
else:
become_prompt = "%s password: " % become_prompt_method
if op['become_ask_pass']:
becomepass = getpass.getpass(prompt=become_prompt)
if op['ask_pass'] and becomepass == '':
becomepass = sshpass
except EOFError:
pass
# we 'wrap' the passwords to prevent templating as
# they can contain special chars and trigger it incorrectly
if sshpass:
sshpass = to_unsafe_text(sshpass)
if becomepass:
becomepass = to_unsafe_text(becomepass)
return (sshpass, becomepass)
def validate_conflicts(self, op, runas_opts=False, fork_opts=False):
''' check for conflicting options '''
if fork_opts:
if op.forks < 1:
self.parser.error("The number of processes (--forks) must be >= 1")
return op
@abstractmethod
def init_parser(self, usage="", desc=None, epilog=None):
"""
Create an options parser for most ansible scripts
Subclasses need to implement this method. They will usually call the base class's
init_parser to create a basic version and then add their own options on top of that.
An implementation will look something like this::
def init_parser(self):
super(MyCLI, self).init_parser(usage="My Ansible CLI", inventory_opts=True)
ansible.arguments.option_helpers.add_runas_options(self.parser)
self.parser.add_option('--my-option', dest='my_option', action='store')
"""
self.parser = opt_help.create_base_parser(os.path.basename(self.args[0]), usage=usage, desc=desc, epilog=epilog, )
@abstractmethod
def post_process_args(self, options):
"""Process the command line args
Subclasses need to implement this method. This method validates and transforms the command
line arguments. It can be used to check whether conflicting values were given, whether filenames
exist, etc.
An implementation will look something like this::
def post_process_args(self, options):
options = super(MyCLI, self).post_process_args(options)
if options.addition and options.subtraction:
raise AnsibleOptionsError('Only one of --addition and --subtraction can be specified')
if isinstance(options.listofhosts, string_types):
options.listofhosts = string_types.split(',')
return options
"""
# process tags
if hasattr(options, 'tags') and not options.tags:
# optparse defaults does not do what's expected
# More specifically, we want `--tags` to be additive. So we cannot
# simply change C.TAGS_RUN's default to ["all"] because then passing
# --tags foo would cause us to have ['all', 'foo']
options.tags = ['all']
if hasattr(options, 'tags') and options.tags:
tags = set()
for tag_set in options.tags:
for tag in tag_set.split(u','):
tags.add(tag.strip())
options.tags = list(tags)
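# e.g. options.tags == ['web,db', ' deploy'] ends up as ['web', 'db', 'deploy']
# (set semantics: duplicates dropped, order not guaranteed)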
# process skip_tags
if hasattr(options, 'skip_tags') and options.skip_tags:
skip_tags = set()
for tag_set in options.skip_tags:
for tag in tag_set.split(u','):
skip_tags.add(tag.strip())
options.skip_tags = list(skip_tags)
# process inventory options except for CLIs that require their own processing
if hasattr(options, 'inventory') and not self.SKIP_INVENTORY_DEFAULTS:
if options.inventory:
# should always be list
if isinstance(options.inventory, string_types):
options.inventory = [options.inventory]
# Ensure full paths when needed
options.inventory = [unfrackpath(opt, follow=False) if ',' not in opt else opt for opt in options.inventory]
else:
options.inventory = C.DEFAULT_HOST_LIST
# Dup args set on the root parser and sub parsers result in the root parser ignoring the args. e.g. doing
# 'ansible-galaxy -vvv init' has no verbosity set but 'ansible-galaxy init -vvv' sets a level of 3. To preserve
# back compat with pre-argparse changes we manually scan and set verbosity based on the argv values.
if self.parser.prog in ['ansible-galaxy', 'ansible-vault'] and not options.verbosity:
verbosity_arg = next(iter([arg for arg in self.args if arg.startswith('-v')]), None)
if verbosity_arg:
display.deprecated("Setting verbosity before the arg sub command is deprecated, set the verbosity "
"after the sub command", "2.13", collection_name='ansible.builtin')
options.verbosity = verbosity_arg.count('v')
return options
def parse(self):
"""Parse the command line args
This method parses the command line arguments. It uses the parser
stored in the self.parser attribute and saves the args and options in
context.CLIARGS.
Subclasses need to implement two helper methods, init_parser() and post_process_args() which
are called from this function before and after parsing the arguments.
"""
self.init_parser()
if HAS_ARGCOMPLETE:
argcomplete.autocomplete(self.parser)
try:
options = self.parser.parse_args(self.args[1:])
except SystemExit as e:
if e.code != 0:
self.parser.exit(status=2, message=" \n%s " % self.parser.format_help())
raise
options = self.post_process_args(options)
context._init_global_context(options)
@staticmethod
def version_info(gitinfo=False):
''' return full ansible version info '''
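# e.g. for __version__ == '2.11.0' this returns {'string': '2.11.0', 'full': '2.11.0',
# 'major': 2, 'minor': 11, 'revision': 0}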
if gitinfo:
# expensive call, use with care
ansible_version_string = opt_help.version()
else:
ansible_version_string = __version__
ansible_version = ansible_version_string.split()[0]
ansible_versions = ansible_version.split('.')
for counter in range(len(ansible_versions)):
if ansible_versions[counter] == "":
ansible_versions[counter] = 0
try:
ansible_versions[counter] = int(ansible_versions[counter])
except Exception:
pass
if len(ansible_versions) < 3:
for counter in range(len(ansible_versions), 3):
ansible_versions.append(0)
return {'string': ansible_version_string.strip(),
'full': ansible_version,
'major': ansible_versions[0],
'minor': ansible_versions[1],
'revision': ansible_versions[2]}
@staticmethod
def pager(text):
''' find reasonable way to display text '''
# this is a much simpler form of what is in pydoc.py
if not sys.stdout.isatty():
display.display(text, screen_only=True)
elif 'PAGER' in os.environ:
if sys.platform == 'win32':
display.display(text, screen_only=True)
else:
CLI.pager_pipe(text, os.environ['PAGER'])
else:
p = subprocess.Popen('less --version', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
if p.returncode == 0:
CLI.pager_pipe(text, 'less')
else:
display.display(text, screen_only=True)
@staticmethod
def pager_pipe(text, cmd):
''' pipe text through a pager '''
if 'LESS' not in os.environ:
os.environ['LESS'] = CLI.LESS_OPTS
try:
cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
cmd.communicate(input=to_bytes(text))
except IOError:
pass
except KeyboardInterrupt:
pass
@staticmethod
def _play_prereqs():
options = context.CLIARGS
# all needs loader
loader = DataLoader()
basedir = options.get('basedir', False)
if basedir:
loader.set_basedir(basedir)
add_all_plugin_dirs(basedir)
AnsibleCollectionConfig.playbook_paths = basedir
default_collection = _get_collection_name_from_path(basedir)
if default_collection:
display.warning(u'running with default collection {0}'.format(default_collection))
AnsibleCollectionConfig.default_collection = default_collection
vault_ids = list(options['vault_ids'])
default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST
vault_ids = default_vault_ids + vault_ids
vault_secrets = CLI.setup_vault_secrets(loader,
vault_ids=vault_ids,
vault_password_files=list(options['vault_password_files']),
ask_vault_pass=options['ask_vault_pass'],
auto_prompt=False)
loader.set_vault_secrets(vault_secrets)
# create the inventory, and filter it based on the subset specified (if any)
inventory = InventoryManager(loader=loader, sources=options['inventory'])
# create the variable manager, which will be shared throughout
# the code, ensuring a consistent view of global variables
variable_manager = VariableManager(loader=loader, inventory=inventory, version_info=CLI.version_info(gitinfo=False))
return loader, inventory, variable_manager
@staticmethod
def get_host_list(inventory, subset, pattern='all'):
no_hosts = False
if len(inventory.list_hosts()) == 0:
# Empty inventory
if C.LOCALHOST_WARNING and pattern not in C.LOCALHOST:
display.warning("provided hosts list is empty, only localhost is available. Note that the implicit localhost does not match 'all'")
no_hosts = True
inventory.subset(subset)
hosts = inventory.list_hosts(pattern)
if not hosts and no_hosts is False:
raise AnsibleError("Specified hosts and/or --limit does not match any hosts")
return hosts
|
gpl-3.0
|
baidu/Paddle
|
python/paddle/fluid/tests/unittests/test_mean_op.py
|
3
|
1839
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
class TestMeanOp(OpTest):
def setUp(self):
self.op_type = "mean"
self.dtype = np.float32
self.init_dtype_type()
self.inputs = {'X': np.random.random((10, 10)).astype(self.dtype)}
self.outputs = {'Out': np.mean(self.inputs["X"])}
def init_dtype_type(self):
pass
def test_check_output(self):
self.check_output()
def test_checkout_grad(self):
self.check_grad(['X'], 'Out')
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFP16MeanOp(TestMeanOp):
def init_dtype_type(self):
self.dtype = np.float16
def test_check_output(self):
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=2e-3)
def test_checkout_grad(self):
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_grad_with_place(
place, ['X'], 'Out', max_relative_error=0.8)
if __name__ == "__main__":
unittest.main()
|
apache-2.0
|
yewang15215/django
|
django/contrib/flatpages/migrations/0001_initial.py
|
308
|
1775
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='FlatPage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', models.CharField(max_length=100, verbose_name='URL', db_index=True)),
('title', models.CharField(max_length=200, verbose_name='title')),
('content', models.TextField(verbose_name='content', blank=True)),
('enable_comments', models.BooleanField(default=False, verbose_name='enable comments')),
('template_name', models.CharField(
help_text=(
"Example: 'flatpages/contact_page.html'. If this isn't provided, the system will use "
"'flatpages/default.html'."
), max_length=70, verbose_name='template name', blank=True
)),
('registration_required', models.BooleanField(
default=False, help_text='If this is checked, only logged-in users will be able to view the page.',
verbose_name='registration required'
)),
('sites', models.ManyToManyField(to='sites.Site', verbose_name='sites')),
],
options={
'ordering': ('url',),
'db_table': 'django_flatpage',
'verbose_name': 'flat page',
'verbose_name_plural': 'flat pages',
},
bases=(models.Model,),
),
]
|
bsd-3-clause
|
pbrod/scipy
|
scipy/sparse/dia.py
|
5
|
13114
|
"""Sparse DIAgonal format"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['dia_matrix', 'isspmatrix_dia']
import numpy as np
from .base import isspmatrix, _formats, spmatrix
from .data import _data_matrix
from .sputils import (isshape, upcast_char, getdtype, get_index_dtype,
get_sum_dtype, validateaxis)
from ._sparsetools import dia_matvec
class dia_matrix(_data_matrix):
"""Sparse matrix with DIAgonal storage
This can be instantiated in several ways:
dia_matrix(D)
with a dense matrix
dia_matrix(S)
with another sparse matrix S (equivalent to S.todia())
dia_matrix((M, N), [dtype])
to construct an empty matrix with shape (M, N),
dtype is optional, defaulting to dtype='d'.
dia_matrix((data, offsets), shape=(M, N))
where the ``data[k,:]`` stores the diagonal entries for
diagonal ``offsets[k]`` (See example below)
Attributes
----------
dtype : dtype
Data type of the matrix
shape : 2-tuple
Shape of the matrix
ndim : int
Number of dimensions (this is always 2)
nnz
Number of nonzero elements
data
DIA format data array of the matrix
offsets
DIA format offset array of the matrix
Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import dia_matrix
>>> dia_matrix((3, 4), dtype=np.int8).toarray()
array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int8)
>>> data = np.array([[1, 2, 3, 4]]).repeat(3, axis=0)
>>> offsets = np.array([0, -1, 2])
>>> dia_matrix((data, offsets), shape=(4, 4)).toarray()
array([[1, 0, 3, 0],
[1, 2, 0, 4],
[0, 2, 3, 0],
[0, 0, 3, 4]])
"""
format = 'dia'
def __init__(self, arg1, shape=None, dtype=None, copy=False):
_data_matrix.__init__(self)
if isspmatrix_dia(arg1):
if copy:
arg1 = arg1.copy()
self.data = arg1.data
self.offsets = arg1.offsets
self.shape = arg1.shape
elif isspmatrix(arg1):
if isspmatrix_dia(arg1) and copy:
A = arg1.copy()
else:
A = arg1.todia()
self.data = A.data
self.offsets = A.offsets
self.shape = A.shape
elif isinstance(arg1, tuple):
if isshape(arg1):
# It's a tuple of matrix dimensions (M, N)
# create empty matrix
self.shape = arg1 # spmatrix checks for errors here
self.data = np.zeros((0,0), getdtype(dtype, default=float))
idx_dtype = get_index_dtype(maxval=max(self.shape))
self.offsets = np.zeros((0), dtype=idx_dtype)
else:
try:
# Try interpreting it as (data, offsets)
data, offsets = arg1
except:
raise ValueError('unrecognized form for dia_matrix constructor')
else:
if shape is None:
raise ValueError('expected a shape argument')
self.data = np.atleast_2d(np.array(arg1[0], dtype=dtype, copy=copy))
self.offsets = np.atleast_1d(np.array(arg1[1],
dtype=get_index_dtype(maxval=max(shape)),
copy=copy))
self.shape = shape
else:
#must be dense, convert to COO first, then to DIA
try:
arg1 = np.asarray(arg1)
except:
raise ValueError("unrecognized form for"
" %s_matrix constructor" % self.format)
from .coo import coo_matrix
A = coo_matrix(arg1, dtype=dtype, shape=shape).todia()
self.data = A.data
self.offsets = A.offsets
self.shape = A.shape
if dtype is not None:
self.data = self.data.astype(dtype)
#check format
if self.offsets.ndim != 1:
raise ValueError('offsets array must have rank 1')
if self.data.ndim != 2:
raise ValueError('data array must have rank 2')
if self.data.shape[0] != len(self.offsets):
raise ValueError('number of diagonals (%d) '
'does not match the number of offsets (%d)'
% (self.data.shape[0], len(self.offsets)))
if len(np.unique(self.offsets)) != len(self.offsets):
raise ValueError('offset array contains duplicate values')
def __repr__(self):
format = _formats[self.getformat()][1]
return "<%dx%d sparse matrix of type '%s'\n" \
"\twith %d stored elements (%d diagonals) in %s format>" % \
(self.shape + (self.dtype.type, self.nnz, self.data.shape[0],
format))
def _data_mask(self):
"""Returns a mask of the same shape as self.data, where
mask[i,j] is True when data[i,j] corresponds to a stored element."""
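# e.g. for the 4x4 matrix in the class docstring (offsets [0, -1, 2], data of shape (3, 4))
# the mask rows come out as [T, T, T, T], [T, T, T, F] and [F, F, T, T]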
num_rows, num_cols = self.shape
offset_inds = np.arange(self.data.shape[1])
row = offset_inds - self.offsets[:,None]
mask = (row >= 0)
mask &= (row < num_rows)
mask &= (offset_inds < num_cols)
return mask
def count_nonzero(self):
mask = self._data_mask()
return np.count_nonzero(self.data[mask])
def getnnz(self, axis=None):
if axis is not None:
raise NotImplementedError("getnnz over an axis is not implemented "
"for DIA format")
M,N = self.shape
nnz = 0
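# each stored diagonal contributes min(M, N-k) positions for k >= 0 and min(M+k, N) for k < 0;
# e.g. for the 4x4 docstring example, offsets 0, -1 and 2 contribute 4, 3 and 2 -> nnz == 9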
for k in self.offsets:
if k > 0:
nnz += min(M,N-k)
else:
nnz += min(M+k,N)
return int(nnz)
getnnz.__doc__ = spmatrix.getnnz.__doc__
count_nonzero.__doc__ = spmatrix.count_nonzero.__doc__
def sum(self, axis=None, dtype=None, out=None):
validateaxis(axis)
if axis is not None and axis < 0:
axis += 2
res_dtype = get_sum_dtype(self.dtype)
num_rows, num_cols = self.shape
ret = None
if axis == 0:
mask = self._data_mask()
x = (self.data * mask).sum(axis=0)
if x.shape[0] == num_cols:
res = x
else:
res = np.zeros(num_cols, dtype=x.dtype)
res[:x.shape[0]] = x
ret = np.matrix(res, dtype=res_dtype)
else:
row_sums = np.zeros(num_rows, dtype=res_dtype)
one = np.ones(num_cols, dtype=res_dtype)
dia_matvec(num_rows, num_cols, len(self.offsets),
self.data.shape[1], self.offsets, self.data, one, row_sums)
row_sums = np.matrix(row_sums)
if axis is None:
return row_sums.sum(dtype=dtype, out=out)
if axis is not None:
row_sums = row_sums.T
ret = np.matrix(row_sums.sum(axis=axis))
if out is not None and out.shape != ret.shape:
raise ValueError("dimensions do not match")
return ret.sum(axis=(), dtype=dtype, out=out)
sum.__doc__ = spmatrix.sum.__doc__
def _mul_vector(self, other):
x = other
y = np.zeros(self.shape[0], dtype=upcast_char(self.dtype.char,
x.dtype.char))
L = self.data.shape[1]
M,N = self.shape
dia_matvec(M,N, len(self.offsets), L, self.offsets, self.data, x.ravel(), y.ravel())
return y
def _mul_multimatrix(self, other):
return np.hstack([self._mul_vector(col).reshape(-1,1) for col in other.T])
def _setdiag(self, values, k=0):
M, N = self.shape
if values.ndim == 0:
# broadcast
values_n = np.inf
else:
values_n = len(values)
if k < 0:
n = min(M + k, N, values_n)
min_index = 0
max_index = n
else:
n = min(M, N - k, values_n)
min_index = k
max_index = k + n
if values.ndim != 0:
# allow also longer sequences
values = values[:n]
if k in self.offsets:
self.data[self.offsets == k, min_index:max_index] = values
else:
self.offsets = np.append(self.offsets, self.offsets.dtype.type(k))
m = max(max_index, self.data.shape[1])
data = np.zeros((self.data.shape[0]+1, m), dtype=self.data.dtype)
data[:-1,:self.data.shape[1]] = self.data
data[-1, min_index:max_index] = values
self.data = data
def todia(self, copy=False):
if copy:
return self.copy()
else:
return self
todia.__doc__ = spmatrix.todia.__doc__
def transpose(self, axes=None, copy=False):
if axes is not None:
raise ValueError(("Sparse matrices do not support "
"an 'axes' parameter because swapping "
"dimensions is the only logical permutation."))
num_rows, num_cols = self.shape
max_dim = max(self.shape)
# flip diagonal offsets
offsets = -self.offsets
# re-align the data matrix
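# the gather below shifts each stored diagonal so its entries line up with the column
# indices of the transposed matrix; positions falling outside the matrix read the zero
# padding (or are simply ignored by the DIA format)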
r = np.arange(len(offsets), dtype=np.intc)[:, None]
c = np.arange(num_rows, dtype=np.intc) - (offsets % max_dim)[:, None]
pad_amount = max(0, max_dim-self.data.shape[1])
data = np.hstack((self.data, np.zeros((self.data.shape[0], pad_amount),
dtype=self.data.dtype)))
data = data[r, c]
return dia_matrix((data, offsets), shape=(
num_cols, num_rows), copy=copy)
transpose.__doc__ = spmatrix.transpose.__doc__
def diagonal(self):
idx, = np.where(self.offsets == 0)
n = min(self.shape)
if idx.size == 0:
return np.zeros(n, dtype=self.data.dtype)
return self.data[idx[0],:n]
diagonal.__doc__ = spmatrix.diagonal.__doc__
def tocsc(self, copy=False):
from .csc import csc_matrix
if self.nnz == 0:
return csc_matrix(self.shape, dtype=self.dtype)
num_rows, num_cols = self.shape
num_offsets, offset_len = self.data.shape
offset_inds = np.arange(offset_len)
row = offset_inds - self.offsets[:,None]
mask = (row >= 0)
mask &= (row < num_rows)
mask &= (offset_inds < num_cols)
mask &= (self.data != 0)
idx_dtype = get_index_dtype(maxval=max(self.shape))
indptr = np.zeros(num_cols + 1, dtype=idx_dtype)
indptr[1:offset_len+1] = np.cumsum(mask.sum(axis=0))
indptr[offset_len+1:] = indptr[offset_len]
indices = row.T[mask.T].astype(idx_dtype, copy=False)
data = self.data.T[mask.T]
return csc_matrix((data, indices, indptr), shape=self.shape,
dtype=self.dtype)
tocsc.__doc__ = spmatrix.tocsc.__doc__
def tocoo(self, copy=False):
num_rows, num_cols = self.shape
num_offsets, offset_len = self.data.shape
offset_inds = np.arange(offset_len)
row = offset_inds - self.offsets[:,None]
mask = (row >= 0)
mask &= (row < num_rows)
mask &= (offset_inds < num_cols)
mask &= (self.data != 0)
row = row[mask]
col = np.tile(offset_inds, num_offsets)[mask.ravel()]
data = self.data[mask]
from .coo import coo_matrix
A = coo_matrix((data,(row,col)), shape=self.shape, dtype=self.dtype)
A.has_canonical_format = True
return A
tocoo.__doc__ = spmatrix.tocoo.__doc__
# needed by _data_matrix
def _with_data(self, data, copy=True):
"""Returns a matrix with the same sparsity structure as self,
but with different data. By default the structure arrays are copied.
"""
if copy:
return dia_matrix((data, self.offsets.copy()), shape=self.shape)
else:
return dia_matrix((data,self.offsets), shape=self.shape)
def isspmatrix_dia(x):
"""Is x of dia_matrix type?
Parameters
----------
x
object to check for being a dia matrix
Returns
-------
bool
True if x is a dia matrix, False otherwise
Examples
--------
>>> from scipy.sparse import dia_matrix, isspmatrix_dia
>>> isspmatrix_dia(dia_matrix([[5]]))
True
>>> from scipy.sparse import dia_matrix, csr_matrix, isspmatrix_dia
>>> isspmatrix_dia(csr_matrix([[5]]))
False
"""
return isinstance(x, dia_matrix)
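# Editor's sketch (not part of dia.py): a minimal usage example of the DIA
# format the methods above operate on; assumes scipy and numpy are installed.
import numpy as np
from scipy.sparse import dia_matrix
data = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
offsets = np.array([0, -1, 2])
A = dia_matrix((data, offsets), shape=(4, 4))
print(A.toarray())      # dense view of the three stored diagonals
print(A.diagonal())     # main diagonal, served by diagonal() above
print(A.T.offsets)      # transpose() negates the offsets and re-aligns the data
A.setdiag(9, k=1)       # _setdiag() appends a new offset row when k is absent
print(A.tocsc().nnz)    # tocsc() masks out-of-bounds and zero entries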
|
bsd-3-clause
|
Microsoft/PTVS
|
Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/adodbapi/is64bit.py
|
8
|
1226
|
"""is64bit.Python() --> boolean value of detected Python word size. is64bit.os() --> os build version"""
import sys
def Python():
if sys.platform == 'cli': #IronPython
import System
return System.IntPtr.Size == 8
else:
try:
return sys.maxsize > 2147483647
except AttributeError:
return sys.maxint > 2147483647
def os():
import platform
pm = platform.machine()
if pm != '..' and pm.endswith('64'): # recent Python (not Iron)
return True
else:
import os
if 'PROCESSOR_ARCHITEW6432' in os.environ:
return True # 32 bit program running on 64 bit Windows
try:
return os.environ['PROCESSOR_ARCHITECTURE'].endswith('64') # 64 bit Windows 64 bit program
except KeyError:
pass # not Windows
try:
return '64' in platform.architecture()[0] # this often works in Linux
except:
return False # is an older version of Python, assume also an older os (best we can guess)
if __name__ == "__main__":
print(("is64bit.Python() =", Python(), "is64bit.os() =", os()))
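# Editor's sketch (not part of adodbapi): the same two checks written directly
# against the standard library on CPython 3, for comparison with the module above.
import platform
import sys
python_is_64bit = sys.maxsize > 2147483647            # interpreter word size, as in Python()
os_looks_64bit = platform.machine().endswith('64')    # coarse OS check, as in os()
print(python_is_64bit, os_looks_64bit)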
|
apache-2.0
|
hujiajie/chromium-crosswalk
|
third_party/bintrees/bintrees/bintree.py
|
156
|
4928
|
#!/usr/bin/env python
#coding:utf-8
# Author: mozman
# Purpose: binary tree module
# Created: 28.04.2010
# Copyright (c) 2010-2013 by Manfred Moitzi
# License: MIT License
from __future__ import absolute_import
from .treemixin import TreeMixin
__all__ = ['BinaryTree']
class Node(object):
""" Internal object, represents a treenode """
__slots__ = ['key', 'value', 'left', 'right']
def __init__(self, key, value):
self.key = key
self.value = value
self.left = None
self.right = None
def __getitem__(self, key):
""" x.__getitem__(key) <==> x[key], where key is 0 (left) or 1 (right) """
return self.left if key == 0 else self.right
def __setitem__(self, key, value):
""" x.__setitem__(key, value) <==> x[key]=value, where key is 0 (left) or 1 (right) """
if key == 0:
self.left = value
else:
self.right = value
def free(self):
""" Set references to None """
self.left = None
self.right = None
self.value = None
self.key = None
class BinaryTree(TreeMixin):
"""
BinaryTree implements an unbalanced binary tree with a dict-like interface.
see: http://en.wikipedia.org/wiki/Binary_tree
A binary tree is a tree data structure in which each node has at most two
children.
BinaryTree() -> new empty tree.
BinaryTree(mapping) -> new tree initialized from a mapping
BinaryTree(seq) -> new tree initialized from seq [(k1, v1), (k2, v2), ... (kn, vn)]
see also TreeMixin() class.
"""
def __init__(self, items=None):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
self._root = None
self._count = 0
if items is not None:
self.update(items)
def clear(self):
""" T.clear() -> None. Remove all items from T. """
def _clear(node):
if node is not None:
_clear(node.left)
_clear(node.right)
node.free()
_clear(self._root)
self._count = 0
self._root = None
@property
def root(self):
""" root node of T """
return self._root
@property
def count(self):
""" count of items """
return self._count
def _new_node(self, key, value):
""" Create a new tree node. """
self._count += 1
return Node(key, value)
def insert(self, key, value):
""" T.insert(key, value) <==> T[key] = value, insert key, value into Tree """
if self._root is None:
self._root = self._new_node(key, value)
else:
parent = None
direction = 0
node = self._root
while True:
if node is None:
parent[direction] = self._new_node(key, value)
break
if key == node.key:
node.value = value # replace value
break
else:
parent = node
direction = 0 if key <= node.key else 1
node = node[direction]
def remove(self, key):
""" T.remove(key) <==> del T[key], remove item <key> from tree """
node = self._root
if node is None:
raise KeyError(str(key))
else:
parent = None
direction = 0
while True:
if key == node.key:
# remove node
if (node.left is not None) and (node.right is not None):
# find replacement node: smallest key in right subtree
parent = node
direction = 1
replacement = node.right
while replacement.left is not None:
parent = replacement
direction = 0
replacement = replacement.left
parent[direction] = replacement.right
#swap places
node.key = replacement.key
node.value = replacement.value
node = replacement # delete replacement!
else:
down_dir = 1 if node.left is None else 0
if parent is None: # root
self._root = node[down_dir]
else:
parent[direction] = node[down_dir]
node.free()
self._count -= 1
break
else:
direction = 0 if key < node.key else 1
parent = node
node = node[direction]
if node is None:
raise KeyError(str(key))
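# Editor's sketch (not part of bintree.py): basic usage of BinaryTree, assuming
# the bintrees package (which supplies TreeMixin's dict-like API) is installed.
from bintrees import BinaryTree
t = BinaryTree([(3, 'c'), (1, 'a'), (2, 'b')])
t[4] = 'd'                 # __setitem__ from TreeMixin calls insert() above
print(t[2], t.count)       # lookup plus the count property
del t[3]                   # __delitem__ calls remove(); a node with two children
print(sorted(t.keys()))    # is replaced by the smallest key of its right subtree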
|
bsd-3-clause
|
pimentech/django-mongoforms
|
setup.py
|
1
|
1754
|
from setuptools import setup, find_packages
from setuptools.command.test import test
class TestRunner(test):
def run(self):
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(
self.distribution.install_requires)
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
import sys
sys.path.insert(0, 'testprj')
from testprj import settings as test_settings
from django.conf import settings
settings.configure(test_settings)
from testprj.tests import mongoforms_test_runner as test_runner
test_suite = test_runner.build_suite(['testapp'])
test_runner.setup_test_environment()
result = test_runner.run_suite(test_suite)
test_runner.teardown_test_environment()
return result
setup(
name='django-mongoforms',
version='0.2.3',
description='A Django-ModelForm clone for mongoengine',
author='Stephan Jaekel',
author_email='[email protected]',
maintainer='Serge Matveenko',
maintainer_email='[email protected]',
url='http://github.com/stephrdev/django-mongoforms/',
packages=find_packages(
exclude=['examples', 'examples.*', 'testprj', 'testprj.*']),
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
],
zip_safe=False,
cmdclass={"test": TestRunner},
requires=['Django', 'mongoengine(>=0.6)', 'pymongo(>=2.1)']
)
|
bsd-3-clause
|
nicolasgallardo/TECHLAV_T1-6
|
bebop_ws/devel/lib/python2.7/dist-packages/bebop_msgs/msg/_Ardrone3PilotingStateAltitudeChanged.py
|
1
|
6388
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from bebop_msgs/Ardrone3PilotingStateAltitudeChanged.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
class Ardrone3PilotingStateAltitudeChanged(genpy.Message):
_md5sum = "5073f650d09c8192d358641b48a0204b"
_type = "bebop_msgs/Ardrone3PilotingStateAltitudeChanged"
_has_header = True #flag to mark the presence of a Header object
_full_text = """# Ardrone3PilotingStateAltitudeChanged
# auto-generated from https://raw.githubusercontent.com/Parrot-Developers/libARCommands/7e2f55fafcd45ba2380ca2574a08b7359c005f47/Xml/ARDrone3_commands.xml
# Do not modify this file by hand. Check scripts/meta folder for generator files.
#
# SDK Comment: Drone altitude changed
Header header
# Altitude in meters
float64 altitude
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
"""
__slots__ = ['header','altitude']
_slot_types = ['std_msgs/Header','float64']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,altitude
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Ardrone3PilotingStateAltitudeChanged, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.altitude is None:
self.altitude = 0.
else:
self.header = std_msgs.msg.Header()
self.altitude = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_struct_d.pack(self.altitude))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 8
(self.altitude,) = _struct_d.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_struct_d.pack(self.altitude))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 8
(self.altitude,) = _struct_d.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_3I = struct.Struct("<3I")
_struct_d = struct.Struct("<d")
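# Editor's sketch (not part of the generated module): a serialize/deserialize
# round trip for the message above, assuming a ROS Python environment where
# genpy and std_msgs are importable.
from io import BytesIO
msg = Ardrone3PilotingStateAltitudeChanged(altitude=1.5)
msg.header.frame_id = 'odom'
buff = BytesIO()
msg.serialize(buff)                        # 3 x uint32 header, length-prefixed frame_id, float64
copy = Ardrone3PilotingStateAltitudeChanged()
copy.deserialize(buff.getvalue())          # unpacks the same fixed layout
assert copy.altitude == 1.5 and copy.header.frame_id == 'odom'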
|
gpl-2.0
|
keceli/RMG-Java
|
databases/RMG_database/kinetics_libraries/Dooley/C1/remove_unused_species.py
|
11
|
1403
|
#!/usr/bin/env python
# encoding: utf-8
"""
remove_unused_species.py
Created by Richard West on 2011-03-10.
Copyright (c) 2011 MIT. All rights reserved.
"""
import sys
import os
import re
import fileinput
species = set()
for line in fileinput.input(('reactions.txt','pdepreactions.txt')):
if (line.find(' = ') == -1):
continue
if (line.strip().startswith('//')):
continue
line = line.replace("(+M)","")
reactants, products = line.split(' = ')
products, junk = products.split(None, 1)
combined = "%s+%s"%(reactants,products)
for s in combined.split('+'):
species.add(s.strip())
print "These %d species listed in reactions.txt and pdepreactions.txt" % len(species)
for s in species:
print s
print "Copying the species.txt file, removing redundant species"
outfile = file('species.new.txt','w')
infile = file('species.txt')
for line in infile:
if (line.strip().startswith('//')):
continue
if (line.strip()==''):
continue
s = line.strip()
try:
if (s in species):
while (line.strip()!=''):
outfile.write(line.strip()+'\n')
line = infile.next()
outfile.write('\n')
else:
print "Skipping %s"%s
while (line.strip()!=''):
line = infile.next()
except StopIteration:
break
outfile.close()
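# Editor's sketch (not part of remove_unused_species.py): the species-parsing
# step above in isolation; the reaction line is made up for illustration.
line = "CH4+OH = CH3+H2O    1.0E13  0.0  3000"
line = line.replace("(+M)", "")
reactants, products = line.split(' = ')
products = products.split(None, 1)[0]              # drop the rate coefficients
species = set(s.strip() for s in ("%s+%s" % (reactants, products)).split('+'))
print(sorted(species))                              # ['CH3', 'CH4', 'H2O', 'OH']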
|
mit
|
subramani95/neutron
|
neutron/plugins/vmware/extensions/lsn.py
|
54
|
2634
|
# Copyright 2014 VMware, Inc.
#
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron.api import extensions
from neutron.api.v2 import base
from neutron import manager
EXT_ALIAS = 'lsn'
COLLECTION_NAME = "%ss" % EXT_ALIAS
RESOURCE_ATTRIBUTE_MAP = {
COLLECTION_NAME: {
'network': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'is_visible': True},
'report': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:string': None}, 'is_visible': True},
},
}
class Lsn(object):
"""Enable LSN configuration for Neutron NSX networks."""
@classmethod
def get_name(cls):
return "Logical Service Node configuration"
@classmethod
def get_alias(cls):
return EXT_ALIAS
@classmethod
def get_description(cls):
return "Enables configuration of NSX Logical Services Node."
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/%s/api/v2.0" % EXT_ALIAS
@classmethod
def get_updated(cls):
return "2013-10-05T10:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
exts = []
plugin = manager.NeutronManager.get_plugin()
resource_name = EXT_ALIAS
collection_name = resource_name.replace('_', '-') + "s"
params = RESOURCE_ATTRIBUTE_MAP.get(COLLECTION_NAME, dict())
controller = base.create_resource(collection_name,
resource_name,
plugin, params, allow_bulk=False)
ex = extensions.ResourceExtension(collection_name, controller)
exts.append(ex)
return exts
def get_extended_resources(self, version):
if version == "2.0":
return RESOURCE_ATTRIBUTE_MAP
else:
return {}
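# Editor's sketch (not part of the extension module): run against the module
# above, this lists which attributes of the "lsns" collection accept POST data.
postable = sorted(name for name, spec in RESOURCE_ATTRIBUTE_MAP['lsns'].items()
                  if spec.get('allow_post'))
print(postable)   # ['network', 'tenant_id']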
|
apache-2.0
|
hmen89/odoo
|
addons/hr_payroll/wizard/__init__.py
|
442
|
1159
|
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_payroll_payslips_by_employees
import hr_payroll_contribution_register_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
Lightmatter/django-inlineformfield
|
.tox/py27/lib/python2.7/site-packages/django/utils/2to3_fixes/fix_unicode.py
|
349
|
1181
|
"""Fixer for __unicode__ methods.
Uses the django.utils.encoding.python_2_unicode_compatible decorator.
"""
from __future__ import unicode_literals
from lib2to3 import fixer_base
from lib2to3.fixer_util import find_indentation, Name, syms, touch_import
from lib2to3.pgen2 import token
from lib2to3.pytree import Leaf, Node
class FixUnicode(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
classdef< 'class' any+ ':'
suite< any*
funcdef< 'def' unifunc='__unicode__'
parameters< '(' NAME ')' > any+ >
any* > >
"""
def transform(self, node, results):
unifunc = results["unifunc"]
strfunc = Name("__str__", prefix=unifunc.prefix)
unifunc.replace(strfunc)
klass = node.clone()
klass.prefix = '\n' + find_indentation(node)
decorator = Node(syms.decorator, [Leaf(token.AT, "@"), Name('python_2_unicode_compatible')])
decorated = Node(syms.decorated, [decorator, klass], prefix=node.prefix)
node.replace(decorated)
touch_import('django.utils.encoding', 'python_2_unicode_compatible', decorated)
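# Editor's sketch (not part of the fixer module): the code shape FixUnicode
# produces — __unicode__ is renamed to __str__, the class is wrapped with the
# decorator, and touch_import adds the import. The Poll class is hypothetical,
# and python_2_unicode_compatible assumes a Django version older than 3.0.
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Poll(object):
    def __str__(self):        # was "def __unicode__(self):" before the fixer ran
        return 'a poll'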
|
mit
|
RuiNascimento/krepo
|
script.module.lambdascrapers/lib/lambdascrapers/sources_ lambdascrapers/en_DebridOnly/sceper.py
|
2
|
6849
|
# -*- coding: utf-8 -*-
'''
Yoda Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['sceper.ws','sceper.unblocked.pro']
self.base_link = 'https://sceper.unblocked.pro'
self.search_link = '/search/%s/feed/rss2/'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
if debrid.status() == False: raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
url = self.search_link % urllib.quote_plus(query)
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
posts = client.parseDOM(r, 'item')
hostDict = hostprDict
items = []
for post in posts:
try:
t = client.parseDOM(post, 'title')[0]
c = client.parseDOM(post, 'content.+?')[0]
s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', c)
s = s[0] if s else '0'
u = zip(client.parseDOM(c, 'a', ret='href'), client.parseDOM(c, 'a'))
u = [(i[0], i[1], re.findall('PT(\d+)$', i[1])) for i in u]
u = [(i[0], i[1]) for i in u if not i[2]]
if 'tvshowtitle' in data:
u = [([x for x in i[0].strip('//').split('/')][-1], i[0]) for i in u]
else:
u = [(t, i[0], s) for i in u]
items += u
except:
pass
for item in items:
try:
name = item[0]
name = client.replaceHTMLCodes(name)
t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
if not cleantitle.get(t) == cleantitle.get(title): raise Exception()
y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
if not y == hdlr: raise Exception()
fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
fmt = [i.lower() for i in fmt]
if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
if any(i in ['extras'] for i in fmt): raise Exception()
if '1080p' in fmt: quality = '1080p'
elif '720p' in fmt: quality = 'HD'
else: quality = 'SD'
if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'
info = []
if '3d' in fmt: info.append('3D')
try:
size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1]
div = 1 if size.endswith(('GB', 'GiB')) else 1024
size = float(re.sub('[^0-9|/.|/,]', '', size))/div
size = '%.2f GB' % size
info.append(size)
except:
pass
if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')
info = ' | '.join(info)
url = item[1]
if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
if not host in hostDict: raise Exception()
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
except:
pass
check = [i for i in sources if not i['quality'] == 'CAM']
if check: sources = check
return sources
except:
return sources
def resolve(self, url):
return url
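# Editor's sketch (not part of the scraper): the quality classification from
# sources() above, isolated; the release name is invented for illustration.
import re
name = 'Some.Movie.2014.1080p.BluRay.x265-GRP'
fmt = re.sub(r'(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
fmt = [i.lower() for i in re.split(r'\.|\(|\)|\[|\]|\s|\-', fmt)]
quality = '1080p' if '1080p' in fmt else 'HD' if '720p' in fmt else 'SD'
info = ['HEVC'] if any(i in ('hevc', 'h265', 'x265') for i in fmt) else []
print(quality, info)   # 1080p ['HEVC']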
|
gpl-2.0
|
GenoaIO/ews-python-suds
|
src/suds/umx/typed.py
|
208
|
4646
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Provides typed unmarshaller classes.
"""
from logging import getLogger
from suds import *
from suds.umx import *
from suds.umx.core import Core
from suds.resolver import NodeResolver, Frame
from suds.sudsobject import Factory
log = getLogger(__name__)
#
# Add typed extensions
# type = The expected xsd type
# real = The 'true' XSD type
#
Content.extensions.append('type')
Content.extensions.append('real')
class Typed(Core):
"""
A I{typed} XML unmarshaller
@ivar resolver: A schema type resolver.
@type resolver: L{NodeResolver}
"""
def __init__(self, schema):
"""
@param schema: A schema object.
@type schema: L{xsd.schema.Schema}
"""
self.resolver = NodeResolver(schema)
def process(self, node, type):
"""
Process an object graph representation of the xml L{node}.
@param node: An XML tree.
@type node: L{sax.element.Element}
@param type: The I{optional} schema type.
@type type: L{xsd.sxbase.SchemaObject}
@return: A suds object.
@rtype: L{Object}
"""
content = Content(node)
content.type = type
return Core.process(self, content)
def reset(self):
log.debug('reset')
self.resolver.reset()
def start(self, content):
#
# Resolve to the schema type; build an object and setup metadata.
#
if content.type is None:
found = self.resolver.find(content.node)
if found is None:
log.error(self.resolver.schema)
raise TypeNotFound(content.node.qname())
content.type = found
else:
known = self.resolver.known(content.node)
frame = Frame(content.type, resolved=known)
self.resolver.push(frame)
real = self.resolver.top().resolved
content.real = real
cls_name = real.name
if cls_name is None:
cls_name = content.node.name
content.data = Factory.object(cls_name)
md = content.data.__metadata__
md.sxtype = real
def end(self, content):
self.resolver.pop()
def unbounded(self, content):
return content.type.unbounded()
def nillable(self, content):
resolved = content.type.resolve()
return ( content.type.nillable or \
(resolved.builtin() and resolved.nillable ) )
def append_attribute(self, name, value, content):
"""
Append an attribute name/value into L{Content.data}.
@param name: The attribute name
@type name: basestring
@param value: The attribute's value
@type value: basestring
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
type = self.resolver.findattr(name)
if type is None:
log.warn('attribute (%s) type, not-found', name)
else:
value = self.translated(value, type)
Core.append_attribute(self, name, value, content)
def append_text(self, content):
"""
Append text nodes into L{Content.data}
Here is where the I{true} type is used to translate the value
into the proper python type.
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
Core.append_text(self, content)
known = self.resolver.top().resolved
content.text = self.translated(content.text, known)
def translated(self, value, type):
""" translate using the schema type """
if value is not None:
resolved = type.resolve()
return resolved.translate(value)
else:
return value
|
bsd-2-clause
|
epssy/hue
|
desktop/core/ext-py/kazoo-2.0/kazoo/tests/test_connection.py
|
35
|
9658
|
from collections import namedtuple
import os
import errno
import threading
import time
import uuid
import struct
from nose import SkipTest
from nose.tools import eq_
from nose.tools import raises
import mock
from kazoo.exceptions import ConnectionLoss
from kazoo.protocol.serialization import (
Connect,
int_struct,
write_string,
)
from kazoo.protocol.states import KazooState
from kazoo.protocol.connection import _CONNECTION_DROP
from kazoo.testing import KazooTestCase
from kazoo.tests.util import wait
from kazoo.tests.util import TRAVIS_ZK_VERSION
class Delete(namedtuple('Delete', 'path version')):
type = 2
def serialize(self):
b = bytearray()
b.extend(write_string(self.path))
b.extend(int_struct.pack(self.version))
return b
@classmethod
def deserialize(self, bytes, offset):
raise ValueError("oh my")
class TestConnectionHandler(KazooTestCase):
def test_bad_deserialization(self):
async_object = self.client.handler.async_result()
self.client._queue.append((Delete(self.client.chroot, -1), async_object))
os.write(self.client._connection._write_pipe, b'\0')
@raises(ValueError)
def testit():
async_object.get()
testit()
def test_with_bad_sessionid(self):
ev = threading.Event()
def expired(state):
if state == KazooState.CONNECTED:
ev.set()
password = os.urandom(16)
client = self._get_client(client_id=(82838284824, password))
client.add_listener(expired)
client.start()
try:
ev.wait(15)
eq_(ev.is_set(), True)
finally:
client.stop()
def test_connection_read_timeout(self):
client = self.client
ev = threading.Event()
path = "/" + uuid.uuid4().hex
handler = client.handler
_select = handler.select
_socket = client._connection._socket
def delayed_select(*args, **kwargs):
result = _select(*args, **kwargs)
if len(args[0]) == 1 and _socket in args[0]:
# for any socket read, simulate a timeout
return [], [], []
return result
def back(state):
if state == KazooState.CONNECTED:
ev.set()
client.add_listener(back)
client.create(path, b"1")
try:
handler.select = delayed_select
self.assertRaises(ConnectionLoss, client.get, path)
finally:
handler.select = _select
# the client reconnects automatically
ev.wait(5)
eq_(ev.is_set(), True)
eq_(client.get(path)[0], b"1")
def test_connection_write_timeout(self):
client = self.client
ev = threading.Event()
path = "/" + uuid.uuid4().hex
handler = client.handler
_select = handler.select
_socket = client._connection._socket
def delayed_select(*args, **kwargs):
result = _select(*args, **kwargs)
if _socket in args[1]:
# for any socket write, simulate a timeout
return [], [], []
return result
def back(state):
if state == KazooState.CONNECTED:
ev.set()
client.add_listener(back)
try:
handler.select = delayed_select
self.assertRaises(ConnectionLoss, client.create, path)
finally:
handler.select = _select
# the client reconnects automatically
ev.wait(5)
eq_(ev.is_set(), True)
eq_(client.exists(path), None)
def test_connection_deserialize_fail(self):
client = self.client
ev = threading.Event()
path = "/" + uuid.uuid4().hex
handler = client.handler
_select = handler.select
_socket = client._connection._socket
def delayed_select(*args, **kwargs):
result = _select(*args, **kwargs)
if _socket in args[1]:
# for any socket write, simulate a timeout
return [], [], []
return result
def back(state):
if state == KazooState.CONNECTED:
ev.set()
client.add_listener(back)
deserialize_ev = threading.Event()
def bad_deserialize(bytes, offset):
deserialize_ev.set()
raise struct.error()
# force the connection to die but, on reconnect, cause the
# server response to be non-deserializable. ensure that the client
# continues to retry. This partially reproduces a rare bug seen
# in production.
with mock.patch.object(Connect, 'deserialize') as mock_deserialize:
mock_deserialize.side_effect = bad_deserialize
try:
handler.select = delayed_select
self.assertRaises(ConnectionLoss, client.create, path)
finally:
handler.select = _select
# the client reconnects automatically but the first attempt will
# hit a deserialize failure. wait for that.
deserialize_ev.wait(5)
eq_(deserialize_ev.is_set(), True)
# this time should succeed
ev.wait(5)
eq_(ev.is_set(), True)
eq_(client.exists(path), None)
def test_connection_close(self):
self.assertRaises(Exception, self.client.close)
self.client.stop()
self.client.close()
# should be able to restart
self.client.start()
def test_connection_pipe(self):
client = self.client
read_pipe = client._connection._read_pipe
write_pipe = client._connection._write_pipe
assert read_pipe is not None
assert write_pipe is not None
# stop client and pipe should not yet be closed
client.stop()
assert read_pipe is not None
assert write_pipe is not None
os.fstat(read_pipe)
os.fstat(write_pipe)
# close client, and pipes should be
client.close()
try:
os.fstat(read_pipe)
except OSError as e:
if not e.errno == errno.EBADF:
raise
else:
self.fail("Expected read_pipe to be closed")
try:
os.fstat(write_pipe)
except OSError as e:
if not e.errno == errno.EBADF:
raise
else:
self.fail("Expected write_pipe to be closed")
# start client back up. should get a new, valid pipe
client.start()
read_pipe = client._connection._read_pipe
write_pipe = client._connection._write_pipe
assert read_pipe is not None
assert write_pipe is not None
os.fstat(read_pipe)
os.fstat(write_pipe)
def test_dirty_pipe(self):
client = self.client
read_pipe = client._connection._read_pipe
write_pipe = client._connection._write_pipe
# add a stray byte to the pipe and ensure that doesn't
# blow up client. simulates case where some error leaves
# a byte in the pipe which doesn't correspond to the
# request queue.
os.write(write_pipe, b'\0')
# eventually this byte should disappear from pipe
wait(lambda: client.handler.select([read_pipe], [], [], 0)[0] == [])
class TestConnectionDrop(KazooTestCase):
def test_connection_dropped(self):
ev = threading.Event()
def back(state):
if state == KazooState.CONNECTED:
ev.set()
# create a node with a large value and stop the ZK node
path = "/" + uuid.uuid4().hex
self.client.create(path)
self.client.add_listener(back)
result = self.client.set_async(path, b'a' * 1000 * 1024)
self.client._call(_CONNECTION_DROP, None)
self.assertRaises(ConnectionLoss, result.get)
# we have a working connection to a new node
ev.wait(30)
eq_(ev.is_set(), True)
class TestReadOnlyMode(KazooTestCase):
def setUp(self):
self.setup_zookeeper(read_only=True)
skip = False
if TRAVIS_ZK_VERSION and TRAVIS_ZK_VERSION < (3, 4):
skip = True
elif TRAVIS_ZK_VERSION and TRAVIS_ZK_VERSION >= (3, 4):
skip = False
else:
ver = self.client.server_version()
if ver[1] < 4:
skip = True
if skip:
raise SkipTest("Must use Zookeeper 3.4 or above")
def tearDown(self):
self.client.stop()
def test_read_only(self):
from kazoo.exceptions import NotReadOnlyCallError
from kazoo.protocol.states import KeeperState
client = self.client
states = []
ev = threading.Event()
@client.add_listener
def listen(state):
states.append(state)
if client.client_state == KeeperState.CONNECTED_RO:
ev.set()
try:
self.cluster[1].stop()
self.cluster[2].stop()
ev.wait(6)
eq_(ev.is_set(), True)
eq_(client.client_state, KeeperState.CONNECTED_RO)
# Test read only command
eq_(client.get_children('/'), [])
# Test error with write command
@raises(NotReadOnlyCallError)
def testit():
client.create('/fred')
testit()
# Wait for a ping
time.sleep(15)
finally:
client.remove_listener(listen)
self.cluster[1].run()
self.cluster[2].run()
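# Editor's sketch (not part of the test module): what the Delete request used in
# test_bad_deserialization serializes to — a length-prefixed UTF-8 path followed
# by a 4-byte version, per ZooKeeper's jute encoding (run with the module above).
d = Delete('/node', -1)
payload = bytes(d.serialize())
print(len(payload))   # expected 13: 4-byte length + 5 path bytes + 4-byte version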
|
apache-2.0
|
danielhrisca/asammdf
|
asammdf/blocks/mdf_v4.py
|
1
|
395012
|
"""
ASAM MDF version 4 file format module
"""
import bisect
from collections import defaultdict
from functools import lru_cache
from hashlib import md5
import logging
from math import ceil
import mmap
import os
from pathlib import Path
import shutil
import sys
from tempfile import gettempdir, TemporaryFile
from traceback import format_exc
from zlib import decompress
import canmatrix
from lz4.frame import compress as lz_compress
from lz4.frame import decompress as lz_decompress
from numpy import (
arange,
argwhere,
array,
array_equal,
column_stack,
concatenate,
cumsum,
dtype,
empty,
fliplr,
float32,
float64,
frombuffer,
full,
linspace,
nonzero,
packbits,
searchsorted,
transpose,
uint8,
uint16,
uint32,
uint64,
unique,
where,
zeros,
)
from numpy.core.defchararray import decode, encode
from numpy.core.records import fromarrays, fromstring
from pandas import DataFrame
from . import v4_constants as v4c
from ..signal import Signal
from ..version import __version__
from .bus_logging_utils import extract_mux
from .conversion_utils import conversion_transfer
from .mdf_common import MDF_Common
from .source_utils import Source
from .utils import (
all_blocks_addresses,
as_non_byte_sized_signed_int,
CHANNEL_COUNT,
ChannelsDB,
CONVERT,
count_channel_groups,
DataBlockInfo,
debug_channel,
extract_cncomment_xml,
extract_display_name,
fmt_to_datatype_v4,
get_fmt_v4,
get_text_v4,
Group,
InvalidationBlockInfo,
is_file_like,
load_can_database,
MdfException,
sanitize_xml,
SignalDataBlockInfo,
UINT8_uf,
UINT16_uf,
UINT32_p,
UINT32_uf,
UINT64_uf,
UniqueDB,
validate_version_argument,
VirtualChannelGroup,
)
from .v4_blocks import (
AttachmentBlock,
Channel,
ChannelArrayBlock,
ChannelConversion,
ChannelGroup,
DataBlock,
DataGroup,
DataList,
DataZippedBlock,
EventBlock,
FileHistory,
FileIdentificationBlock,
HeaderBlock,
HeaderList,
ListData,
SourceInformation,
TextBlock,
)
MASTER_CHANNELS = (v4c.CHANNEL_TYPE_MASTER, v4c.CHANNEL_TYPE_VIRTUAL_MASTER)
COMMON_SIZE = v4c.COMMON_SIZE
COMMON_u = v4c.COMMON_u
COMMON_uf = v4c.COMMON_uf
COMMON_SHORT_SIZE = v4c.COMMON_SHORT_SIZE
COMMON_SHORT_uf = v4c.COMMON_SHORT_uf
COMMON_SHORT_u = v4c.COMMON_SHORT_u
logger = logging.getLogger("asammdf")
__all__ = ["MDF4"]
try:
from .cutils import extract, get_vlsd_offsets, lengths, sort_data_block
# for now avoid using the cextension code
# 2/0
except:
def extract(signal_data, is_byte_array, offsets=()):
# offsets_ = set(offsets)
size = len(signal_data)
positions = []
values = []
pos = 0
while pos < size:
positions.append(pos)
# if offsets_ and pos not in offsets_:
# raise Exception(f"VLSD offsets do not match the signal data:\n{positions}\n{offsets[:len(positions)]}")
(str_size,) = UINT32_uf(signal_data, pos)
pos = pos + 4 + str_size
values.append(signal_data[pos - str_size : pos])
if is_byte_array:
values = array(values)
values = values.view(dtype=f"({values.itemsize},)u1")
else:
values = array(values)
return values
def sort_data_block(
signal_data, partial_records, cg_size, record_id_nr, _unpack_stuct
):
"""Reads an unsorted DTBLOCK and writes the results to `partial_records`.
Args:
signal_data (bytes): DTBLOCK contents
partial_records (dict): dictionary with `cg_record_id` as key and list of bytes
as value.
cg_size (dict): Dictionary with `cg_record_id` as key and
number of record databytes (i.e. `cg_data_bytes`)
record_id_nr (int): Number of Bytes used for record IDs
in the data block (`dg_rec_id_size`).
_unpack_stuct (callable): Struct("...").unpack_from callable
Returns:
bytes: rest of data which couldn't be parsed, can be used in consecutive
reading attempt
"""
i = 0
size = len(signal_data)
pos = 0
rem = b""
while i + record_id_nr < size:
(rec_id,) = _unpack_stuct(signal_data, i)
# skip record id
i += record_id_nr
rec_size = cg_size[rec_id]
if rec_size:
if rec_size + i > size:
rem = signal_data[pos:]
break
endpoint = i + rec_size
partial_records[rec_id].append(signal_data[i:endpoint])
i = endpoint
else:
if i + 4 > size:
rem = signal_data[pos:]
break
(rec_size,) = UINT32_uf(signal_data, i)
endpoint = i + rec_size + 4
if endpoint > size:
rem = signal_data[pos:]
break
partial_records[rec_id].append(signal_data[i:endpoint])
i = endpoint
pos = i
else:
rem = signal_data[pos:]
return rem
def lengths(iterable):
return [len(item) for item in iterable]
def get_vlsd_offsets(data):
offsets = [0] + [len(item) for item in data]
offsets = cumsum(offsets)
return offsets[:-1], offsets[-1]
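# Editor's sketch (not part of mdf_v4.py): the VLSD signal-data layout that the
# pure-Python extract()/get_vlsd_offsets() fallbacks above walk through — each
# sample is a little-endian uint32 byte count followed by that many raw bytes.
import struct
samples = [b'abc', b'', b'hello']
signal_data = b''.join(struct.pack('<I', len(s)) + s for s in samples)
pos, values = 0, []
while pos < len(signal_data):
    (size,) = struct.unpack_from('<I', signal_data, pos)
    pos += 4
    values.append(signal_data[pos:pos + size])
    pos += size
print(values)   # [b'abc', b'', b'hello']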
class MDF4(MDF_Common):
"""The *header* attibute is a *HeaderBlock*.
The *groups* attribute is a list of dicts, each one with the following keys:
* ``data_group`` - DataGroup object
* ``channel_group`` - ChannelGroup object
* ``channels`` - list of Channel objects with the same order as found in the mdf file
* ``channel_dependencies`` - list of *ChannelArrayBlock* in case of channel arrays;
list of Channel objects in case of structure channel composition
* ``data_block`` - address of data block
* ``data_location``- integer code for data location (original file, temporary file or
memory)
* ``data_block_addr`` - list of raw samples starting addresses
* ``data_block_type`` - list of codes for data block type
* ``data_block_size`` - list of raw samples block size
* ``sorted`` - sorted indicator flag
* ``record_size`` - dict that maps record ID's to record sizes in bytes (including invalidation bytes)
* ``param`` - row size used for transposition, in case of transposed zipped blocks
Parameters
----------
name : string
mdf file name (if provided it must be a real file name) or
file-like object
version : string
mdf file version ('4.00', '4.10', '4.11', '4.20'); default '4.10'
kwargs :
callback : function
keyword only argument: function to call to update the progress; the
function must accept two arguments (the current progress and maximum
progress value)
use_display_names : bool
keyword only argument: for MDF4 files parse the XML channel comment to
search for the display name; XML parsing is quite expensive so setting
this to *False* can decrease the loading times very much; default
*False*
remove_source_from_channel_names (True) : bool
copy_on_get (True) : bool
copy channel values (np.array) to avoid high memory usage
compact_vlsd (False) : bool
use slower method to save the exact sample size for VLSD channels
column_storage (True) : bool
use column storage for MDF version >= 4.20
encryption_key : bytes
use this key to decode encrypted attachments
Attributes
----------
attachments : list
list of file attachments
channels_db : dict
used for fast channel access by name; for each name key the value is a
list of (group index, channel index) tuples
events : list
list event blocks
file_comment : TextBlock
file comment TextBlock
file_history : list
list of (FileHistory, TextBlock) pairs
groups : list
list of data group dicts
header : HeaderBlock
mdf file header
identification : FileIdentificationBlock
mdf file start block
last_call_info : dict | None
a dict to hold information about the last called method.
.. versionadded:: 5.12.0
masters_db : dict
used for fast master channel access; for each group index key the value
is the master channel index
name : string
mdf file name
version : str
mdf version
"""
_terminate = False
def __init__(self, name=None, version="4.10", channels=(), **kwargs):
self._kwargs = kwargs
self.groups = []
self.header = None
self.identification = None
self.file_history = []
self.channels_db = ChannelsDB()
self.masters_db = {}
self.attachments = []
self._attachments_cache = {}
self.file_comment = None
self.events = []
self.bus_logging_map = {"CAN": {}, "ETHERNET": {}, "FLEXRAY": {}, "LIN": {}}
self._attachments_map = {}
self._ch_map = {}
self._master_channel_metadata = {}
self._invalidation_cache = {}
self._external_dbc_cache = {}
self._si_map = {}
self._file_si_map = {}
self._cc_map = {}
self._file_cc_map = {}
self._cg_map = {}
self._cn_data_map = {}
self._dbc_cache = {}
self._interned_strings = {}
self.load_filter = set(channels)
self._tempfile = TemporaryFile()
self._file = None
self._raise_on_multiple_occurrences = True
self._read_fragment_size = 0 * 2 ** 20
self._write_fragment_size = 4 * 2 ** 20
self._use_display_names = kwargs.get("use_display_names", False)
self._remove_source_from_channel_names = kwargs.get(
"remove_source_from_channel_names", False
)
self._encryption_function = kwargs.get("encryption_function", None)
self._decryption_function = kwargs.get("decryption_function", None)
self.copy_on_get = kwargs.get("copy_on_get", True)
self.compact_vlsd = kwargs.get("compact_vlsd", False)
self.raise_on_multiple_occurrences = True
self._single_bit_uint_as_bool = False
self._integer_interpolation = 0
self._float_interpolation = 1
self.virtual_groups = {} # master group 2 referencing groups
self.virtual_groups_map = {} # group index 2 master group
self._master = None
self.last_call_info = None
# make sure no appended block has the address 0
self._tempfile.write(b"\0")
self._callback = kwargs.get("callback", None)
if name:
if is_file_like(name):
self._file = name
self.name = Path("From_FileLike.mf4")
self._from_filelike = True
self._read(mapped=False)
else:
with open(name, "rb") as stream:
identification = FileIdentificationBlock(stream=stream)
version = identification["version_str"]
version = version.decode("utf-8").strip(" \n\t\0")
flags = identification["unfinalized_standard_flags"]
if version >= "4.10" and flags:
tmpdir = Path(gettempdir())
self.name = tmpdir / Path(name).name
shutil.copy(name, self.name)
self._file = open(self.name, "rb+")
self._from_filelike = False
self._read(mapped=False)
else:
if sys.maxsize < 2 ** 32:
self.name = Path(name)
self._file = open(self.name, "rb")
self._from_filelike = False
self._read(mapped=False)
else:
self.name = Path(name)
x = open(self.name, "rb")
self._file = mmap.mmap(x.fileno(), 0, access=mmap.ACCESS_READ)
self._from_filelike = False
self._read(mapped=True)
self._file.close()
x.close()
self._file = open(self.name, "rb")
else:
self._from_filelike = False
version = validate_version_argument(version)
self.header = HeaderBlock()
self.identification = FileIdentificationBlock(version=version)
self.version = version
self.name = Path("__new__.mf4")
if self.version >= "4.20":
self._column_storage = kwargs.get("column_storage", True)
else:
self._column_storage = False
self._parent = None
def __del__(self):
self.close()
def _check_finalised(self) -> bool:
flags = self.identification["unfinalized_standard_flags"]
if flags & 1:
message = (
f"Unfinalised file {self.name}:"
" Update of cycle counters for CG/CA blocks required"
)
logger.info(message)
if flags & 1 << 1:
message = f"Unfinalised file {self.name}: Update of cycle counters for SR blocks required"
logger.info(message)
if flags & 1 << 2:
message = f"Unfinalised file {self.name}: Update of length for last DT block required"
logger.info(message)
if flags & 1 << 3:
message = f"Unfinalised file {self.name}: Update of length for last RD block required"
logger.info(message)
if flags & 1 << 4:
message = (
f"Unfinalised file {self.name}:"
" Update of last DL block in each chained list"
" of DL blocks required"
)
logger.info(message)
if flags & 1 << 5:
message = (
f"Unfinalised file {self.name}:"
" Update of cg_data_bytes and cg_inval_bytes"
" in VLSD CG block required"
)
logger.info(message)
if flags & 1 << 6:
message = (
f"Unfinalised file {self.name}:"
" Update of offset values for VLSD channel required"
" in case a VLSD CG block is used"
)
logger.info(message)
return flags
def _read(self, mapped=False):
stream = self._file
self._mapped = mapped
dg_cntr = 0
stream.seek(0, 2)
self.file_limit = stream.tell()
stream.seek(0)
cg_count, _ = count_channel_groups(stream)
if self._callback:
self._callback(0, cg_count)
current_cg_index = 0
self.identification = FileIdentificationBlock(stream=stream, mapped=mapped)
version = self.identification["version_str"]
self.version = version.decode("utf-8").strip(" \n\t\0")
if self.version >= "4.10":
# Check for finalization past version 4.10
finalisation_flags = self._check_finalised()
if finalisation_flags:
message = f"Attempting finalization of {self.name}"
logger.info(message)
self._finalize()
self._mapped = mapped = False
stream = self._file
self.header = HeaderBlock(address=0x40, stream=stream, mapped=mapped)
# read file history
fh_addr = self.header["file_history_addr"]
while fh_addr:
if fh_addr > self.file_limit:
logger.warning(
f"File history address {fh_addr:X} is outside the file size {self.file_limit}"
)
break
history_block = FileHistory(address=fh_addr, stream=stream, mapped=mapped)
self.file_history.append(history_block)
fh_addr = history_block.next_fh_addr
# read attachments
at_addr = self.header["first_attachment_addr"]
index = 0
while at_addr:
if at_addr > self.file_limit:
logger.warning(
f"Attachment address {at_addr:X} is outside the file size {self.file_limit}"
)
break
at_block = AttachmentBlock(address=at_addr, stream=stream, mapped=mapped)
self._attachments_map[at_addr] = index
self.attachments.append(at_block)
at_addr = at_block.next_at_addr
index += 1
# go to first data group and read each data group sequentially
dg_addr = self.header.first_dg_addr
while dg_addr:
if dg_addr > self.file_limit:
logger.warning(
f"Data group address {dg_addr:X} is outside the file size {self.file_limit}"
)
break
new_groups = []
group = DataGroup(address=dg_addr, stream=stream, mapped=mapped)
record_id_nr = group.record_id_len
# go to first channel group of the current data group
cg_addr = first_cg_addr = group.first_cg_addr
cg_nr = 0
cg_size = {}
while cg_addr:
if cg_addr > self.file_limit:
logger.warning(
f"Channel group address {cg_addr:X} is outside the file size {self.file_limit}"
)
break
cg_nr += 1
if cg_addr == first_cg_addr:
grp = Group(group)
else:
grp = Group(group.copy())
# read each channel group sequentially
block = ChannelGroup(
address=cg_addr,
stream=stream,
mapped=mapped,
si_map=self._si_map,
version=self.version,
tx_map=self._interned_strings,
)
self._cg_map[cg_addr] = dg_cntr
channel_group = grp.channel_group = block
grp.record_size = cg_size
if channel_group.flags & v4c.FLAG_CG_VLSD:
# VLDS flag
record_id = channel_group.record_id
cg_size[record_id] = 0
elif channel_group.flags & v4c.FLAG_CG_BUS_EVENT:
samples_size = channel_group.samples_byte_nr
inval_size = channel_group.invalidation_bytes_nr
record_id = channel_group.record_id
cg_size[record_id] = samples_size + inval_size
else:
# in case no `cg_flags` are set
samples_size = channel_group.samples_byte_nr
inval_size = channel_group.invalidation_bytes_nr
record_id = channel_group.record_id
cg_size[record_id] = samples_size + inval_size
if record_id_nr:
grp.sorted = False
else:
grp.sorted = True
# go to first channel of the current channel group
ch_addr = channel_group.first_ch_addr
ch_cntr = 0
# Read channels by walking recursively in the channel group
# starting from the first channel
self._read_channels(
ch_addr, grp, stream, dg_cntr, ch_cntr, mapped=mapped
)
cg_addr = channel_group.next_cg_addr
dg_cntr += 1
current_cg_index += 1
if self._callback:
self._callback(current_cg_index, cg_count)
if self._terminate:
self.close()
return
new_groups.append(grp)
# each new group stores the channel-group record-size dict; the raw data
# still belongs to the initial (possibly unsorted) group, which is what the
# 'sorted' flag set above records
address = group.data_block_addr
total_size = 0
inval_total_size = 0
block_type = b"##DT"
for new_group in new_groups:
channel_group = new_group.channel_group
if channel_group.flags & v4c.FLAG_CG_REMOTE_MASTER:
block_type = b"##DV"
total_size += (
channel_group.samples_byte_nr * channel_group.cycles_nr
)
inval_total_size += (
channel_group.invalidation_bytes_nr * channel_group.cycles_nr
)
else:
block_type = b"##DT"
total_size += (
channel_group.samples_byte_nr
+ channel_group.invalidation_bytes_nr
) * channel_group.cycles_nr
if (
self.identification["unfinalized_standard_flags"]
& v4c.FLAG_UNFIN_UPDATE_CG_COUNTER
):
total_size = int(10 ** 12)
inval_total_size = int(10 ** 12)
info, uses_ld = self._get_data_blocks_info(
address=address,
stream=stream,
block_type=block_type,
mapped=mapped,
total_size=total_size,
inval_total_size=inval_total_size,
)
for grp in new_groups:
grp.data_location = v4c.LOCATION_ORIGINAL_FILE
grp.set_blocks_info(info)
grp.uses_ld = uses_ld
self.groups.extend(new_groups)
dg_addr = group.next_dg_addr
# all channels have been loaded so now we can link the
# channel dependencies and load the signal data for VLSD channels
for gp_index, grp in enumerate(self.groups):
if (
self.version >= "4.20"
and grp.channel_group.flags & v4c.FLAG_CG_REMOTE_MASTER
):
grp.channel_group.cg_master_index = self._cg_map[
grp.channel_group.cg_master_addr
]
index = grp.channel_group.cg_master_index
else:
index = gp_index
self.virtual_groups_map[gp_index] = index
if index not in self.virtual_groups:
self.virtual_groups[index] = VirtualChannelGroup()
virtual_channel_group = self.virtual_groups[index]
virtual_channel_group.groups.append(gp_index)
virtual_channel_group.record_size += (
grp.channel_group.samples_byte_nr
+ grp.channel_group.invalidation_bytes_nr
)
virtual_channel_group.cycles_nr = grp.channel_group.cycles_nr
for ch_index, dep_list in enumerate(grp.channel_dependencies):
if not dep_list:
continue
for dep in dep_list:
if isinstance(dep, ChannelArrayBlock):
if dep.flags & v4c.FLAG_CA_DYNAMIC_AXIS:
for i in range(dep.dims):
ch_addr = dep[f"dynamic_size_{i}_ch_addr"]
if ch_addr:
ref_channel = self._ch_map[ch_addr]
dep.dynamic_size_channels.append(ref_channel)
else:
dep.dynamic_size_channels.append(None)
if dep.flags & v4c.FLAG_CA_INPUT_QUANTITY:
for i in range(dep.dims):
ch_addr = dep[f"input_quantity_{i}_ch_addr"]
if ch_addr:
ref_channel = self._ch_map[ch_addr]
dep.input_quantity_channels.append(ref_channel)
else:
dep.input_quantity_channels.append(None)
if dep.flags & v4c.FLAG_CA_OUTPUT_QUANTITY:
ch_addr = dep["output_quantity_ch_addr"]
if ch_addr:
ref_channel = self._ch_map[ch_addr]
dep.output_quantity_channel = ref_channel
else:
dep.output_quantity_channel = None
if dep.flags & v4c.FLAG_CA_COMPARISON_QUANTITY:
ch_addr = dep["comparison_quantity_ch_addr"]
if ch_addr:
ref_channel = self._ch_map[ch_addr]
dep.comparison_quantity_channel = ref_channel
else:
dep.comparison_quantity_channel = None
if dep.flags & v4c.FLAG_CA_AXIS:
for i in range(dep.dims):
cc_addr = dep[f"axis_conversion_{i}"]
if cc_addr:
conv = ChannelConversion(
stream=stream,
address=cc_addr,
mapped=mapped,
tx_map={},
)
dep.axis_conversions.append(conv)
else:
dep.axis_conversions.append(None)
if (dep.flags & v4c.FLAG_CA_AXIS) and not (
dep.flags & v4c.FLAG_CA_FIXED_AXIS
):
for i in range(dep.dims):
ch_addr = dep[f"scale_axis_{i}_ch_addr"]
if ch_addr:
ref_channel = self._ch_map[ch_addr]
dep.axis_channels.append(ref_channel)
else:
dep.axis_channels.append(None)
else:
break
self._sort()
for grp in self.groups:
channels = grp.channels
if (
len(channels) == 1
and channels[0].dtype_fmt.itemsize == grp.channel_group.samples_byte_nr
):
grp.single_channel_dtype = channels[0].dtype_fmt
self._process_bus_logging()
# read events
addr = self.header.first_event_addr
ev_map = {}
event_index = 0
while addr:
if addr > self.file_limit:
logger.warning(
f"Event address {addr:X} is outside the file size {self.file_limit}"
)
break
event = EventBlock(address=addr, stream=stream, mapped=mapped)
event.update_references(self._ch_map, self._cg_map)
self.events.append(event)
ev_map[addr] = event_index
event_index += 1
addr = event.next_ev_addr
for event in self.events:
addr = event.parent_ev_addr
if addr:
parent = ev_map.get(addr, None)
if parent is not None:
event.parent = parent
else:
event.parent = None
addr = event.range_start_ev_addr
if addr:
range_start_ev_addr = ev_map.get(addr, None)
if range_start_ev_addr is not None:
event.parent = range_start_ev_addr
else:
event.parent = None
self._si_map.clear()
self._ch_map.clear()
self._cc_map.clear()
self._interned_strings.clear()
self._attachments_map.clear()
self.progress = cg_count, cg_count
def _read_channels(
self,
ch_addr,
grp,
stream,
dg_cntr,
ch_cntr,
channel_composition=False,
mapped=False,
):
filter_channels = len(self.load_filter) > 0
use_display_names = self._use_display_names
channels = grp.channels
dependencies = grp.channel_dependencies
unique_names = UniqueDB()
if channel_composition:
composition = []
composition_channels = []
if grp.channel_group.path_separator:
path_separator = chr(grp.channel_group.path_separator)
else:
path_separator = "\\"
while ch_addr:
# read channel block and create channel object
if ch_addr > self.file_limit:
logger.warning(
f"Channel address {ch_addr:X} is outside the file size {self.file_limit}"
)
break
if filter_channels:
if mapped:
(
id_,
links_nr,
next_ch_addr,
name_addr,
comment_addr,
) = v4c.CHANNEL_FILTER_uf(stream, ch_addr)
channel_type = stream[ch_addr + v4c.COMMON_SIZE + links_nr * 8]
name = get_text_v4(name_addr, stream, mapped=mapped)
if use_display_names:
comment = get_text_v4(comment_addr, stream, mapped=mapped)
display_name = extract_display_name(comment)
else:
display_name = ""
comment = None
else:
stream.seek(ch_addr)
(
id_,
links_nr,
next_ch_addr,
name_addr,
comment_addr,
) = v4c.CHANNEL_FILTER_u(stream.read(v4c.CHANNEL_FILTER_SIZE))
stream.seek(v4c.COMMON_SIZE + links_nr * 8)
channel_type = stream.read(1)[0]
name = get_text_v4(name_addr, stream, mapped=mapped)
if use_display_names:
comment = get_text_v4(comment_addr, stream, mapped=mapped)
display_name = extract_display_name(comment)
else:
display_name = ""
comment = None
if id_ != b"##CN":
message = f'Expected "##CN" block @{hex(ch_addr)} but found "{id_}"'
raise MdfException(message)
if (
channel_composition
or channel_type in v4c.MASTER_TYPES
or name in self.load_filter
or (use_display_names and display_name in self.load_filter)
):
if comment is None:
comment = get_text_v4(comment_addr, stream, mapped=mapped)
channel = Channel(
address=ch_addr,
stream=stream,
cc_map=self._cc_map,
si_map=self._si_map,
at_map=self._attachments_map,
use_display_names=use_display_names,
mapped=mapped,
tx_map=self._interned_strings,
file_limit=self.file_limit,
parsed_strings=(name, display_name, comment),
)
else:
ch_addr = next_ch_addr
continue
else:
channel = Channel(
address=ch_addr,
stream=stream,
cc_map=self._cc_map,
si_map=self._si_map,
at_map=self._attachments_map,
use_display_names=use_display_names,
mapped=mapped,
tx_map=self._interned_strings,
file_limit=self.file_limit,
parsed_strings=None,
)
if channel.channel_type == v4c.CHANNEL_TYPE_SYNC:
channel.attachment = self._attachments_map.get(
channel.data_block_addr,
None,
)
if self._remove_source_from_channel_names:
channel.name = channel.name.split(path_separator, 1)[0]
channel.display_name = channel.display_name.split(path_separator, 1)[0]
entry = (dg_cntr, ch_cntr)
self._ch_map[ch_addr] = entry
channels.append(channel)
if channel_composition:
composition.append(entry)
composition_channels.append(channel)
if channel.display_name:
self.channels_db.add(channel.display_name, entry)
self.channels_db.add(channel.name, entry)
# signal data
cn_data_addr = channel.data_block_addr
grp.signal_data.append(cn_data_addr)
if cn_data_addr:
self._cn_data_map[cn_data_addr] = entry
if channel.channel_type in MASTER_CHANNELS:
self.masters_db[dg_cntr] = ch_cntr
ch_cntr += 1
component_addr = channel.component_addr
if component_addr:
if component_addr > self.file_limit:
logger.warning(
f"Channel component address {component_addr:X} is outside the file size {self.file_limit}"
)
break
# check if it is a CABLOCK or CNBLOCK
stream.seek(component_addr)
blk_id = stream.read(4)
if blk_id == b"##CN":
index = ch_cntr - 1
dependencies.append(None)
(
ch_cntr,
ret_composition,
ret_composition_dtype,
) = self._read_channels(
component_addr,
grp,
stream,
dg_cntr,
ch_cntr,
True,
mapped=mapped,
)
dependencies[index] = ret_composition
channel.dtype_fmt = ret_composition_dtype
else:
# only channel arrays with storage=CN_TEMPLATE are
# supported so far
ca_block = ChannelArrayBlock(
address=component_addr, stream=stream, mapped=mapped
)
if ca_block.storage != v4c.CA_STORAGE_TYPE_CN_TEMPLATE:
logger.warning("Only CN template arrays are supported")
ca_list = [ca_block]
while ca_block.composition_addr:
ca_block = ChannelArrayBlock(
address=ca_block.composition_addr,
stream=stream,
mapped=mapped,
)
ca_list.append(ca_block)
dependencies.append(ca_list)
channel.dtype_fmt = dtype(
get_fmt_v4(
channel.data_type,
channel.bit_offset + channel.bit_count,
channel.channel_type,
)
)
else:
dependencies.append(None)
channel.dtype_fmt = dtype(
get_fmt_v4(
channel.data_type,
channel.bit_offset + channel.bit_count,
channel.channel_type,
)
)
# go to next channel of the current channel group
ch_addr = channel.next_ch_addr
if channel_composition:
composition_channels.sort()
composition_dtype = dtype(
[
(unique_names.get_unique_name(channel.name), channel.dtype_fmt)
for channel in composition_channels
]
)
else:
composition = None
composition_dtype = None
return ch_cntr, composition, composition_dtype
def _load_signal_data(
self, address=None, stream=None, group=None, index=None, offset=0, count=None
):
"""this method is used to get the channel signal data, usually for
VLSD channels
Parameters
----------
address : int
address of refrerenced block
stream : handle
file IO stream handle
Returns
-------
data : bytes
signal data bytes
"""
with_bounds = False
if address == 0:
data = b""
elif address is not None and stream is not None:
stream.seek(address)
blk_id = stream.read(4)
if blk_id == b"##SD":
data = DataBlock(address=address, stream=stream)
data = data.data
elif blk_id == b"##DZ":
data = DataZippedBlock(address=address, stream=stream)
data = data.data
elif blk_id == b"##CG":
group = self.groups[self._cg_map[address]]
data = b"".join(fragment[0] for fragment in self._load_data(group))
elif blk_id == b"##DL":
data = []
while address:
# the data list will contain only links to SDBLOCKs
data_list = DataList(address=address, stream=stream)
nr = data_list.links_nr
# aggregate data from all SDBLOCKs
for i in range(nr - 1):
addr = data_list[f"data_block_addr{i}"]
stream.seek(addr)
blk_id = stream.read(4)
if blk_id == b"##SD":
block = DataBlock(address=addr, stream=stream)
data.append(block.data)
elif blk_id == b"##DZ":
block = DataZippedBlock(address=addr, stream=stream)
data.append(block.data)
else:
message = f'Expected SD, DZ or DL block at {hex(address)} but found id="{blk_id}"'
logger.warning(message)
return b"", with_bounds
address = data_list.next_dl_addr
data = b"".join(data)
elif blk_id == b"##CN":
data = b""
elif blk_id == b"##HL":
hl = HeaderList(address=address, stream=stream)
data, with_bounds = self._load_signal_data(
address=hl.first_dl_addr, stream=stream, group=group, index=index
)
elif blk_id == b"##AT":
data = b""
else:
message = f'Expected AT, CG, SD, DL, DZ or CN block at {hex(address)} but found id="{blk_id}"'
logger.warning(message)
data = b""
elif group is not None and index is not None:
if group.data_location == v4c.LOCATION_ORIGINAL_FILE:
data, with_bounds = self._load_signal_data(
address=group.signal_data[index], stream=self._file
)
elif group.data_location == v4c.LOCATION_MEMORY:
data = group.signal_data[index]
else:
data = []
stream = self._tempfile
address = group.signal_data[index]
if address:
if isinstance(address, int):
if address in self._cg_map:
group = self.groups[self._cg_map[address]]
data.append(b"".join(e[0] for e in self._load_data(group)))
else:
if isinstance(address[0], SignalDataBlockInfo):
if address[0].offsets is not None:
with_bounds = True
current_offset = 0
if count is not None:
end = offset + count
else:
end = None
for info in address:
current_count = info.count
if current_offset + current_count < offset:
current_offset += current_count
continue
if current_offset < offset:
start_addr = (
info.address
+ info.offsets[offset - current_offset]
)
else:
start_addr = info.address
if end is not None:
if end <= current_offset:
break
elif end >= current_offset + current_count:
end_addr = info.address + info.size
else:
end_addr = (
info.address
+ info.offsets[end - current_offset]
)
else:
end_addr = info.address + info.size
size = int(end_addr - start_addr)
start_addr = int(start_addr)
stream.seek(start_addr)
data.append(stream.read(size))
current_offset += current_count
else:
for info in address:
if not info.size:
continue
stream.seek(info.address)
data.append(stream.read(info.size))
elif address[0] in self._cg_map:
group = self.groups[self._cg_map[address[0]]]
data.append(b"".join(e[0] for e in self._load_data(group)))
data = b"".join(data)
else:
data = b""
return data, with_bounds
def _load_data(
self, group, record_offset=0, record_count=None, optimize_read=False
):
""" get group's data block bytes """
offset = 0
invalidation_offset = 0
has_yielded = False
_count = 0
data_group = group.data_group
channel_group = group.channel_group
if group.data_location == v4c.LOCATION_ORIGINAL_FILE:
stream = self._file
else:
stream = self._tempfile
read = stream.read
seek = stream.seek
if group.uses_ld:
samples_size = channel_group.samples_byte_nr
invalidation_size = channel_group.invalidation_bytes_nr
invalidation_record_offset = record_offset * invalidation_size
rm = True
else:
rm = False
samples_size = (
channel_group.samples_byte_nr + channel_group.invalidation_bytes_nr
)
invalidation_size = channel_group.invalidation_bytes_nr
record_offset *= samples_size
finished = False
if record_count is not None:
invalidation_record_count = record_count * invalidation_size
record_count *= samples_size
if not samples_size:
if rm:
yield b"", offset, _count, b""
else:
yield b"", offset, _count, None
else:
if group.read_split_count:
split_size = group.read_split_count * samples_size
invalidation_split_size = group.read_split_count * invalidation_size
else:
if self._read_fragment_size:
split_size = self._read_fragment_size // samples_size
invalidation_split_size = split_size * invalidation_size
split_size *= samples_size
else:
channels_nr = len(group.channels)
y_axis = CONVERT
idx = searchsorted(CHANNEL_COUNT, channels_nr, side="right") - 1
if idx < 0:
idx = 0
split_size = y_axis[idx]
split_size = split_size // samples_size
invalidation_split_size = split_size * invalidation_size
split_size *= samples_size
if split_size == 0:
split_size = samples_size
invalidation_split_size = invalidation_size
split_size = int(split_size)
invalidation_split_size = int(invalidation_split_size)
blocks = iter(group.data_blocks)
if group.data_blocks:
cur_size = 0
data = []
cur_invalidation_size = 0
invalidation_data = []
while True:
try:
info = next(blocks)
address, size, block_size, block_type, param, block_limit = (
info.address,
info.raw_size,
info.size,
info.block_type,
info.param,
info.block_limit,
)
if rm and invalidation_size:
invalidation_info = info.invalidation_block
else:
invalidation_info = None
except StopIteration:
break
if offset + size < record_offset + 1:
offset += size
if rm and invalidation_size:
if invalidation_info.all_valid:
count = size // samples_size
invalidation_offset += count * invalidation_size
else:
invalidation_offset += invalidation_info.raw_size
continue
seek(address)
new_data = read(block_size)
if block_type == v4c.DZ_BLOCK_DEFLATE:
new_data = decompress(new_data, 0, size)
elif block_type == v4c.DZ_BLOCK_TRANSPOSED:
new_data = decompress(new_data, 0, size)
cols = param
lines = size // cols
nd = frombuffer(new_data[: lines * cols], dtype=uint8)
nd = nd.reshape((cols, lines))
new_data = nd.T.tobytes() + new_data[lines * cols :]
elif block_type == v4c.DZ_BLOCK_LZ:
new_data = lz_decompress(new_data)
if block_limit is not None:
new_data = new_data[:block_limit]
if len(data) > split_size - cur_size:
new_data = memoryview(new_data)
if rm and invalidation_size:
if invalidation_info.all_valid:
count = size // samples_size
new_invalidation_data = bytes(count * invalidation_size)
else:
seek(invalidation_info.address)
new_invalidation_data = read(invalidation_info.size)
if invalidation_info.block_type == v4c.DZ_BLOCK_DEFLATE:
new_invalidation_data = decompress(
new_invalidation_data,
0,
invalidation_info.raw_size,
)
elif (
invalidation_info.block_type == v4c.DZ_BLOCK_TRANSPOSED
):
new_invalidation_data = decompress(
new_invalidation_data,
0,
invalidation_info.raw_size,
)
cols = invalidation_info.param
lines = invalidation_info.raw_size // cols
nd = frombuffer(
new_invalidation_data[: lines * cols], dtype=uint8
)
nd = nd.reshape((cols, lines))
new_invalidation_data = (
nd.T.tobytes()
+ new_invalidation_data[lines * cols :]
)
if invalidation_info.block_limit is not None:
new_invalidation_data = new_invalidation_data[
: invalidation_info.block_limit
]
inv_size = len(new_invalidation_data)
if offset < record_offset:
delta = record_offset - offset
new_data = new_data[delta:]
size -= delta
offset = record_offset
if rm and invalidation_size:
delta = invalidation_record_offset - invalidation_offset
new_invalidation_data = new_invalidation_data[delta:]
inv_size -= delta
invalidation_offset = invalidation_record_offset
while size >= split_size - cur_size:
if data:
data.append(new_data[: split_size - cur_size])
new_data = new_data[split_size - cur_size :]
data_ = b"".join(data)
if rm and invalidation_size:
invalidation_data.append(
new_invalidation_data[
: invalidation_split_size
- cur_invalidation_size
]
)
new_invalidation_data = new_invalidation_data[
invalidation_split_size - cur_invalidation_size :
]
invalidation_data_ = b"".join(invalidation_data)
if record_count is not None:
if rm and invalidation_size:
__data = data_[:record_count]
_count = len(__data) // samples_size
yield __data, offset // samples_size, _count, invalidation_data_[
:invalidation_record_count
]
invalidation_record_count -= len(invalidation_data_)
else:
__data = data_[:record_count]
_count = len(__data) // samples_size
yield __data, offset // samples_size, _count, None
has_yielded = True
record_count -= len(data_)
if record_count <= 0:
finished = True
break
else:
if rm and invalidation_size:
_count = len(data_) // samples_size
yield data_, offset // samples_size, _count, invalidation_data_
else:
_count = len(data_) // samples_size
yield data_, offset // samples_size, _count, None
has_yielded = True
data = []
else:
data_, new_data = (
new_data[:split_size],
new_data[split_size:],
)
if rm and invalidation_size:
invalidation_data_ = new_invalidation_data[
:invalidation_split_size
]
new_invalidation_data = new_invalidation_data[
invalidation_split_size:
]
if record_count is not None:
if rm and invalidation_size:
yield data_[
:record_count
], offset // samples_size, _count, invalidation_data_[
:invalidation_record_count
]
invalidation_record_count -= len(invalidation_data_)
else:
__data = data_[:record_count]
_count = len(__data) // samples_size
yield __data, offset // samples_size, _count, None
has_yielded = True
record_count -= len(data_)
if record_count <= 0:
finished = True
break
else:
if rm and invalidation_size:
_count = len(data_) // samples_size
yield data_, offset // samples_size, _count, invalidation_data_
else:
_count = len(data_) // samples_size
yield data_, offset // samples_size, _count, None
has_yielded = True
offset += split_size
size -= split_size - cur_size
data = []
cur_size = 0
if rm and invalidation_size:
invalidation_offset += invalidation_split_size
invalidation_data = []
cur_invalidation_size = 0
inv_size -= invalidation_split_size - cur_invalidation_size
if finished:
data = []
if rm and invalidation_size:
invalidation_data = []
break
if size:
data.append(new_data)
cur_size += size
size = 0
if rm and invalidation_size:
invalidation_data.append(new_invalidation_data)
cur_invalidation_size += inv_size
if data:
data_ = b"".join(data)
if rm and invalidation_size:
invalidation_data_ = b"".join(invalidation_data)
if record_count is not None:
if rm and invalidation_size:
__data = data_[:record_count]
_count = len(__data) // samples_size
yield __data, offset // samples_size, _count, invalidation_data_[
:invalidation_record_count
]
invalidation_record_count -= len(invalidation_data_)
else:
__data = data_[:record_count]
_count = len(__data) // samples_size
yield __data, offset // samples_size, _count, None
has_yielded = True
record_count -= len(data_)
else:
if rm and invalidation_size:
_count = len(data_) // samples_size
yield data_, offset // samples_size, _count, invalidation_data_
else:
_count = len(data_) // samples_size
yield data_, offset // samples_size, _count, None
has_yielded = True
data = []
if not has_yielded:
if rm and invalidation_size:
yield b"", 0, 0, b""
else:
yield b"", 0, 0, None
else:
if rm and invalidation_size:
yield b"", offset, 0, b""
else:
yield b"", offset, 0, None
def _prepare_record(self, group):
"""compute record dtype and parents dict fro this group
Parameters
----------
group : dict
MDF group dict
Returns
-------
parents, dtypes : dict, numpy.dtype
mapping of channels to records fields, records fields dtype
"""
parents, dtypes = group.parents, group.types
if parents is None:
no_parent = None, None
channel_group = group.channel_group
channels = group.channels
record_size = channel_group.samples_byte_nr
invalidation_bytes_nr = channel_group.invalidation_bytes_nr
next_byte_aligned_position = 0
types = []
current_parent = ""
parent_start_offset = 0
parents = {}
group_channels = UniqueDB()
sortedchannels = sorted(enumerate(channels), key=lambda i: i[1])
for original_index, new_ch in sortedchannels:
start_offset = new_ch.byte_offset
bit_offset = new_ch.bit_offset
data_type = new_ch.data_type
bit_count = new_ch.bit_count
ch_type = new_ch.channel_type
dependency_list = group.channel_dependencies[original_index]
name = new_ch.name
# handle multiple occurrences of the same channel name
name = group_channels.get_unique_name(name)
if start_offset >= next_byte_aligned_position:
if ch_type not in v4c.VIRTUAL_TYPES:
if not dependency_list:
parent_start_offset = start_offset
# check if there are byte gaps in the record
gap = parent_start_offset - next_byte_aligned_position
if gap:
types.append(("", f"V{gap}"))
# adjust size to 1, 2, 4 or 8 bytes
size = bit_offset + bit_count
byte_size, rem = size // 8, size % 8
if rem:
byte_size += 1
bit_size = byte_size * 8
if data_type in (
v4c.DATA_TYPE_SIGNED_MOTOROLA,
v4c.DATA_TYPE_UNSIGNED_MOTOROLA,
):
if size > 32:
size = 8
bit_offset += 64 - bit_size
elif size > 16:
size = 4
bit_offset += 32 - bit_size
elif size > 8:
size = 2
bit_offset += 16 - bit_size
else:
size = 1
elif data_type not in v4c.NON_SCALAR_TYPES:
if size > 32:
size = 8
elif size > 16:
size = 4
elif size > 8:
size = 2
else:
size = 1
else:
size = size // 8
next_byte_aligned_position = parent_start_offset + size
bit_count = size * 8
if next_byte_aligned_position <= record_size:
if not new_ch.dtype_fmt:
new_ch.dtype_fmt = get_fmt_v4(
data_type, bit_count, ch_type
)
dtype_pair = (name, new_ch.dtype_fmt)
types.append(dtype_pair)
parents[original_index] = name, bit_offset
else:
next_byte_aligned_position = parent_start_offset
current_parent = name
else:
if isinstance(dependency_list[0], ChannelArrayBlock):
ca_block = dependency_list[0]
# check if there are byte gaps in the record
gap = start_offset - next_byte_aligned_position
if gap:
dtype_pair = "", f"V{gap}"
types.append(dtype_pair)
size = bit_count // 8 or 1
shape = tuple(
ca_block[f"dim_size_{i}"]
for i in range(ca_block.dims)
)
if (
ca_block.byte_offset_base // size > 1
and len(shape) == 1
):
shape += (ca_block.byte_offset_base // size,)
dim = 1
for d in shape:
dim *= d
if not new_ch.dtype_fmt:
new_ch.dtype_fmt = get_fmt_v4(data_type, bit_count)
dtype_pair = (name, new_ch.dtype_fmt, shape)
types.append(dtype_pair)
current_parent = name
next_byte_aligned_position = start_offset + size * dim
parents[original_index] = name, 0
else:
parents[original_index] = no_parent
# virtual channels do not have bytes in the record
else:
parents[original_index] = no_parent
else:
size = bit_offset + bit_count
byte_size, rem = size // 8, size % 8
if rem:
byte_size += 1
max_overlapping_size = (
next_byte_aligned_position - start_offset
) * 8
needed_size = bit_offset + bit_count
if max_overlapping_size >= needed_size:
if data_type in (
v4c.DATA_TYPE_SIGNED_MOTOROLA,
v4c.DATA_TYPE_UNSIGNED_MOTOROLA,
):
parents[original_index] = (
current_parent,
(next_byte_aligned_position - start_offset - byte_size)
* 8
+ bit_offset,
)
else:
parents[original_index] = (
current_parent,
((start_offset - parent_start_offset) * 8) + bit_offset,
)
if next_byte_aligned_position > record_size:
break
gap = record_size - next_byte_aligned_position
if gap > 0:
dtype_pair = "", f"V{gap}"
types.append(dtype_pair)
if not group.uses_ld:
dtype_pair = "invalidation_bytes", "<u1", (invalidation_bytes_nr,)
types.append(dtype_pair)
dtypes = dtype(types)
group.parents, group.types = parents, dtypes
return parents, dtypes
def _get_data_blocks_info(
self,
address,
stream,
block_type=b"##DT",
mapped=False,
total_size=0,
inval_total_size=0,
):
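# Walk the data block chain that starts at `address` (DT/DV, DZ, DL, LD or
# HL blocks) and return the DataBlockInfo entries plus a flag that tells
# whether LDBLOCK (column oriented) storage is used; `total_size` and
# `inval_total_size` clip the last blocks when the chain holds more bytes
# than the channel group actually needs.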
info = []
mapped = mapped or not is_file_like(stream)
uses_ld = False
if mapped:
if address:
id_string, block_len = COMMON_SHORT_uf(stream, address)
# can be a DataBlock
if id_string == block_type:
size = block_len - 24
if size:
if total_size < size:
block_limit = total_size
else:
block_limit = None
total_size -= size
info.append(
DataBlockInfo(
address=address + COMMON_SIZE,
block_type=v4c.DT_BLOCK,
raw_size=size,
size=size,
param=0,
block_limit=block_limit,
)
)
# or a DataZippedBlock
elif id_string == b"##DZ":
(
original_type,
zip_type,
param,
original_size,
zip_size,
) = v4c.DZ_COMMON_INFO_uf(stream, address)
if original_size:
if zip_type == v4c.FLAG_DZ_DEFLATE:
block_type_ = v4c.DZ_BLOCK_DEFLATE
param = 0
else:
block_type_ = v4c.DZ_BLOCK_TRANSPOSED
if total_size < original_size:
block_limit = total_size
else:
block_limit = None
total_size -= original_size
info.append(
DataBlockInfo(
address=address + v4c.DZ_COMMON_SIZE,
block_type=block_type_,
raw_size=original_size,
size=zip_size,
param=param,
block_limit=block_limit,
)
)
# or a DataList
elif id_string == b"##DL":
while address:
dl = DataList(address=address, stream=stream, mapped=mapped)
for i in range(dl.data_block_nr):
addr = dl[f"data_block_addr{i}"]
id_string, block_len = COMMON_SHORT_uf(stream, addr)
# can be a DataBlock
if id_string == block_type:
size = block_len - 24
if size:
if total_size < size:
block_limit = total_size
else:
block_limit = None
total_size -= size
info.append(
DataBlockInfo(
address=addr + COMMON_SIZE,
block_type=v4c.DT_BLOCK,
raw_size=size,
size=size,
param=0,
block_limit=block_limit,
)
)
# or a DataZippedBlock
elif id_string == b"##DZ":
(
original_type,
zip_type,
param,
original_size,
zip_size,
) = v4c.DZ_COMMON_INFO_uf(stream, addr)
if original_size:
if zip_type == v4c.FLAG_DZ_DEFLATE:
block_type_ = v4c.DZ_BLOCK_DEFLATE
param = 0
else:
block_type_ = v4c.DZ_BLOCK_TRANSPOSED
if total_size < original_size:
block_limit = total_size
else:
block_limit = None
total_size -= original_size
info.append(
DataBlockInfo(
address=addr + v4c.DZ_COMMON_SIZE,
block_type=block_type_,
raw_size=original_size,
size=zip_size,
param=param,
block_limit=block_limit,
)
)
address = dl.next_dl_addr
# or a ListData
elif id_string == b"##LD":
uses_ld = True
while address:
ld = ListData(address=address, stream=stream, mapped=mapped)
has_invalidation = ld.flags & v4c.FLAG_LD_INVALIDATION_PRESENT
for i in range(ld.data_block_nr):
addr = ld[f"data_block_addr_{i}"]
id_string, block_len = COMMON_SHORT_uf(stream, addr)
# can be a DataBlock
if id_string == b"##DV":
size = block_len - 24
if size:
if total_size < size:
block_limit = total_size
else:
block_limit = None
total_size -= size
info.append(
DataBlockInfo(
address=addr + COMMON_SIZE,
block_type=v4c.DT_BLOCK,
raw_size=size,
size=size,
param=0,
block_limit=block_limit,
)
)
# or a DataZippedBlock
elif id_string == b"##DZ":
(
original_type,
zip_type,
param,
original_size,
zip_size,
) = v4c.DZ_COMMON_INFO_uf(stream, addr)
if original_size:
if zip_type == v4c.FLAG_DZ_DEFLATE:
block_type_ = v4c.DZ_BLOCK_DEFLATE
param = 0
else:
block_type_ = v4c.DZ_BLOCK_TRANSPOSED
if total_size < original_size:
block_limit = total_size
else:
block_limit = None
total_size -= original_size
info.append(
DataBlockInfo(
address=addr + v4c.DZ_COMMON_SIZE,
block_type=block_type_,
raw_size=original_size,
size=zip_size,
param=param,
block_limit=block_limit,
)
)
if has_invalidation:
inval_addr = ld[f"invalidation_bits_addr_{i}"]
if inval_addr:
id_string, block_len = COMMON_SHORT_uf(
stream, inval_addr
)
if id_string == b"##DI":
size = block_len - 24
if size:
if inval_total_size < size:
block_limit = inval_total_size
else:
block_limit = None
inval_total_size -= size
info[
-1
].invalidation_block = InvalidationBlockInfo(
address=inval_addr + COMMON_SIZE,
block_type=v4c.DT_BLOCK,
raw_size=size,
size=size,
param=0,
block_limit=block_limit,
)
else:
(
original_type,
zip_type,
param,
original_size,
zip_size,
) = v4c.DZ_COMMON_INFO_uf(stream, inval_addr)
if original_size:
if zip_type == v4c.FLAG_DZ_DEFLATE:
block_type_ = v4c.DZ_BLOCK_DEFLATE
param = 0
else:
block_type_ = v4c.DZ_BLOCK_TRANSPOSED
if inval_total_size < original_size:
block_limit = inval_total_size
else:
block_limit = None
inval_total_size -= original_size
info[
-1
].invalidation_block = InvalidationBlockInfo(
address=inval_addr + v4c.DZ_COMMON_SIZE,
block_type=block_type_,
raw_size=original_size,
size=zip_size,
param=param,
block_limit=block_limit,
)
else:
info[-1].invalidation_block = InvalidationBlockInfo(
address=0,
block_type=v4c.DT_BLOCK,
raw_size=None,
size=None,
param=None,
all_valid=True,
)
address = ld.next_ld_addr
# or a header list
elif id_string == b"##HL":
hl = HeaderList(address=address, stream=stream, mapped=mapped)
address = hl.first_dl_addr
info, uses_ld = self._get_data_blocks_info(
address,
stream,
block_type,
mapped,
total_size,
inval_total_size,
)
else:
if address:
stream.seek(address)
id_string, block_len = COMMON_SHORT_u(stream.read(COMMON_SHORT_SIZE))
# can be a DataBlock
if id_string == block_type:
size = block_len - 24
if size:
if total_size < size:
block_limit = total_size
else:
block_limit = None
total_size -= size
info.append(
DataBlockInfo(
address=address + COMMON_SIZE,
block_type=v4c.DT_BLOCK,
raw_size=size,
size=size,
param=0,
block_limit=block_limit,
)
)
# or a DataZippedBlock
elif id_string == b"##DZ":
stream.seek(address)
(
original_type,
zip_type,
param,
original_size,
zip_size,
) = v4c.DZ_COMMON_INFO_u(stream.read(v4c.DZ_COMMON_SIZE))
if original_size:
if zip_type == v4c.FLAG_DZ_DEFLATE:
block_type_ = v4c.DZ_BLOCK_DEFLATE
param = 0
else:
block_type_ = v4c.DZ_BLOCK_TRANSPOSED
if total_size < original_size:
block_limit = total_size
else:
block_limit = None
total_size -= original_size
info.append(
DataBlockInfo(
address=address + v4c.DZ_COMMON_SIZE,
block_type=block_type_,
raw_size=original_size,
size=zip_size,
param=param,
block_limit=block_limit,
)
)
# or a DataList
elif id_string == b"##DL":
while address:
dl = DataList(address=address, stream=stream)
for i in range(dl.data_block_nr):
addr = dl[f"data_block_addr{i}"]
stream.seek(addr)
id_string, block_len = COMMON_SHORT_u(
stream.read(COMMON_SHORT_SIZE)
)
# can be a DataBlock
if id_string == block_type:
size = block_len - 24
if size:
if total_size < size:
block_limit = total_size
else:
block_limit = None
total_size -= size
info.append(
DataBlockInfo(
address=addr + COMMON_SIZE,
block_type=v4c.DT_BLOCK,
raw_size=size,
size=size,
param=0,
block_limit=block_limit,
)
)
# or a DataZippedBlock
elif id_string == b"##DZ":
stream.seek(addr)
(
original_type,
zip_type,
param,
original_size,
zip_size,
) = v4c.DZ_COMMON_INFO_u(
stream.read(v4c.DZ_COMMON_SIZE)
)
if original_size:
if zip_type == v4c.FLAG_DZ_DEFLATE:
block_type_ = v4c.DZ_BLOCK_DEFLATE
param = 0
else:
block_type_ = v4c.DZ_BLOCK_TRANSPOSED
if total_size < original_size:
block_limit = total_size
else:
block_limit = None
total_size -= original_size
info.append(
DataBlockInfo(
address=addr + v4c.DZ_COMMON_SIZE,
block_type=block_type_,
raw_size=original_size,
size=zip_size,
param=param,
block_limit=block_limit,
)
)
address = dl.next_dl_addr
# or a ListData
elif id_string == b"##LD":
uses_ld = True
while address:
ld = ListData(address=address, stream=stream)
has_invalidation = ld.flags & v4c.FLAG_LD_INVALIDATION_PRESENT
for i in range(ld.data_block_nr):
addr = ld[f"data_block_addr{i}"]
stream.seek(addr)
id_string, block_len = COMMON_SHORT_u(
stream.read(COMMON_SHORT_SIZE)
)
# can be a DataBlock
if id_string == b"##DV":
size = block_len - 24
if size:
if total_size < size:
block_limit = total_size
else:
block_limit = None
total_size -= size
info.append(
DataBlockInfo(
address=addr + COMMON_SIZE,
block_type=v4c.DT_BLOCK,
raw_size=size,
size=size,
param=0,
block_limit=block_limit,
)
)
# or a DataZippedBlock
elif id_string == b"##DZ":
stream.seek(addr)
(
original_type,
zip_type,
param,
original_size,
zip_size,
) = v4c.DZ_COMMON_INFO_u(
stream.read(v4c.DZ_COMMON_SIZE)
)
if original_size:
if zip_type == v4c.FLAG_DZ_DEFLATE:
block_type_ = v4c.DZ_BLOCK_DEFLATE
param = 0
else:
block_type_ = v4c.DZ_BLOCK_TRANSPOSED
if total_size < original_size:
block_limit = total_size
else:
block_limit = None
total_size -= original_size
info.append(
DataBlockInfo(
address=addr + v4c.DZ_COMMON_SIZE,
block_type=block_type_,
raw_size=original_size,
size=zip_size,
param=param,
block_limit=block_limit,
)
)
if has_invalidation:
inval_addr = ld[f"invalidation_bits_addr_{i}"]
if inval_addr:
stream.seek(inval_addr)
id_string, block_len = COMMON_SHORT_u(
stream.read(COMMON_SHORT_SIZE)
)
if id_string == b"##DI":
size = block_len - 24
if size:
if inval_total_size < size:
block_limit = inval_total_size
else:
block_limit = None
inval_total_size -= size
info[
-1
].invalidation_block = InvalidationBlockInfo(
address=inval_addr + COMMON_SIZE,
block_type=v4c.DT_BLOCK,
raw_size=size,
size=size,
param=0,
block_limit=block_limit,
)
else:
(
original_type,
zip_type,
param,
original_size,
zip_size,
) = v4c.DZ_COMMON_INFO_u(
stream.read(v4c.DZ_COMMON_SIZE)
)
if original_size:
if zip_type == v4c.FLAG_DZ_DEFLATE:
block_type_ = v4c.DZ_BLOCK_DEFLATE
param = 0
else:
block_type_ = v4c.DZ_BLOCK_TRANSPOSED
if inval_total_size < original_size:
block_limit = inval_total_size
else:
block_limit = None
inval_total_size -= original_size
info[
-1
].invalidation_block = InvalidationBlockInfo(
address=inval_addr + v4c.DZ_COMMON_SIZE,
block_type=block_type_,
raw_size=original_size,
size=zip_size,
param=param,
block_limit=block_limit,
)
else:
info[-1].invalidation_block = InvalidationBlockInfo(
address=0,
block_type=v4c.DT_BLOCK,
raw_size=0,
size=0,
param=0,
all_valid=True,
)
address = ld.next_ld_addr
# or a header list
elif id_string == b"##HL":
hl = HeaderList(address=address, stream=stream)
address = hl.first_dl_addr
info, uses_ld = self._get_data_blocks_info(
address,
stream,
block_type,
mapped,
total_size,
inval_total_size,
)
return info, uses_ld
def get_invalidation_bits(self, group_index, channel, fragment):
"""get invalidation indexes for the channel
Parameters
----------
group_index : int
group index
channel : Channel
channel object
fragment : (bytes, int)
(fragment bytes, fragment offset)
Returns
-------
invalidation_bits : numpy.ndarray
boolean array with one entry per sample; a True entry means the
invalidation bit is set (the sample is invalid) for this channel
"""
group = self.groups[group_index]
dtypes = group.types
data_bytes, offset, _count, invalidation_bytes = fragment
try:
invalidation = self._invalidation_cache[(group_index, offset, _count)]
except KeyError:
if invalidation_bytes is not None:
size = group.channel_group.invalidation_bytes_nr
invalidation = frombuffer(invalidation_bytes, dtype=f"({size},)u1")
else:
record = group.record
if record is None:
dtypes = group.types
if dtypes.itemsize:
record = fromstring(data_bytes, dtype=dtypes)
else:
record = None
invalidation = record["invalidation_bytes"].copy()
self._invalidation_cache[(group_index, offset, _count)] = invalidation
ch_invalidation_pos = channel.pos_invalidation_bit
pos_byte, pos_offset = ch_invalidation_pos // 8, ch_invalidation_pos % 8
mask = 1 << pos_offset
invalidation_bits = invalidation[:, pos_byte] & mask
invalidation_bits = invalidation_bits.astype(bool)
return invalidation_bits
def configure(
self,
*,
from_other=None,
read_fragment_size=None,
write_fragment_size=None,
use_display_names=None,
single_bit_uint_as_bool=None,
integer_interpolation=None,
copy_on_get=None,
float_interpolation=None,
raise_on_multiple_occurrences=None,
):
"""configure MDF parameters.
The default values for the options are the following:
* read_fragment_size = 0
* write_fragment_size = 4MB
* use_display_names = False
* single_bit_uint_as_bool = False
* integer_interpolation = 0 (ffill - use previous sample)
* float_interpolation = 1 (linear interpolation)
* copy_on_get = False
* raise_on_multiple_occurrences = True
Parameters
----------
read_fragment_size : int
size hint of split data blocks, default 8MB; if the initial size is
smaller, then no data list is used. The actual split size depends on
the data groups' records size
write_fragment_size : int
size hint of split data blocks, default 4MB; if the initial size is
smaller, then no data list is used. The actual split size depends on
the data groups' records size. Maximum size is 4MB to ensure
compatibility with CANape
use_display_names : bool
search for display name in the Channel XML comment
single_bit_uint_as_bool : bool
return single bit channels as np.bool arrays
integer_interpolation : int
interpolation mode for integer channels:
* 0 - repeat previous sample
* 1 - use linear interpolation
* 2 - hybrid interpolation: channels with integer data type (raw values) that have a
conversion that outputs float values will use linear interpolation, otherwise
the previous sample is used
.. versionchanged:: 6.2.0
added hybrid mode interpolation
copy_on_get : bool
copy arrays in the get method
float_interpolation : int
interpolation mode for float channels:
* 0 - repeat previous sample
* 1 - use linear interpolation
.. versionadded:: 6.2.0
raise_on_multiple_occurrences : bool
raise exception when there are multiple channel occurrences in the file and
the `get` call is ambiguous; default True
.. versionadded:: 6.2.0
from_other : MDF
copy configuration options from other MDF
.. versionadded:: 6.2.0
"""
if from_other is not None:
self._read_fragment_size = from_other._read_fragment_size
self._write_fragment_size = from_other._write_fragment_size
self._use_display_names = from_other._use_display_names
self._single_bit_uint_as_bool = from_other._single_bit_uint_as_bool
self._integer_interpolation = from_other._integer_interpolation
self.copy_on_get = from_other.copy_on_get
self._float_interpolation = from_other._float_interpolation
self.raise_on_multiple_occurrences = from_other.raise_on_multiple_occurrences
if read_fragment_size is not None:
self._read_fragment_size = int(read_fragment_size)
if write_fragment_size:
self._write_fragment_size = min(int(write_fragment_size), 4 * 2 ** 20)
if use_display_names is not None:
self._use_display_names = bool(use_display_names)
if single_bit_uint_as_bool is not None:
self._single_bit_uint_as_bool = bool(single_bit_uint_as_bool)
if integer_interpolation in (0, 1, 2):
self._integer_interpolation = int(integer_interpolation)
if copy_on_get is not None:
self.copy_on_get = copy_on_get
if float_interpolation in (0, 1):
self._float_interpolation = int(float_interpolation)
if raise_on_multiple_occurrences is not None:
self.raise_on_multiple_occurrences = bool(raise_on_multiple_occurrences)
def append(
self,
signals,
acq_name=None,
acq_source=None,
comment="Python",
common_timebase=False,
units=None,
):
"""
Appends a new data group.
For Signals with channel dependencies, the *samples* attribute must be
a numpy.recarray
Parameters
----------
signals : list | Signal | pandas.DataFrame
list of *Signal* objects, or a single *Signal* object, or a pandas
*DataFrame* object. All bytes columns in the pandas *DataFrame*
must be *utf-8* encoded
acq_name : str
channel group acquisition name
acq_source : Source
channel group acquisition source
comment : str
channel group comment; default 'Python'
common_timebase : bool
flag to hint that the signals have the same timebase. Only set this
if you know for sure that all appended channels share the same
time base
units : dict
mapping of signal names to units; used when appending a pandas
DataFrame
Examples
--------
>>> # case 1 conversion type None
>>> s1 = np.array([1, 2, 3, 4, 5])
>>> s2 = np.array([-1, -2, -3, -4, -5])
>>> s3 = np.array([0.1, 0.04, 0.09, 0.16, 0.25])
>>> t = np.array([0.001, 0.002, 0.003, 0.004, 0.005])
>>> names = ['Positive', 'Negative', 'Float']
>>> units = ['+', '-', '.f']
>>> info = {}
>>> s1 = Signal(samples=s1, timestamps=t, unit='+', name='Positive')
>>> s2 = Signal(samples=s2, timestamps=t, unit='-', name='Negative')
>>> s3 = Signal(samples=s3, timestamps=t, unit='flts', name='Floats')
>>> mdf = MDF4('new.mdf')
>>> mdf.append([s1, s2, s3], comment='created by asammdf v4.0.0')
>>> # case 2: VTAB conversions from channels inside another file
>>> mdf1 = MDF4('in.mf4')
>>> ch1 = mdf1.get("Channel1_VTAB")
>>> ch2 = mdf1.get("Channel2_VTABR")
>>> sigs = [ch1, ch2]
>>> mdf2 = MDF4('out.mf4')
>>> mdf2.append(sigs, comment='created by asammdf v4.0.0')
>>> mdf2.append(ch1, comment='just a single channel')
>>> df = pd.DataFrame.from_dict({'s1': np.array([1, 2, 3, 4, 5]), 's2': np.array([-1, -2, -3, -4, -5])})
>>> units = {'s1': 'V', 's2': 'A'}
>>> mdf2.append(df, units=units)
"""
source_block = (
SourceInformation.from_common_source(acq_source)
if acq_source
else acq_source
)
if isinstance(signals, Signal):
signals = [signals]
elif isinstance(signals, DataFrame):
self._append_dataframe(
signals,
acq_name=acq_name,
acq_source=source_block,
comment=comment,
units=units,
)
return
if not signals:
return
prepare_record = True
# check if the signals have a common timebase
# if not, interpolate the signals using the union of all timebases
if signals:
t_ = signals[0].timestamps
if not common_timebase:
for s in signals[1:]:
if not array_equal(s.timestamps, t_):
different = True
break
else:
different = False
if different:
times = [s.timestamps for s in signals]
t = unique(concatenate(times)).astype(float64)
signals = [
s.interp(
t,
integer_interpolation_mode=self._integer_interpolation,
float_interpolation_mode=self._float_interpolation,
)
for s in signals
]
times = None
else:
t = t_
else:
t = t_
else:
t = []
if self.version >= "4.20" and (self._column_storage or 1):
return self._append_column_oriented(
signals, acq_name=acq_name, acq_source=source_block, comment=comment
)
dg_cntr = len(self.groups)
gp = Group(None)
gp.signal_data = gp_sdata = []
gp.signal_data_size = gp_sdata_size = []
gp.channels = gp_channels = []
gp.channel_dependencies = gp_dep = []
gp.signal_types = gp_sig_types = []
cycles_nr = len(t)
# channel group
kwargs = {"cycles_nr": cycles_nr, "samples_byte_nr": 0}
gp.channel_group = ChannelGroup(**kwargs)
gp.channel_group.acq_name = acq_name
gp.channel_group.acq_source = source_block
gp.channel_group.comment = comment
if any(sig.invalidation_bits is not None for sig in signals):
invalidation_bytes_nr = 1
gp.channel_group.invalidation_bytes_nr = invalidation_bytes_nr
inval_bits = []
else:
invalidation_bytes_nr = 0
inval_bits = []
inval_cntr = 0
self.groups.append(gp)
fields = []
types = []
parents = {}
ch_cntr = 0
offset = 0
field_names = UniqueDB()
defined_texts = {}
si_map = self._si_map
# setup all blocks related to the time master channel
file = self._tempfile
tell = file.tell
seek = file.seek
seek(0, 2)
if signals:
master_metadata = signals[0].master_metadata
else:
master_metadata = None
if master_metadata:
time_name, sync_type = master_metadata
if sync_type in (0, 1):
time_unit = "s"
elif sync_type == 2:
time_unit = "deg"
elif sync_type == 3:
time_unit = "m"
elif sync_type == 4:
time_unit = "index"
else:
time_name, sync_type = "time", v4c.SYNC_TYPE_TIME
time_unit = "s"
gp.channel_group.acq_source = source_block
if signals:
# time channel
t_type, t_size = fmt_to_datatype_v4(t.dtype, t.shape)
kwargs = {
"channel_type": v4c.CHANNEL_TYPE_MASTER,
"data_type": t_type,
"sync_type": sync_type,
"byte_offset": 0,
"bit_offset": 0,
"bit_count": t_size,
}
ch = Channel(**kwargs)
ch.unit = time_unit
ch.name = time_name
ch.source = source_block
ch.dtype_fmt = t.dtype
name = time_name
gp_channels.append(ch)
gp_sdata.append(None)
gp_sdata_size.append(0)
self.channels_db.add(name, (dg_cntr, ch_cntr))
self.masters_db[dg_cntr] = 0
# data group record parents
parents[ch_cntr] = name, 0
# time channel doesn't have channel dependencies
gp_dep.append(None)
fields.append(t)
types.append((name, t.dtype))
field_names.get_unique_name(name)
offset += t_size // 8
ch_cntr += 1
gp_sig_types.append(0)
for signal in signals:
sig = signal
samples = sig.samples
sig_dtype = samples.dtype
sig_shape = samples.shape
names = sig_dtype.names
name = signal.name
if names is None:
sig_type = v4c.SIGNAL_TYPE_SCALAR
if sig_dtype.kind in "SV":
sig_type = v4c.SIGNAL_TYPE_STRING
else:
prepare_record = False
if names in (v4c.CANOPEN_TIME_FIELDS, v4c.CANOPEN_DATE_FIELDS):
sig_type = v4c.SIGNAL_TYPE_CANOPEN
elif names[0] != sig.name:
sig_type = v4c.SIGNAL_TYPE_STRUCTURE_COMPOSITION
else:
sig_type = v4c.SIGNAL_TYPE_ARRAY
gp_sig_types.append(sig_type)
# first add the signals in the simple signal list
if sig_type == v4c.SIGNAL_TYPE_SCALAR:
# compute additional byte offset for large records size
s_type, s_size = fmt_to_datatype_v4(sig_dtype, sig_shape)
byte_size = s_size // 8 or 1
data_block_addr = 0
if sig_dtype.kind == "u" and signal.bit_count <= 4:
s_size = signal.bit_count
if signal.stream_sync:
channel_type = v4c.CHANNEL_TYPE_SYNC
if signal.attachment:
at_data, at_name, hash_sum = signal.attachment
attachment_index = self.attach(
at_data, at_name, hash_sum, mime="video/avi", embedded=False
)
attachment = attachment_index
else:
attachment = None
sync_type = v4c.SYNC_TYPE_TIME
else:
channel_type = v4c.CHANNEL_TYPE_VALUE
sync_type = v4c.SYNC_TYPE_NONE
if signal.attachment:
at_data, at_name, hash_sum = signal.attachment
attachment_index = self.attach(at_data, at_name, hash_sum)
attachment = attachment_index
else:
attachment = None
kwargs = {
"channel_type": channel_type,
"sync_type": sync_type,
"bit_count": s_size,
"byte_offset": offset,
"bit_offset": 0,
"data_type": s_type,
"data_block_addr": data_block_addr,
"flags": 0,
}
if attachment is not None:
kwargs["attachment_addr"] = 0
if invalidation_bytes_nr and signal.invalidation_bits is not None:
inval_bits.append(signal.invalidation_bits)
kwargs["flags"] = v4c.FLAG_CN_INVALIDATION_PRESENT
kwargs["pos_invalidation_bit"] = inval_cntr
inval_cntr += 1
ch = Channel(**kwargs)
ch.name = name
ch.unit = signal.unit
ch.comment = signal.comment
ch.display_name = signal.display_name
if len(sig_shape) > 1:
ch.dtype_fmt = dtype((sig_dtype, sig_shape[1:]))
else:
ch.dtype_fmt = sig_dtype
ch.attachment = attachment
# conversions for channel
if signal.raw:
ch.conversion = conversion_transfer(signal.conversion, version=4)
# source for channel
source = signal.source
if source:
if source in si_map:
ch.source = si_map[source]
else:
new_source = SourceInformation(
source_type=source.source_type,
bus_type=source.bus_type
)
new_source.name = source.name
new_source.path = source.path
new_source.comment = source.comment
si_map[source] = new_source
ch.source = new_source
gp_channels.append(ch)
offset += byte_size
gp_sdata.append(None)
gp_sdata_size.append(0)
entry = (dg_cntr, ch_cntr)
self.channels_db.add(name, entry)
if ch.display_name:
self.channels_db.add(ch.display_name, entry)
# update the parents as well
field_name = field_names.get_unique_name(name)
parents[ch_cntr] = field_name, 0
fields.append(samples)
types.append((field_name, sig_dtype, sig_shape[1:]))
ch_cntr += 1
# simple channels don't have channel dependencies
gp_dep.append(None)
elif sig_type == v4c.SIGNAL_TYPE_CANOPEN:
field_name = field_names.get_unique_name(name)
if names == v4c.CANOPEN_TIME_FIELDS:
vals = signal.samples.tobytes()
fields.append(frombuffer(vals, dtype="V6"))
types.append((field_name, "V6"))
byte_size = 6
s_type = v4c.DATA_TYPE_CANOPEN_TIME
s_dtype = dtype("V6")
else:
vals = []
for field in ("ms", "min", "hour", "day", "month", "year"):
if field == "hour":
vals.append(
signal.samples[field]
+ (signal.samples["summer_time"] << 7)
)
elif field == "day":
vals.append(
signal.samples[field]
+ (signal.samples["day_of_week"] << 4)
)
else:
vals.append(signal.samples[field])
vals = fromarrays(vals).tobytes()
fields.append(frombuffer(vals, dtype="V7"))
types.append((field_name, "V7"))
byte_size = 7
s_type = v4c.DATA_TYPE_CANOPEN_DATE
s_dtype = dtype("V7")
s_size = byte_size * 8
# there is no channel dependency
gp_dep.append(None)
# add channel block
kwargs = {
"channel_type": v4c.CHANNEL_TYPE_VALUE,
"bit_count": s_size,
"byte_offset": offset,
"bit_offset": 0,
"data_type": s_type,
"flags": 0,
}
if invalidation_bytes_nr and signal.invalidation_bits is not None:
inval_bits.append(signal.invalidation_bits)
kwargs["flags"] |= v4c.FLAG_CN_INVALIDATION_PRESENT
kwargs["pos_invalidation_bit"] = inval_cntr
inval_cntr += 1
ch = Channel(**kwargs)
ch.name = name
ch.unit = signal.unit
ch.comment = signal.comment
ch.display_name = signal.display_name
ch.dtype_fmt = s_dtype
# source for channel
source = signal.source
if source:
if source in si_map:
ch.source = si_map[source]
else:
new_source = SourceInformation(
source_type=source.source_type, bus_type=source.bus_type
)
new_source.name = source.name
new_source.path = source.path
new_source.comment = source.comment
si_map[source] = new_source
ch.source = new_source
gp_channels.append(ch)
offset += byte_size
entry = (dg_cntr, ch_cntr)
self.channels_db.add(name, entry)
if ch.display_name:
self.channels_db.add(ch.display_name, entry)
# update the parents as well
parents[ch_cntr] = field_name, 0
gp_sdata.append(0)
gp_sdata_size.append(0)
ch_cntr += 1
elif sig_type == v4c.SIGNAL_TYPE_STRUCTURE_COMPOSITION:
(
offset,
dg_cntr,
ch_cntr,
struct_self,
new_fields,
new_types,
inval_cntr,
) = self._append_structure_composition(
gp,
signal,
field_names,
offset,
dg_cntr,
ch_cntr,
parents,
defined_texts,
invalidation_bytes_nr,
inval_bits,
inval_cntr,
)
fields.extend(new_fields)
types.extend(new_types)
elif sig_type == v4c.SIGNAL_TYPE_ARRAY:
# here we have channel arrays or mdf v3 channel dependencies
samples = signal.samples[names[0]]
shape = samples.shape[1:]
if len(names) > 1 or len(shape) > 1:
# add channel dependency block for composed parent channel
dims_nr = len(shape)
names_nr = len(names)
if names_nr == 0:
kwargs = {
"dims": dims_nr,
"ca_type": v4c.CA_TYPE_LOOKUP,
"flags": v4c.FLAG_CA_FIXED_AXIS,
"byte_offset_base": samples.dtype.itemsize,
}
for i in range(dims_nr):
kwargs[f"dim_size_{i}"] = shape[i]
elif len(names) == 1:
kwargs = {
"dims": dims_nr,
"ca_type": v4c.CA_TYPE_ARRAY,
"flags": 0,
"byte_offset_base": samples.dtype.itemsize,
}
for i in range(dims_nr):
kwargs[f"dim_size_{i}"] = shape[i]
else:
kwargs = {
"dims": dims_nr,
"ca_type": v4c.CA_TYPE_LOOKUP,
"flags": v4c.FLAG_CA_AXIS,
"byte_offset_base": samples.dtype.itemsize,
}
for i in range(dims_nr):
kwargs[f"dim_size_{i}"] = shape[i]
parent_dep = ChannelArrayBlock(**kwargs)
gp_dep.append([parent_dep])
else:
# add channel dependency block for composed parent channel
kwargs = {
"dims": 1,
"ca_type": v4c.CA_TYPE_SCALE_AXIS,
"flags": 0,
"byte_offset_base": samples.dtype.itemsize,
"dim_size_0": shape[0],
}
parent_dep = ChannelArrayBlock(**kwargs)
gp_dep.append([parent_dep])
field_name = field_names.get_unique_name(name)
fields.append(samples)
dtype_pair = field_name, samples.dtype, shape
types.append(dtype_pair)
# first we add the structure channel
s_type, s_size = fmt_to_datatype_v4(samples.dtype, samples.shape, True)
# add channel block
kwargs = {
"channel_type": v4c.CHANNEL_TYPE_VALUE,
"bit_count": s_size,
"byte_offset": offset,
"bit_offset": 0,
"data_type": s_type,
"flags": 0,
}
if invalidation_bytes_nr:
if signal.invalidation_bits is not None:
inval_bits.append(signal.invalidation_bits)
kwargs["flags"] |= v4c.FLAG_CN_INVALIDATION_PRESENT
kwargs["pos_invalidation_bit"] = inval_cntr
inval_cntr += 1
ch = Channel(**kwargs)
ch.name = name
ch.unit = signal.unit
ch.comment = signal.comment
ch.display_name = signal.display_name
ch.dtype_fmt = samples.dtype
# source for channel
source = signal.source
if source:
if source in si_map:
ch.source = si_map[source]
else:
new_source = SourceInformation(
source_type=source.source_type, bus_type=source.bus_type
)
new_source.name = source.name
new_source.path = source.path
new_source.comment = source.comment
si_map[source] = new_source
ch.source = new_source
gp_channels.append(ch)
size = s_size // 8
for dim in shape:
size *= dim
offset += size
gp_sdata.append(None)
gp_sdata_size.append(0)
entry = (dg_cntr, ch_cntr)
self.channels_db.add(name, entry)
if ch.display_name:
self.channels_db.add(ch.display_name, entry)
# update the parents as well
parents[ch_cntr] = name, 0
ch_cntr += 1
for name in names[1:]:
field_name = field_names.get_unique_name(name)
samples = signal.samples[name]
shape = samples.shape[1:]
fields.append(samples)
types.append((field_name, samples.dtype, shape))
# add channel dependency block
kwargs = {
"dims": 1,
"ca_type": v4c.CA_TYPE_SCALE_AXIS,
"flags": 0,
"byte_offset_base": samples.dtype.itemsize,
"dim_size_0": shape[0],
}
dep = ChannelArrayBlock(**kwargs)
gp_dep.append([dep])
# add components channel
s_type, s_size = fmt_to_datatype_v4(samples.dtype, ())
byte_size = s_size // 8 or 1
kwargs = {
"channel_type": v4c.CHANNEL_TYPE_VALUE,
"bit_count": s_size,
"byte_offset": offset,
"bit_offset": 0,
"data_type": s_type,
"flags": 0,
}
if invalidation_bytes_nr:
if signal.invalidation_bits is not None:
inval_bits.append(signal.invalidation_bits)
kwargs["flags"] |= v4c.FLAG_CN_INVALIDATION_PRESENT
kwargs["pos_invalidation_bit"] = inval_cntr
inval_cntr += 1
ch = Channel(**kwargs)
ch.name = name
ch.unit = signal.unit
ch.comment = signal.comment
ch.display_name = signal.display_name
ch.dtype_fmt = samples.dtype
gp_channels.append(ch)
entry = dg_cntr, ch_cntr
parent_dep.axis_channels.append(entry)
for dim in shape:
byte_size *= dim
offset += byte_size
gp_sdata.append(None)
gp_sdata_size.append(0)
self.channels_db.add(name, entry)
# update the parents as well
parents[ch_cntr] = field_name, 0
ch_cntr += 1
else:
encoding = signal.encoding
samples = signal.samples
sig_dtype = samples.dtype
if encoding == "utf-8":
data_type = v4c.DATA_TYPE_STRING_UTF_8
elif encoding == "latin-1":
data_type = v4c.DATA_TYPE_STRING_LATIN_1
elif encoding == "utf-16-be":
data_type = v4c.DATA_TYPE_STRING_UTF_16_BE
elif encoding == "utf-16-le":
data_type = v4c.DATA_TYPE_STRING_UTF_16_LE
else:
raise MdfException(f'wrong encoding "{encoding}" for string signal')
if self.compact_vlsd:
data = []
offsets = []
off = 0
if encoding == "utf-16-le":
for elem in samples:
offsets.append(off)
size = len(elem)
if size % 2:
size += 1
elem = elem + b"\0"
data.append(UINT32_p(size))
data.append(elem)
off += size + 4
else:
for elem in samples:
offsets.append(off)
size = len(elem)
data.append(UINT32_p(size))
data.append(elem)
off += size + 4
data_size = off
offsets = array(offsets, dtype=uint64)
if data_size:
data_addr = tell()
info = SignalDataBlockInfo(
address=data_addr,
size=data_size,
count=len(samples),
offsets=offsets,
)
gp_sdata.append([info])
gp_sdata_size.append(data_size)
file.seek(0, 2)
file.write(b"".join(data))
else:
data_addr = 0
gp_sdata.append([])
gp_sdata_size.append(0)
else:
offsets = arange(len(samples), dtype=uint64) * (
signal.samples.itemsize + 4
)
values = [
full(len(samples), samples.itemsize, dtype=uint32),
samples,
]
types_ = [("o", uint32), ("s", sig_dtype)]
data = fromarrays(values, dtype=types_)
data_size = len(data) * data.itemsize
if data_size:
data_addr = tell()
info = SignalDataBlockInfo(
address=data_addr,
size=data_size,
count=len(data),
offsets=offsets,
)
gp_sdata.append([info])
gp_sdata_size.append(data_size)
data.tofile(file)
else:
data_addr = 0
gp_sdata.append([])
gp_sdata_size.append(0)
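# The VLSD stream written above is a sequence of [uint32 length][raw bytes]
# entries and `offsets` holds the start of each entry, so the record only
# stores a 64 bit offset per sample. For example, in the compact branch the
# samples b"ab" and b"xyz" are written as
# 02 00 00 00 61 62 03 00 00 00 78 79 7A with offsets [0, 6].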
# compute additional byte offset for large records size
byte_size = 8
kwargs = {
"channel_type": v4c.CHANNEL_TYPE_VLSD,
"bit_count": 64,
"byte_offset": offset,
"bit_offset": 0,
"data_type": data_type,
"data_block_addr": data_addr,
"flags": 0,
}
if invalidation_bytes_nr:
if signal.invalidation_bits is not None:
inval_bits.append(signal.invalidation_bits)
kwargs["flags"] |= v4c.FLAG_CN_INVALIDATION_PRESENT
kwargs["pos_invalidation_bit"] = inval_cntr
inval_cntr += 1
ch = Channel(**kwargs)
ch.name = name
ch.unit = signal.unit
ch.comment = signal.comment
ch.display_name = signal.display_name
ch.dtype_fmt = dtype("<u8")
# conversions for channel
conversion = conversion_transfer(signal.conversion, version=4)
if signal.raw:
ch.conversion = conversion
# source for channel
source = signal.source
if source:
if source in si_map:
ch.source = si_map[source]
else:
new_source = SourceInformation(
source_type=source.source_type, bus_type=source.bus_type
)
new_source.name = source.name
new_source.path = source.path
new_source.comment = source.comment
si_map[source] = new_source
ch.source = new_source
gp_channels.append(ch)
offset += byte_size
entry = (dg_cntr, ch_cntr)
self.channels_db.add(name, entry)
if ch.display_name:
self.channels_db.add(ch.display_name, entry)
# update the parents as well
field_name = field_names.get_unique_name(name)
parents[ch_cntr] = field_name, 0
fields.append(offsets)
types.append((field_name, uint64))
ch_cntr += 1
# simple channels don't have channel dependencies
gp_dep.append(None)
if invalidation_bytes_nr:
invalidation_bytes_nr = len(inval_bits)
for _ in range(8 - invalidation_bytes_nr % 8):
inval_bits.append(zeros(cycles_nr, dtype=bool))
inval_bits.reverse()
invalidation_bytes_nr = len(inval_bits) // 8
gp.channel_group.invalidation_bytes_nr = invalidation_bytes_nr
inval_bits = fliplr(
packbits(array(inval_bits).T).reshape(
(cycles_nr, invalidation_bytes_nr)
)
)
if self.version < "4.20":
fields.append(inval_bits)
types.append(
("invalidation_bytes", inval_bits.dtype, inval_bits.shape[1:])
)
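# Packing sketch: with e.g. 3 invalidation arrays the padding loop above
# extends the list to 8 boolean arrays, so packbits() emits exactly one
# invalidation byte per cycle; together with reverse() and fliplr() this
# places channel i in invalidation byte i // 8, bit i % 8, which is the
# layout that get_invalidation_bits() expects.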
gp.channel_group.cycles_nr = cycles_nr
gp.channel_group.samples_byte_nr = offset
virtual_group = VirtualChannelGroup()
self.virtual_groups[dg_cntr] = virtual_group
self.virtual_groups_map[dg_cntr] = dg_cntr
virtual_group.groups.append(dg_cntr)
virtual_group.record_size = offset + invalidation_bytes_nr
virtual_group.cycles_nr = cycles_nr
# data group
gp.data_group = DataGroup()
# data block
types = dtype(types)
gp.sorted = True
if prepare_record:
gp.types = types
gp.parents = parents
if signals and cycles_nr:
samples = fromarrays(fields, dtype=types)
else:
samples = array([])
del signals
del fields
size = len(samples) * samples.itemsize
if size:
if self.version < "4.20":
block_size = self._write_fragment_size or 20 * 1024 * 1024
chunk = ceil(block_size / samples.itemsize)
count = ceil(len(samples) / chunk)
for i in range(count):
data_ = samples[i * chunk : (i + 1) * chunk].tobytes()
raw_size = len(data_)
data_ = lz_compress(data_)
size = len(data_)
data_address = self._tempfile.tell()
self._tempfile.write(data_)
gp.data_blocks.append(
DataBlockInfo(
address=data_address,
block_type=v4c.DZ_BLOCK_LZ,
raw_size=raw_size,
size=size,
param=0,
)
)
else:
data_address = self._tempfile.tell()
gp.uses_ld = True
data_address = tell()
data = samples.tobytes()
del samples
raw_size = len(data)
data = lz_compress(data)
size = len(data)
self._tempfile.write(data)
gp.data_blocks.append(
DataBlockInfo(
address=data_address,
block_type=v4c.DZ_BLOCK_LZ,
raw_size=raw_size,
size=size,
param=0,
)
)
if inval_bits is not None:
addr = tell()
data = inval_bits.tobytes()
raw_size = len(data)
data = lz_compress(data)
size = len(data)
self._tempfile.write(data)
gp.data_blocks[-1].invalidation_block = InvalidationBlockInfo(
address=addr,
block_type=v4c.DZ_BLOCK_LZ,
raw_size=raw_size,
size=size,
param=None,
)
gp.data_location = v4c.LOCATION_TEMPORARY_FILE
return dg_cntr
def _append_column_oriented(
self, signals, acq_name=None, acq_source=None, comment=None
):
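# Column oriented (MDF >= 4.20) append: the master channel gets its own
# channel group and every signal is written into a separate channel group
# that points back to it through cg_master_index and the
# FLAG_CG_REMOTE_MASTER flag, so each column can be loaded independently.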
defined_texts = {}
si_map = self._si_map
# setup all blocks related to the time master channel
file = self._tempfile
tell = file.tell
seek = file.seek
write = file.write
seek(0, 2)
dg_cntr = initial_dg_cntr = len(self.groups)
# add the master group
gp = Group(None)
gp.signal_data = gp_sdata = []
gp.signal_data_size = gp_sdata_size = []
gp.channels = gp_channels = []
gp.channel_dependencies = gp_dep = []
gp.signal_types = gp_sig_types = []
gp.uses_ld = True
gp.data_group = DataGroup()
gp.sorted = True
samples = signals[0].timestamps
cycles_nr = len(samples)
# channel group
kwargs = {"cycles_nr": cycles_nr, "samples_byte_nr": 0}
gp.channel_group = ChannelGroup(**kwargs)
gp.channel_group.acq_name = acq_name
gp.channel_group.acq_source = acq_source
gp.channel_group.comment = comment
self.groups.append(gp)
ch_cntr = 0
types = []
parents = {}
ch_cntr = 0
offset = 0
prepare_record = True
source_block = None
master_metadata = signals[0].master_metadata
if master_metadata:
time_name, sync_type = master_metadata
if sync_type in (0, 1):
time_unit = "s"
elif sync_type == 2:
time_unit = "deg"
elif sync_type == 3:
time_unit = "m"
elif sync_type == 4:
time_unit = "index"
else:
time_name, sync_type = "time", v4c.SYNC_TYPE_TIME
time_unit = "s"
gp.channel_group.acq_source = source_block
# time channel
t_type, t_size = fmt_to_datatype_v4(samples.dtype, samples.shape)
kwargs = {
"channel_type": v4c.CHANNEL_TYPE_MASTER,
"data_type": t_type,
"sync_type": sync_type,
"byte_offset": 0,
"bit_offset": 0,
"bit_count": t_size,
}
ch = Channel(**kwargs)
ch.unit = time_unit
ch.name = time_name
ch.source = source_block
ch.dtype_fmt = samples.dtype
name = time_name
gp_channels.append(ch)
gp_sdata.append(None)
gp_sdata_size.append(0)
self.channels_db.add(name, (dg_cntr, ch_cntr))
self.masters_db[dg_cntr] = 0
# data group record parents
parents[ch_cntr] = name, 0
# time channel doesn't have channel dependencies
gp_dep.append(None)
types.append((name, samples.dtype))
offset += t_size // 8
ch_cntr += 1
gp_sig_types.append(0)
gp.channel_group.samples_byte_nr = offset
# data group
gp.data_group = DataGroup()
# data block
types = dtype(types)
gp.sorted = True
gp.types = types
gp.parents = parents
size = cycles_nr * samples.itemsize
cg_master_index = dg_cntr
virtual_group = VirtualChannelGroup()
self.virtual_groups[cg_master_index] = virtual_group
self.virtual_groups_map[dg_cntr] = dg_cntr
virtual_group.groups.append(dg_cntr)
virtual_group.record_size = offset
virtual_group.cycles_nr = cycles_nr
dg_cntr += 1
if size:
data_address = tell()
gp.data_location = v4c.LOCATION_TEMPORARY_FILE
write(samples.tobytes())
chunk = self._write_fragment_size // samples.itemsize
chunk *= samples.itemsize
while size:
if size > chunk:
gp.data_blocks.append(
DataBlockInfo(
address=data_address,
block_type=v4c.DT_BLOCK,
raw_size=chunk,
size=chunk,
param=0,
)
)
data_address += chunk
size -= chunk
else:
gp.data_blocks.append(
DataBlockInfo(
address=data_address,
block_type=v4c.DT_BLOCK,
raw_size=size,
size=size,
param=0,
)
)
size = 0
else:
gp.data_location = v4c.LOCATION_TEMPORARY_FILE
for signal in signals:
gp = Group(None)
gp.signal_data = gp_sdata = []
gp.signal_data_size = gp_sdata_size = []
gp.channels = gp_channels = []
gp.channel_dependencies = gp_dep = []
gp.signal_types = gp_sig_types = []
gp.data_group = DataGroup()
gp.sorted = True
gp.uses_ld = True
# channel group
kwargs = {
"cycles_nr": cycles_nr,
"samples_byte_nr": 0,
"flags": v4c.FLAG_CG_REMOTE_MASTER,
}
gp.channel_group = ChannelGroup(**kwargs)
gp.channel_group.acq_name = acq_name
gp.channel_group.acq_source = acq_source
gp.channel_group.comment = comment
gp.channel_group.cg_master_index = cg_master_index
self.groups.append(gp)
types = []
parents = {}
ch_cntr = 0
offset = 0
field_names = UniqueDB()
sig = signal
samples = sig.samples
sig_dtype = samples.dtype
sig_shape = samples.shape
names = sig_dtype.names
name = signal.name
if names is None:
sig_type = v4c.SIGNAL_TYPE_SCALAR
if sig_dtype.kind in "SV":
sig_type = v4c.SIGNAL_TYPE_STRING
else:
if names in (v4c.CANOPEN_TIME_FIELDS, v4c.CANOPEN_DATE_FIELDS):
sig_type = v4c.SIGNAL_TYPE_CANOPEN
elif names[0] != sig.name:
sig_type = v4c.SIGNAL_TYPE_STRUCTURE_COMPOSITION
else:
sig_type = v4c.SIGNAL_TYPE_ARRAY
gp_sig_types.append(sig_type)
# first add the signals in the simple signal list
if sig_type == v4c.SIGNAL_TYPE_SCALAR:
# compute additional byte offset for large records size
s_type, s_size = fmt_to_datatype_v4(sig_dtype, sig_shape)
byte_size = s_size // 8 or 1
if sig_dtype.kind == "u" and signal.bit_count <= 4:
s_size = signal.bit_count
if signal.stream_sync:
channel_type = v4c.CHANNEL_TYPE_SYNC
if signal.attachment:
at_data, at_name, hash_sum = signal.attachment
attachment_addr = self.attach(
at_data, at_name, hash_sum, mime="video/avi", embedded=False
)
data_block_addr = attachment_addr
else:
data_block_addr = 0
sync_type = v4c.SYNC_TYPE_TIME
else:
channel_type = v4c.CHANNEL_TYPE_VALUE
data_block_addr = 0
sync_type = v4c.SYNC_TYPE_NONE
kwargs = {
"channel_type": channel_type,
"sync_type": sync_type,
"bit_count": s_size,
"byte_offset": offset,
"bit_offset": 0,
"data_type": s_type,
"data_block_addr": data_block_addr,
"flags": 0,
}
if signal.invalidation_bits is not None:
invalidation_bits = signal.invalidation_bits
kwargs["flags"] = v4c.FLAG_CN_INVALIDATION_PRESENT
kwargs["pos_invalidation_bit"] = 0
else:
invalidation_bits = None
ch = Channel(**kwargs)
ch.name = name
ch.unit = signal.unit
ch.comment = signal.comment
ch.display_name = signal.display_name
# conversions for channel
if signal.raw:
ch.conversion = conversion_transfer(signal.conversion, version=4)
# source for channel
source = signal.source
if source:
if source in si_map:
ch.source = si_map[source]
else:
new_source = SourceInformation(
source_type=source.source_type, bus_type=source.bus_type
)
new_source.name = source.name
new_source.path = source.path
new_source.comment = source.comment
si_map[source] = new_source
ch.source = new_source
gp_channels.append(ch)
offset = byte_size
gp_sdata.append(None)
gp_sdata_size.append(0)
entry = (dg_cntr, ch_cntr)
self.channels_db.add(name, entry)
if ch.display_name:
self.channels_db.add(ch.display_name, entry)
# update the parents as well
parents[ch_cntr] = name, 0
_shape = sig_shape[1:]
types.append((name, sig_dtype, _shape))
gp.single_channel_dtype = ch.dtype_fmt = dtype((sig_dtype, _shape))
# simple channels don't have channel dependencies
gp_dep.append(None)
elif sig_type == v4c.SIGNAL_TYPE_CANOPEN:
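# CANopen TIME samples are stored as-is in 6 raw bytes; CANopen DATE
# samples are repacked into 7 bytes, folding the summer_time flag into
# bit 7 of the hour byte and day_of_week into the upper nibble of the
# day byte, as done below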
if names == v4c.CANOPEN_TIME_FIELDS:
types.append((name, "V6"))
gp.single_channel_dtype = dtype("V6")
byte_size = 6
s_type = v4c.DATA_TYPE_CANOPEN_TIME
else:
vals = []
for field in ("ms", "min", "hour", "day", "month", "year"):
if field == "hour":
vals.append(
signal.samples[field]
+ (signal.samples["summer_time"] << 7)
)
elif field == "day":
vals.append(
signal.samples[field]
+ (signal.samples["day_of_week"] << 4)
)
else:
vals.append(signal.samples[field])
samples = fromarrays(vals)
types.append((name, "V7"))
gp.single_channel_dtype = dtype("V7")
byte_size = 7
s_type = v4c.DATA_TYPE_CANOPEN_DATE
s_size = byte_size * 8
# there is no channel dependency
gp_dep.append(None)
# add channel block
kwargs = {
"channel_type": v4c.CHANNEL_TYPE_VALUE,
"bit_count": s_size,
"byte_offset": offset,
"bit_offset": 0,
"data_type": s_type,
"flags": 0,
}
if signal.invalidation_bits is not None:
invalidation_bits = signal.invalidation_bits
kwargs["flags"] = v4c.FLAG_CN_INVALIDATION_PRESENT
kwargs["pos_invalidation_bit"] = 0
else:
invalidation_bits = None
ch = Channel(**kwargs)
ch.name = name
ch.unit = signal.unit
ch.comment = signal.comment
ch.display_name = signal.display_name
ch.dtype_fmt = gp.single_channel_dtype
# source for channel
source = signal.source
if source:
if source in si_map:
ch.source = si_map[source]
else:
new_source = SourceInformation(
source_type=source.source_type, bus_type=source.bus_type
)
new_source.name = source.name
new_source.path = source.path
new_source.comment = source.comment
si_map[source] = new_source
ch.source = new_source
gp_channels.append(ch)
offset = byte_size
entry = (dg_cntr, ch_cntr)
self.channels_db.add(name, entry)
if ch.display_name:
self.channels_db.add(ch.display_name, entry)
# update the parents as well
parents[ch_cntr] = name, 0
gp_sdata.append(0)
gp_sdata_size.append(0)
elif sig_type == v4c.SIGNAL_TYPE_STRUCTURE_COMPOSITION:
(
offset,
dg_cntr,
ch_cntr,
struct_self,
new_fields,
new_types,
) = self._append_structure_composition_column_oriented(
gp,
signal,
field_names,
offset,
dg_cntr,
ch_cntr,
parents,
defined_texts,
)
if signal.invalidation_bits is not None:
invalidation_bits = signal.invalidation_bits
else:
invalidation_bits = None
gp["types"] = dtype(new_types)
offset = gp["types"].itemsize
samples = signal.samples
elif sig_type == v4c.SIGNAL_TYPE_ARRAY:
fields = []
# here we have channel arrays or mdf v3 channel dependencies
samples = signal.samples[names[0]]
shape = samples.shape[1:]
if len(names) > 1 or len(shape) > 1:
# add channel dependency block for composed parent channel
dims_nr = len(shape)
names_nr = len(names)
if names_nr == 0:
kwargs = {
"dims": dims_nr,
"ca_type": v4c.CA_TYPE_LOOKUP,
"flags": v4c.FLAG_CA_FIXED_AXIS,
"byte_offset_base": samples.dtype.itemsize,
}
for i in range(dims_nr):
kwargs[f"dim_size_{i}"] = shape[i]
elif len(names) == 1:
kwargs = {
"dims": dims_nr,
"ca_type": v4c.CA_TYPE_ARRAY,
"flags": 0,
"byte_offset_base": samples.dtype.itemsize,
}
for i in range(dims_nr):
kwargs[f"dim_size_{i}"] = shape[i]
else:
kwargs = {
"dims": dims_nr,
"ca_type": v4c.CA_TYPE_LOOKUP,
"flags": v4c.FLAG_CA_AXIS,
"byte_offset_base": samples.dtype.itemsize,
}
for i in range(dims_nr):
kwargs[f"dim_size_{i}"] = shape[i]
parent_dep = ChannelArrayBlock(**kwargs)
gp_dep.append([parent_dep])
else:
# add channel dependency block for composed parent channel
kwargs = {
"dims": 1,
"ca_type": v4c.CA_TYPE_SCALE_AXIS,
"flags": 0,
"byte_offset_base": samples.dtype.itemsize,
"dim_size_0": shape[0],
}
parent_dep = ChannelArrayBlock(**kwargs)
gp_dep.append([parent_dep])
field_name = field_names.get_unique_name(name)
fields.append(samples)
dtype_pair = field_name, samples.dtype, shape
types.append(dtype_pair)
# first we add the structure channel
s_type, s_size = fmt_to_datatype_v4(samples.dtype, samples.shape, True)
# add channel block
kwargs = {
"channel_type": v4c.CHANNEL_TYPE_VALUE,
"bit_count": s_size,
"byte_offset": offset,
"bit_offset": 0,
"data_type": s_type,
"flags": 0,
}
if signal.invalidation_bits is not None:
invalidation_bits = signal.invalidation_bits
kwargs["flags"] = v4c.FLAG_CN_INVALIDATION_PRESENT
kwargs["pos_invalidation_bit"] = 0
else:
invalidation_bits = None
ch = Channel(**kwargs)
ch.name = name
ch.unit = signal.unit
ch.comment = signal.comment
ch.display_name = signal.display_name
ch.dtype_fmt = samples.dtype
# source for channel
source = signal.source
if source:
if source in si_map:
ch.source = si_map[source]
else:
new_source = SourceInformation(
source_type=source.source_type, bus_type=source.bus_type
)
new_source.name = source.name
new_source.path = source.path
new_source.comment = source.comment
si_map[source] = new_source
ch.source = new_source
gp_channels.append(ch)
size = s_size // 8
for dim in shape:
size *= dim
offset += size
gp_sdata.append(None)
entry = (dg_cntr, ch_cntr)
self.channels_db.add(name, entry)
if ch.display_name:
self.channels_db.add(ch.display_name, entry)
# update the parents as well
parents[ch_cntr] = name, 0
ch_cntr += 1
for name in names[1:]:
field_name = field_names.get_unique_name(name)
samples = signal.samples[name]
shape = samples.shape[1:]
fields.append(samples)
types.append((field_name, samples.dtype, shape))
# add channel dependency block
kwargs = {
"dims": 1,
"ca_type": v4c.CA_TYPE_SCALE_AXIS,
"flags": 0,
"byte_offset_base": samples.dtype.itemsize,
"dim_size_0": shape[0],
}
dep = ChannelArrayBlock(**kwargs)
gp_dep.append([dep])
# add components channel
s_type, s_size = fmt_to_datatype_v4(samples.dtype, ())
byte_size = s_size // 8 or 1
kwargs = {
"channel_type": v4c.CHANNEL_TYPE_VALUE,
"bit_count": s_size,
"byte_offset": offset,
"bit_offset": 0,
"data_type": s_type,
"flags": 0,
}
if signal.invalidation_bits is not None:
invalidation_bits = signal.invalidation_bits
kwargs["flags"] = v4c.FLAG_CN_INVALIDATION_PRESENT
kwargs["pos_invalidation_bit"] = 0
else:
invalidation_bits = None
ch = Channel(**kwargs)
ch.name = name
ch.unit = signal.unit
ch.comment = signal.comment
ch.display_name = signal.display_name
ch.dtype_fmt = samples.dtype
gp_channels.append(ch)
entry = dg_cntr, ch_cntr
parent_dep.axis_channels.append(entry)
for dim in shape:
byte_size *= dim
offset += byte_size
gp_sdata.append(None)
gp_sdata_size.append(0)
self.channels_db.add(name, entry)
# update the parents as well
parents[ch_cntr] = field_name, 0
ch_cntr += 1
gp["types"] = dtype(types)
samples = signal.samples
else:
encoding = signal.encoding
samples = signal.samples
sig_dtype = samples.dtype
if encoding == "utf-8":
data_type = v4c.DATA_TYPE_STRING_UTF_8
elif encoding == "latin-1":
data_type = v4c.DATA_TYPE_STRING_LATIN_1
elif encoding == "utf-16-be":
data_type = v4c.DATA_TYPE_STRING_UTF_16_BE
elif encoding == "utf-16-le":
data_type = v4c.DATA_TYPE_STRING_UTF_16_LE
else:
raise MdfException(f'wrong encoding "{encoding}" for string signal')
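# variable length (VLSD) string channel: each value is written to the
# signal data block as a 4 byte length prefix followed by the raw bytes,
# while the record itself only stores the 8 byte offset of every value
# inside that block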
offsets = arange(len(samples), dtype=uint64) * (
signal.samples.itemsize + 4
)
values = [full(len(samples), samples.itemsize, dtype=uint32), samples]
types_ = [("o", uint32), ("s", sig_dtype)]
data = fromarrays(values, dtype=types_)
data_size = len(data) * data.itemsize
if data_size:
data_addr = tell()
info = SignalDataBlockInfo(
address=data_addr,
size=data_size,
count=len(data),
offsets=offsets,
)
gp_sdata.append([info])
data.tofile(file)
else:
data_addr = 0
gp_sdata.append([])
# compute additional byte offset for large records size
byte_size = 8
kwargs = {
"channel_type": v4c.CHANNEL_TYPE_VLSD,
"bit_count": 64,
"byte_offset": offset,
"bit_offset": 0,
"data_type": data_type,
"data_block_addr": data_addr,
"flags": 0,
}
if signal.invalidation_bits is not None:
invalidation_bits = signal.invalidation_bits
kwargs["flags"] = v4c.FLAG_CN_INVALIDATION_PRESENT
kwargs["pos_invalidation_bit"] = 0
else:
invalidation_bits = None
ch = Channel(**kwargs)
ch.name = name
ch.unit = signal.unit
ch.comment = signal.comment
ch.display_name = signal.display_name
# conversions for channel
conversion = conversion_transfer(signal.conversion, version=4)
if signal.raw:
ch.conversion = conversion
# source for channel
source = signal.source
if source:
if source in si_map:
ch.source = si_map[source]
else:
new_source = SourceInformation(
source_type=source.source_type, bus_type=source.bus_type
)
new_source.name = source.name
new_source.path = source.path
new_source.comment = source.comment
si_map[source] = new_source
ch.source = new_source
gp_channels.append(ch)
offset = byte_size
entry = (dg_cntr, ch_cntr)
self.channels_db.add(name, entry)
if ch.display_name:
self.channels_db.add(ch.display_name, entry)
# update the parents as well
parents[ch_cntr] = name, 0
types.append((name, uint64))
gp.single_channel_dtype = ch.dtype_fmt = uint64
samples = offsets
# simple channels don't have channel dependencies
gp_dep.append(None)
gp.channel_group.samples_byte_nr = offset
if invalidation_bits is not None:
gp.channel_group.invalidation_bytes_nr = 1
virtual_group.groups.append(dg_cntr)
self.virtual_groups_map[dg_cntr] = cg_master_index
virtual_group.record_size += offset
if signal.invalidation_bits is not None:
virtual_group.record_size += 1
dg_cntr += 1
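# write this signal group's samples as a single LZ-compressed block;
# when invalidation bits are present they are compressed and stored in a
# separate invalidation block attached to the data block info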
size = cycles_nr * samples.itemsize
if size:
data_address = tell()
data = samples.tobytes()
raw_size = len(data)
data = lz_compress(data)
size = len(data)
write(data)
gp.data_blocks.append(
DataBlockInfo(
address=data_address,
block_type=v4c.DZ_BLOCK_LZ,
raw_size=raw_size,
size=size,
param=0,
)
)
if invalidation_bits is not None:
addr = tell()
data = invalidation_bits.tobytes()
raw_size = len(data)
data = lz_compress(data)
size = len(data)
write(data)
gp.data_blocks[-1].invalidation_block(
InvalidationBlockInfo(
address=addr,
block_type=v4c.DZ_BLOCK_LZ,
raw_size=raw_size,
size=size,
param=None,
)
)
gp.data_location = v4c.LOCATION_TEMPORARY_FILE
return initial_dg_cntr
def _append_dataframe(
self, df, acq_name=None, acq_source=None, comment=None, units=None
):
"""
Appends a new data group from a Pandas data frame.
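The data frame index is used as the master (time) channel and every
column becomes a channel; *units* optionally maps column names to unit
strings.
Illustrative sketch only; ``mdf`` and the column names are hypothetical:
>>> import pandas as pd
>>> df = pd.DataFrame({'speed': [10.0, 11.0, 12.0]}, index=[0.0, 0.1, 0.2])
>>> mdf._append_dataframe(df, acq_name='acq', units={'speed': 'km/h'})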
"""
units = units or {}
if df.shape == (0, 0):
return
t = df.index
index_name = df.index.name
time_name = index_name or "time"
sync_type = v4c.SYNC_TYPE_TIME
time_unit = "s"
dg_cntr = len(self.groups)
gp = Group(None)
gp.signal_data = gp_sdata = []
gp.signal_data_size = gp_sdata_size = []
gp.channels = gp_channels = []
gp.channel_dependencies = gp_dep = []
gp.signal_types = gp_sig_types = []
cycles_nr = len(t)
# channel group
kwargs = {"cycles_nr": cycles_nr, "samples_byte_nr": 0}
gp.channel_group = ChannelGroup(**kwargs)
gp.channel_group.acq_name = acq_name
gp.channel_group.acq_source = acq_source
gp.channel_group.comment = comment
self.groups.append(gp)
fields = []
types = []
parents = {}
ch_cntr = 0
offset = 0
field_names = UniqueDB()
# setup all blocks related to the time master channel
file = self._tempfile
tell = file.tell
seek = file.seek
seek(0, 2)
virtual_group = VirtualChannelGroup()
self.virtual_groups[dg_cntr] = virtual_group
self.virtual_groups_map[dg_cntr] = dg_cntr
virtual_group.groups.append(dg_cntr)
virtual_group.cycles_nr = cycles_nr
# time channel
t_type, t_size = fmt_to_datatype_v4(t.dtype, t.shape)
kwargs = {
"channel_type": v4c.CHANNEL_TYPE_MASTER,
"data_type": t_type,
"sync_type": sync_type,
"byte_offset": 0,
"bit_offset": 0,
"bit_count": t_size,
"min_raw_value": t[0] if cycles_nr else 0,
"max_raw_value": t[-1] if cycles_nr else 0,
"lower_limit": t[0] if cycles_nr else 0,
"upper_limit": t[-1] if cycles_nr else 0,
"flags": v4c.FLAG_PHY_RANGE_OK | v4c.FLAG_VAL_RANGE_OK,
}
ch = Channel(**kwargs)
ch.unit = time_unit
ch.name = time_name
ch.dtype_fmt = t.dtype
name = time_name
gp_channels.append(ch)
gp_sdata.append(None)
gp_sdata_size.append(0)
self.channels_db.add(name, (dg_cntr, ch_cntr))
self.masters_db[dg_cntr] = 0
# data group record parents
parents[ch_cntr] = name, 0
# time channel doesn't have channel dependencies
gp_dep.append(None)
fields.append(t)
types.append((name, t.dtype))
field_names.get_unique_name(name)
offset += t_size // 8
ch_cntr += 1
gp_sig_types.append(0)
for signal in df:
if index_name == signal:
continue
sig = df[signal]
name = signal
sig_type = v4c.SIGNAL_TYPE_SCALAR
if sig.dtype.kind in "SV":
sig_type = v4c.SIGNAL_TYPE_STRING
gp_sig_types.append(sig_type)
# first add the signals in the simple signal list
if sig_type == v4c.SIGNAL_TYPE_SCALAR:
# compute additional byte offset for large records size
s_type, s_size = fmt_to_datatype_v4(sig.dtype, sig.shape)
byte_size = s_size // 8 or 1
channel_type = v4c.CHANNEL_TYPE_VALUE
data_block_addr = 0
sync_type = v4c.SYNC_TYPE_NONE
kwargs = {
"channel_type": channel_type,
"sync_type": sync_type,
"bit_count": s_size,
"byte_offset": offset,
"bit_offset": 0,
"data_type": s_type,
"data_block_addr": data_block_addr,
}
ch = Channel(**kwargs)
ch.name = name
ch.unit = units.get(name, "")
ch.dtype_fmt = dtype((sig.dtype, sig.shape[1:]))
gp_channels.append(ch)
offset += byte_size
gp_sdata.append(None)
gp_sdata_size.append(0)
self.channels_db.add(name, (dg_cntr, ch_cntr))
# update the parents as well
field_name = field_names.get_unique_name(name)
parents[ch_cntr] = field_name, 0
fields.append(sig)
types.append((field_name, sig.dtype, sig.shape[1:]))
ch_cntr += 1
# simple channels don't have channel dependencies
gp_dep.append(None)
elif sig_type == v4c.SIGNAL_TYPE_STRING:
offsets = arange(len(sig), dtype=uint64) * (sig.dtype.itemsize + 4)
values = [full(len(sig), sig.dtype.itemsize, dtype=uint32), sig.values]
types_ = [("", uint32), ("", sig.dtype)]
data = fromarrays(values, dtype=types_)
data_size = len(data) * data.itemsize
if data_size:
data_addr = tell()
info = SignalDataBlockInfo(
address=data_addr,
size=data_size,
count=len(data),
offsets=offsets,
)
gp_sdata.append([info])
data.tofile(file)
else:
data_addr = 0
gp_sdata.append([])
# compute additional byte offset for large records size
byte_size = 8
kwargs = {
"channel_type": v4c.CHANNEL_TYPE_VLSD,
"bit_count": 64,
"byte_offset": offset,
"bit_offset": 0,
"data_type": v4c.DATA_TYPE_STRING_UTF_8,
"min_raw_value": 0,
"max_raw_value": 0,
"lower_limit": 0,
"upper_limit": 0,
"flags": 0,
"data_block_addr": data_addr,
}
ch = Channel(**kwargs)
ch.name = name
ch.unit = units.get(name, "")
ch.dtype_fmt = dtype("<u8")
gp_channels.append(ch)
offset += byte_size
self.channels_db.add(name, (dg_cntr, ch_cntr))
# update the parents as well
field_name = field_names.get_unique_name(name)
parents[ch_cntr] = field_name, 0
fields.append(offsets)
types.append((field_name, uint64))
ch_cntr += 1
# simple channels don't have channel dependencies
gp_dep.append(None)
virtual_group.record_size = offset
virtual_group.cycles_nr = cycles_nr
gp.channel_group.cycles_nr = cycles_nr
gp.channel_group.samples_byte_nr = offset
# data group
gp.data_group = DataGroup()
# data block
types = dtype(types)
gp.sorted = True
gp.types = types
gp.parents = parents
if df.shape[0]:
samples = fromarrays(fields, dtype=types)
else:
samples = array([])
size = len(samples) * samples.itemsize
if size:
data_address = self._tempfile.tell()
gp.data_location = v4c.LOCATION_TEMPORARY_FILE
samples.tofile(self._tempfile)
gp.data_blocks.append(
DataBlockInfo(
address=data_address,
block_type=v4c.DT_BLOCK,
raw_size=size,
size=size,
param=0,
)
)
else:
gp.data_location = v4c.LOCATION_TEMPORARY_FILE
def _append_structure_composition(
self,
grp,
signal,
field_names,
offset,
dg_cntr,
ch_cntr,
parents,
defined_texts,
invalidation_bytes_nr,
inval_bits,
inval_cntr,
):
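# appends one parent BYTEARRAY channel covering the whole structured
# sample, then one component channel per field (recursing into nested
# structures), and returns the updated offset/counters together with the
# field arrays and dtypes that the caller merges into the group record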
si_map = self._si_map
fields = []
types = []
file = self._tempfile
seek = file.seek
seek(0, 2)
gp = grp
gp_sdata = gp.signal_data
gp_sdata_size = gp.signal_data_size
gp_channels = gp.channels
gp_dep = gp.channel_dependencies
name = signal.name
names = signal.samples.dtype.names
field_name = field_names.get_unique_name(name)
# first we add the structure channel
if signal.attachment and signal.attachment[0]:
at_data, at_name, hash_sum = signal.attachment
if at_name is not None:
suffix = Path(at_name).suffix.lower().strip(".")
else:
suffix = "dbc"
if suffix == "a2l":
mime = "applciation/A2L"
else:
mime = f"application/x-{suffix}"
attachment_index = self.attach(
at_data, at_name, hash_sum=hash_sum, mime=mime
)
attachment = attachment_index
else:
attachment = None
# add channel block
kwargs = {
"channel_type": v4c.CHANNEL_TYPE_VALUE,
"bit_count": signal.samples.dtype.itemsize * 8,
"byte_offset": offset,
"bit_offset": 0,
"data_type": v4c.DATA_TYPE_BYTEARRAY,
"precision": 0,
}
if attachment is not None:
kwargs["attachment_addr"] = 0
source_bus = signal.source and signal.source.source_type == v4c.SOURCE_BUS
if source_bus:
kwargs["flags"] = v4c.FLAG_CN_BUS_EVENT
flags_ = v4c.FLAG_CN_BUS_EVENT
grp.channel_group.flags |= (
v4c.FLAG_CG_BUS_EVENT | v4c.FLAG_CG_PLAIN_BUS_EVENT
)
else:
kwargs["flags"] = 0
flags_ = 0
if invalidation_bytes_nr and signal.invalidation_bits is not None:
inval_bits.append(signal.invalidation_bits)
kwargs["flags"] |= v4c.FLAG_CN_INVALIDATION_PRESENT
kwargs["pos_invalidation_bit"] = inval_cntr
inval_cntr += 1
ch = Channel(**kwargs)
ch.name = name
ch.unit = signal.unit
ch.comment = signal.comment
ch.display_name = signal.display_name
ch.attachment = attachment
ch.dtype_fmt = signal.samples.dtype
if source_bus and grp.channel_group.acq_source is None:
grp.channel_group.acq_source = SourceInformation.from_common_source(
signal.source
)
if signal.source.bus_type == v4c.BUS_TYPE_CAN:
grp.channel_group.path_separator = 46
grp.channel_group.acq_name = "CAN"
elif signal.source.bus_type == v4c.BUS_TYPE_FLEXRAY:
grp.channel_group.path_separator = 46
grp.channel_group.acq_name = "FLEXRAY"
elif signal.source.bus_type == v4c.BUS_TYPE_ETHERNET:
grp.channel_group.path_separator = 46
grp.channel_group.acq_name = "ETHERNET"
elif signal.source.bus_type == v4c.BUS_TYPE_K_LINE:
grp.channel_group.path_separator = 46
grp.channel_group.acq_name = "K_LINE"
elif signal.source.bus_type == v4c.BUS_TYPE_MOST:
grp.channel_group.path_separator = 46
grp.channel_group.acq_name = "MOST"
elif signal.source.bus_type == v4c.BUS_TYPE_LIN:
grp.channel_group.path_separator = 46
grp.channel_group.acq_name = "LIN"
# source for channel
source = signal.source
if source:
if source in si_map:
ch.source = si_map[source]
else:
new_source = SourceInformation(
source_type=source.source_type, bus_type=source.bus_type
)
new_source.name = source.name
new_source.path = source.path
new_source.comment = source.comment
si_map[source] = new_source
ch.source = new_source
entry = dg_cntr, ch_cntr
gp_channels.append(ch)
struct_self = entry
gp_sdata.append(None)
gp_sdata_size.append(0)
self.channels_db.add(name, entry)
if ch.display_name:
self.channels_db.add(ch.display_name, entry)
# update the parents as well
parents[ch_cntr] = name, 0
ch_cntr += 1
dep_list = []
gp_dep.append(dep_list)
# then we add the fields
for name in names:
field_name = field_names.get_unique_name(name)
samples = signal.samples[name]
fld_names = samples.dtype.names
if fld_names is None:
sig_type = v4c.SIGNAL_TYPE_SCALAR
if samples.dtype.kind in "SV":
sig_type = v4c.SIGNAL_TYPE_STRING
else:
if fld_names in (v4c.CANOPEN_TIME_FIELDS, v4c.CANOPEN_DATE_FIELDS):
sig_type = v4c.SIGNAL_TYPE_CANOPEN
elif fld_names[0] != name:
sig_type = v4c.SIGNAL_TYPE_STRUCTURE_COMPOSITION
else:
sig_type = v4c.SIGNAL_TYPE_ARRAY
if sig_type in (v4c.SIGNAL_TYPE_SCALAR, v4c.SIGNAL_TYPE_STRING):
s_type, s_size = fmt_to_datatype_v4(samples.dtype, samples.shape)
byte_size = s_size // 8 or 1
fields.append(samples)
types.append((field_name, samples.dtype, samples.shape[1:]))
# add channel block
kwargs = {
"channel_type": v4c.CHANNEL_TYPE_VALUE,
"bit_count": s_size,
"byte_offset": offset,
"bit_offset": 0,
"data_type": s_type,
"flags": flags_,
}
if invalidation_bytes_nr:
if signal.invalidation_bits is not None:
inval_bits.append(signal.invalidation_bits)
kwargs["flags"] |= v4c.FLAG_CN_INVALIDATION_PRESENT
kwargs["pos_invalidation_bit"] = inval_cntr
inval_cntr += 1
ch = Channel(**kwargs)
ch.name = name
ch.dtype_fmt = dtype((samples.dtype, samples.shape[1:]))
entry = (dg_cntr, ch_cntr)
gp_channels.append(ch)
dep_list.append(entry)
offset += byte_size
gp_sdata.append(None)
gp_sdata_size.append(0)
self.channels_db.add(name, entry)
# update the parents as well
parents[ch_cntr] = field_name, 0
ch_cntr += 1
gp_dep.append(None)
elif sig_type == v4c.SIGNAL_TYPE_ARRAY:
# here we have channel arrays or mdf v3 channel dependencies
array_samples = samples
names = samples.dtype.names
samples = array_samples[names[0]]
shape = samples.shape[1:]
if len(names) > 1:
# add channel dependency block for composed parent channel
dims_nr = len(shape)
names_nr = len(names)
if names_nr == 0:
kwargs = {
"dims": dims_nr,
"ca_type": v4c.CA_TYPE_LOOKUP,
"flags": v4c.FLAG_CA_FIXED_AXIS,
"byte_offset_base": samples.dtype.itemsize,
}
for i in range(dims_nr):
kwargs[f"dim_size_{i}"] = shape[i]
elif len(names) == 1:
kwargs = {
"dims": dims_nr,
"ca_type": v4c.CA_TYPE_ARRAY,
"flags": 0,
"byte_offset_base": samples.dtype.itemsize,
}
for i in range(dims_nr):
kwargs[f"dim_size_{i}"] = shape[i]
else:
kwargs = {
"dims": dims_nr,
"ca_type": v4c.CA_TYPE_LOOKUP,
"flags": v4c.FLAG_CA_AXIS,
"byte_offset_base": samples.dtype.itemsize,
}
for i in range(dims_nr):
kwargs[f"dim_size_{i}"] = shape[i]
parent_dep = ChannelArrayBlock(**kwargs)
gp_dep.append([parent_dep])
else:
# add channel dependency block for composed parent channel
kwargs = {
"dims": 1,
"ca_type": v4c.CA_TYPE_SCALE_AXIS,
"flags": 0,
"byte_offset_base": samples.dtype.itemsize,
"dim_size_0": shape[0],
}
parent_dep = ChannelArrayBlock(**kwargs)
gp_dep.append([parent_dep])
field_name = field_names.get_unique_name(name)
fields.append(samples)
dtype_pair = field_name, samples.dtype, shape
types.append(dtype_pair)
# first we add the structure channel
s_type, s_size = fmt_to_datatype_v4(samples.dtype, samples.shape, True)
# add channel block
kwargs = {
"channel_type": v4c.CHANNEL_TYPE_VALUE,
"bit_count": s_size,
"byte_offset": offset,
"bit_offset": 0,
"data_type": s_type,
"flags": 0,
}
if invalidation_bytes_nr:
if signal.invalidation_bits is not None:
inval_bits.append(signal.invalidation_bits)
kwargs["flags"] |= v4c.FLAG_CN_INVALIDATION_PRESENT
kwargs["pos_invalidation_bit"] = inval_cntr
inval_cntr += 1
ch = Channel(**kwargs)
ch.name = name
ch.unit = signal.unit
ch.comment = signal.comment
ch.display_name = signal.display_name
ch.dtype_fmt = samples.dtype
# source for channel
source = signal.source
if source:
if source in si_map:
ch.source = si_map[source]
else:
new_source = SourceInformation(
source_type=source.source_type, bus_type=source.bus_type
)
new_source.name = source.name
new_source.path = source.path
new_source.comment = source.comment
si_map[source] = new_source
ch.source = new_source
gp_channels.append(ch)
size = s_size // 8
for dim in shape:
size *= dim
offset += size
gp_sdata.append(None)
entry = (dg_cntr, ch_cntr)
self.channels_db.add(name, entry)
if ch.display_name:
self.channels_db.add(ch.display_name, entry)
# update the parents as well
parents[ch_cntr] = name, 0
ch_cntr += 1
for name in names[1:]:
field_name = field_names.get_unique_name(name)
samples = array_samples[name]
shape = samples.shape[1:]
fields.append(samples)
types.append((field_name, samples.dtype, shape))
# add channel dependency block
kwargs = {
"dims": 1,
"ca_type": v4c.CA_TYPE_SCALE_AXIS,
"flags": 0,
"byte_offset_base": samples.dtype.itemsize,
"dim_size_0": shape[0],
}
dep = ChannelArrayBlock(**kwargs)
gp_dep.append([dep])
# add components channel
s_type, s_size = fmt_to_datatype_v4(samples.dtype, ())
byte_size = s_size // 8 or 1
kwargs = {
"channel_type": v4c.CHANNEL_TYPE_VALUE,
"bit_count": s_size,
"byte_offset": offset,
"bit_offset": 0,
"data_type": s_type,
"flags": 0,
}
if invalidation_bytes_nr:
if signal.invalidation_bits is not None:
inval_bits.append(signal.invalidation_bits)
kwargs["flags"] |= v4c.FLAG_CN_INVALIDATION_PRESENT
kwargs["pos_invalidation_bit"] = inval_cntr
inval_cntr += 1
ch = Channel(**kwargs)
ch.name = name
ch.unit = signal.unit
ch.comment = signal.comment
ch.display_name = signal.display_name
ch.dtype_fmt = samples.dtype
gp_channels.append(ch)
entry = dg_cntr, ch_cntr
parent_dep.axis_channels.append(entry)
for dim in shape:
byte_size *= dim
offset += byte_size
gp_sdata.append(None)
gp_sdata_size.append(0)
self.channels_db.add(name, entry)
# update the parents as well
parents[ch_cntr] = field_name, 0
ch_cntr += 1
elif sig_type == v4c.SIGNAL_TYPE_STRUCTURE_COMPOSITION:
struct = Signal(
samples,
samples,
name=name,
invalidation_bits=signal.invalidation_bits,
)
(
offset,
dg_cntr,
ch_cntr,
sub_structure,
new_fields,
new_types,
inval_cntr,
) = self._append_structure_composition(
grp,
struct,
field_names,
offset,
dg_cntr,
ch_cntr,
parents,
defined_texts,
invalidation_bytes_nr,
inval_bits,
inval_cntr,
)
dep_list.append(sub_structure)
fields.extend(new_fields)
types.extend(new_types)
return offset, dg_cntr, ch_cntr, struct_self, fields, types, inval_cntr
def _append_structure_composition_column_oriented(
self,
grp,
signal,
field_names,
offset,
dg_cntr,
ch_cntr,
parents,
defined_texts,
):
si_map = self._si_map
fields = []
types = []
file = self._tempfile
seek = file.seek
seek(0, 2)
gp = grp
gp_sdata = gp.signal_data
gp_sdata_size = gp.signal_data_size
gp_channels = gp.channels
gp_dep = gp.channel_dependencies
name = signal.name
names = signal.samples.dtype.names
field_name = field_names.get_unique_name(name)
# first we add the structure channel
if signal.attachment and signal.attachment[0]:
at_data, at_name, hash_sum = signal.attachment
if at_name is not None:
suffix = Path(at_name).suffix.strip(".")
else:
suffix = "dbc"
attachment_index = self.attach(
at_data, at_name, hash_sum=hash_sum, mime=f"application/x-{suffix}"
)
attachment = attachment_index
else:
attachment = None
# add channel block
kwargs = {
"channel_type": v4c.CHANNEL_TYPE_VALUE,
"bit_count": signal.samples.dtype.itemsize * 8,
"byte_offset": offset,
"bit_offset": 0,
"data_type": v4c.DATA_TYPE_BYTEARRAY,
"precision": 0,
}
if attachment is not None:
kwargs["attachment_addr"] = 0
source_bus = signal.source and signal.source.source_type == v4c.SOURCE_BUS
if source_bus:
kwargs["flags"] = v4c.FLAG_CN_BUS_EVENT
flags_ = v4c.FLAG_CN_BUS_EVENT
grp.channel_group.flags |= v4c.FLAG_CG_BUS_EVENT
else:
kwargs["flags"] = 0
flags_ = 0
if signal.invalidation_bits is not None:
kwargs["flags"] |= v4c.FLAG_CN_INVALIDATION_PRESENT
kwargs["pos_invalidation_bit"] = 0
ch = Channel(**kwargs)
ch.name = name
ch.unit = signal.unit
ch.comment = signal.comment
ch.display_name = signal.display_name
ch.attachment = attachment
ch.dtype_fmt = signal.samples.dtype
if source_bus:
grp.channel_group.acq_source = SourceInformation.from_common_source(
signal.source
)
if signal.source.bus_type == v4c.BUS_TYPE_CAN:
grp.channel_group.path_separator = 46
grp.channel_group.acq_name = "CAN"
elif signal.source.bus_type == v4c.BUS_TYPE_FLEXRAY:
grp.channel_group.path_separator = 46
grp.channel_group.acq_name = "FLEXRAY"
elif signal.source.bus_type == v4c.BUS_TYPE_ETHERNET:
grp.channel_group.path_separator = 46
grp.channel_group.acq_name = "ETHERNET"
# source for channel
source = signal.source
if source:
if source in si_map:
ch.source = si_map[source]
else:
new_source = SourceInformation(
source_type=source.source_type, bus_type=source.bus_type
)
new_source.name = source.name
new_source.path = source.path
new_source.comment = source.comment
si_map[source] = new_source
ch.source = new_source
entry = dg_cntr, ch_cntr
gp_channels.append(ch)
struct_self = entry
gp_sdata.append(None)
gp_sdata_size.append(0)
self.channels_db.add(name, entry)
if ch.display_name:
self.channels_db.add(ch.display_name, entry)
# update the parents as well
parents[ch_cntr] = name, 0
ch_cntr += 1
dep_list = []
gp_dep.append(dep_list)
# then we add the fields
for name in names:
field_name = field_names.get_unique_name(name)
samples = signal.samples[name]
fld_names = samples.dtype.names
if fld_names is None:
sig_type = v4c.SIGNAL_TYPE_SCALAR
if samples.dtype.kind in "SV":
sig_type = v4c.SIGNAL_TYPE_STRING
else:
if fld_names in (v4c.CANOPEN_TIME_FIELDS, v4c.CANOPEN_DATE_FIELDS):
sig_type = v4c.SIGNAL_TYPE_CANOPEN
elif fld_names[0] != name:
sig_type = v4c.SIGNAL_TYPE_STRUCTURE_COMPOSITION
else:
sig_type = v4c.SIGNAL_TYPE_ARRAY
if sig_type in (v4c.SIGNAL_TYPE_SCALAR, v4c.SIGNAL_TYPE_STRING):
s_type, s_size = fmt_to_datatype_v4(samples.dtype, samples.shape)
byte_size = s_size // 8 or 1
fields.append(samples)
types.append((field_name, samples.dtype, samples.shape[1:]))
# add channel block
kwargs = {
"channel_type": v4c.CHANNEL_TYPE_VALUE,
"bit_count": s_size,
"byte_offset": offset,
"bit_offset": 0,
"data_type": s_type,
"flags": flags_,
}
if signal.invalidation_bits is not None:
kwargs["flags"] |= v4c.FLAG_CN_INVALIDATION_PRESENT
kwargs["pos_invalidation_bit"] = 0
ch = Channel(**kwargs)
ch.name = name
ch.dtype_fmt = dtype((samples.dtype, samples.shape[1:]))
entry = (dg_cntr, ch_cntr)
gp_channels.append(ch)
dep_list.append(entry)
offset += byte_size
gp_sdata.append(None)
gp_sdata_size.append(0)
self.channels_db.add(name, entry)
# update the parents as well
parents[ch_cntr] = field_name, 0
ch_cntr += 1
gp_dep.append(None)
elif sig_type == v4c.SIGNAL_TYPE_ARRAY:
# here we have channel arrays or mdf v3 channel dependencies
array_samples = samples
names = samples.dtype.names
samples = array_samples[names[0]]
shape = samples.shape[1:]
if len(names) > 1:
# add channel dependency block for composed parent channel
dims_nr = len(shape)
names_nr = len(names)
if names_nr == 0:
kwargs = {
"dims": dims_nr,
"ca_type": v4c.CA_TYPE_LOOKUP,
"flags": v4c.FLAG_CA_FIXED_AXIS,
"byte_offset_base": samples.dtype.itemsize,
}
for i in range(dims_nr):
kwargs[f"dim_size_{i}"] = shape[i]
elif len(names) == 1:
kwargs = {
"dims": dims_nr,
"ca_type": v4c.CA_TYPE_ARRAY,
"flags": 0,
"byte_offset_base": samples.dtype.itemsize,
}
for i in range(dims_nr):
kwargs[f"dim_size_{i}"] = shape[i]
else:
kwargs = {
"dims": dims_nr,
"ca_type": v4c.CA_TYPE_LOOKUP,
"flags": v4c.FLAG_CA_AXIS,
"byte_offset_base": samples.dtype.itemsize,
}
for i in range(dims_nr):
kwargs[f"dim_size_{i}"] = shape[i]
parent_dep = ChannelArrayBlock(**kwargs)
gp_dep.append([parent_dep])
else:
# add channel dependency block for composed parent channel
kwargs = {
"dims": 1,
"ca_type": v4c.CA_TYPE_SCALE_AXIS,
"flags": 0,
"byte_offset_base": samples.dtype.itemsize,
"dim_size_0": shape[0],
}
parent_dep = ChannelArrayBlock(**kwargs)
gp_dep.append([parent_dep])
field_name = field_names.get_unique_name(name)
fields.append(samples)
dtype_pair = field_name, samples.dtype, shape
types.append(dtype_pair)
# first we add the structure channel
s_type, s_size = fmt_to_datatype_v4(samples.dtype, samples.shape, True)
# add channel block
kwargs = {
"channel_type": v4c.CHANNEL_TYPE_VALUE,
"bit_count": s_size,
"byte_offset": offset,
"bit_offset": 0,
"data_type": s_type,
"flags": 0,
}
if signal.invalidation_bits is not None:
kwargs["flags"] |= v4c.FLAG_CN_INVALIDATION_PRESENT
kwargs["pos_invalidation_bit"] = 0
ch = Channel(**kwargs)
ch.name = name
ch.unit = signal.unit
ch.comment = signal.comment
ch.display_name = signal.display_name
ch.dtype_fmt = samples.dtype
# source for channel
source = signal.source
if source:
if source in si_map:
ch.source = si_map[source]
else:
new_source = SourceInformation(
source_type=source.source_type, bus_type=source.bus_type
)
new_source.name = source.name
new_source.path = source.path
new_source.comment = source.comment
si_map[source] = new_source
ch.source = new_source
gp_channels.append(ch)
size = s_size // 8
for dim in shape:
size *= dim
offset += size
gp_sdata.append(None)
entry = (dg_cntr, ch_cntr)
self.channels_db.add(name, entry)
if ch.display_name:
self.channels_db.add(ch.display_name, entry)
# update the parents as well
parents[ch_cntr] = name, 0
ch_cntr += 1
for name in names[1:]:
field_name = field_names.get_unique_name(name)
samples = array_samples[name]
shape = samples.shape[1:]
fields.append(samples)
types.append((field_name, samples.dtype, shape))
# add channel dependency block
kwargs = {
"dims": 1,
"ca_type": v4c.CA_TYPE_SCALE_AXIS,
"flags": 0,
"byte_offset_base": samples.dtype.itemsize,
"dim_size_0": shape[0],
}
dep = ChannelArrayBlock(**kwargs)
gp_dep.append([dep])
# add components channel
s_type, s_size = fmt_to_datatype_v4(samples.dtype, ())
byte_size = s_size // 8 or 1
kwargs = {
"channel_type": v4c.CHANNEL_TYPE_VALUE,
"bit_count": s_size,
"byte_offset": offset,
"bit_offset": 0,
"data_type": s_type,
"flags": 0,
}
if signal.invalidation_bits is not None:
kwargs["flags"] |= v4c.FLAG_CN_INVALIDATION_PRESENT
kwargs["pos_invalidation_bit"] = 0
ch = Channel(**kwargs)
ch.name = name
ch.unit = signal.unit
ch.comment = signal.comment
ch.display_name = signal.display_name
ch.dtype_fmt = samples.dtype
gp_channels.append(ch)
entry = dg_cntr, ch_cntr
parent_dep.axis_channels.append(entry)
for dim in shape:
byte_size *= dim
offset += byte_size
gp_sdata.append(None)
gp_sdata_size.append(0)
self.channels_db.add(name, entry)
# update the parents as well
parents[ch_cntr] = field_name, 0
ch_cntr += 1
elif sig_type == v4c.SIGNAL_TYPE_STRUCTURE_COMPOSITION:
struct = Signal(
samples,
samples,
name=name,
invalidation_bits=signal.invalidation_bits,
)
(
offset,
dg_cntr,
ch_cntr,
sub_structure,
new_fields,
new_types,
) = self._append_structure_composition_column_oriented(
grp,
struct,
field_names,
offset,
dg_cntr,
ch_cntr,
parents,
defined_texts,
)
dep_list.append(sub_structure)
fields.extend(new_fields)
types.extend(new_types)
return offset, dg_cntr, ch_cntr, struct_self, fields, types
def extend(self, index, signals):
"""
Extend a group with new samples. *signals* contains (values, invalidation_bits)
pairs for each extended signal. The first pair is the master channel's pair, and the
next pairs must respect the same order in which the signals were appended. The samples must have raw
or physical values according to the *Signals* used for the initial append.
Parameters
----------
index : int
group index
signals : list
list of (numpy.ndarray, numpy.ndarray) objects
Examples
--------
>>> # case 1 conversion type None
>>> s1 = np.array([1, 2, 3, 4, 5])
>>> s2 = np.array([-1, -2, -3, -4, -5])
>>> s3 = np.array([0.1, 0.04, 0.09, 0.16, 0.25])
>>> t = np.array([0.001, 0.002, 0.003, 0.004, 0.005])
>>> names = ['Positive', 'Negative', 'Float']
>>> units = ['+', '-', '.f']
>>> s1 = Signal(samples=s1, timestamps=t, unit='+', name='Positive')
>>> s2 = Signal(samples=s2, timestamps=t, unit='-', name='Negative')
>>> s3 = Signal(samples=s3, timestamps=t, unit='flts', name='Floats')
>>> mdf = MDF4('new.mdf')
>>> mdf.append([s1, s2, s3], comment='created by asammdf v1.1.0')
>>> t = np.array([0.006, 0.007, 0.008, 0.009, 0.010])
>>> # extend without invalidation bits
>>> mdf.extend(0, [(t, None), (s1.samples, None), (s2.samples, None), (s3.samples, None)])
>>> # extend with invalidation bits
>>> s1_inv = np.array([0, 0, 0, 1, 1], dtype=bool)
>>> mdf.extend(0, [(t, None), (s1.samples, s1_inv), (s2.samples, None), (s3.samples, None)])
"""
if self.version >= "4.20" and (self._column_storage or 1):
return self._extend_column_oriented(index, signals)
gp = self.groups[index]
if not signals:
message = '"append" requires a non-empty list of Signal objects'
raise MdfException(message)
stream = self._tempfile
fields = []
types = []
inval_bits = []
added_cycles = len(signals[0][0])
invalidation_bytes_nr = gp.channel_group.invalidation_bytes_nr
for i, ((signal, invalidation_bits), sig_type) in enumerate(
zip(signals, gp.signal_types)
):
# first add the signals in the simple signal list
if sig_type == v4c.SIGNAL_TYPE_SCALAR:
fields.append(signal)
types.append(("", signal.dtype, signal.shape[1:]))
if invalidation_bytes_nr and invalidation_bits is not None:
inval_bits.append(invalidation_bits)
elif sig_type == v4c.SIGNAL_TYPE_CANOPEN:
names = signal.dtype.names
if names == v4c.CANOPEN_TIME_FIELDS:
vals = signal.tobytes()
fields.append(frombuffer(vals, dtype="V6"))
types.append(("", "V6"))
else:
vals = []
for field in ("ms", "min", "hour", "day", "month", "year"):
vals.append(signal[field])
vals = fromarrays(vals).tobytes()
fields.append(frombuffer(vals, dtype="V7"))
types.append(("", "V7"))
if invalidation_bytes_nr and invalidation_bits is not None:
inval_bits.append(invalidation_bits)
elif sig_type == v4c.SIGNAL_TYPE_STRUCTURE_COMPOSITION:
if invalidation_bytes_nr and invalidation_bits is not None:
inval_bits.append(invalidation_bits)
fields.append(signal)
types.append(("", signal.dtype))
elif sig_type == v4c.SIGNAL_TYPE_ARRAY:
names = signal.dtype.names
samples = signal[names[0]]
shape = samples.shape[1:]
fields.append(samples)
types.append(("", samples.dtype, shape))
if invalidation_bytes_nr and invalidation_bits is not None:
inval_bits.append(invalidation_bits)
for name in names[1:]:
samples = signal[name]
shape = samples.shape[1:]
fields.append(samples)
types.append(("", samples.dtype, shape))
if invalidation_bytes_nr and invalidation_bits is not None:
inval_bits.append(invalidation_bits)
else:
if self.compact_vlsd:
cur_offset = sum(blk.size for blk in gp.signal_data[i])
data = []
offsets = []
off = 0
if gp.channels[i].data_type == v4c.DATA_TYPE_STRING_UTF_16_LE:
for elem in signal:
offsets.append(off)
size = len(elem)
if size % 2:
size += 1
elem = elem + b"\0"
data.append(UINT32_p(size))
data.append(elem)
off += size + 4
else:
for elem in signal:
offsets.append(off)
size = len(elem)
data.append(UINT32_p(size))
data.append(elem)
off += size + 4
offsets = array(offsets, dtype=uint64)
stream.seek(0, 2)
addr = stream.tell()
data_size = off
if data_size:
info = SignalDataBlockInfo(
address=addr,
size=data_size,
count=len(signal),
offsets=offsets,
)
gp.signal_data[i].append(info)
stream.write(b"".join(data))
offsets += cur_offset
fields.append(offsets)
types.append(("", uint64))
else:
cur_offset = sum(blk.size for blk in gp.signal_data[i])
offsets = arange(len(signal), dtype=uint64) * (signal.itemsize + 4)
values = [full(len(signal), signal.itemsize, dtype=uint32), signal]
types_ = [("", uint32), ("", signal.dtype)]
values = fromarrays(values, dtype=types_)
stream.seek(0, 2)
addr = stream.tell()
block_size = len(values) * values.itemsize
if block_size:
info = SignalDataBlockInfo(
address=addr,
size=block_size,
count=len(values),
offsets=offsets,
)
gp.signal_data[i].append(info)
values.tofile(stream)
offsets += cur_offset
fields.append(offsets)
types.append(("", uint64))
if invalidation_bytes_nr and invalidation_bits is not None:
inval_bits.append(invalidation_bits)
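# pack the collected invalidation bit arrays into per-cycle invalidation
# bytes: pad the bit columns to a multiple of 8, reverse them, transpose
# so each cycle becomes a row of bits, pack the rows with packbits and
# flip the byte columns to obtain the final byte order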
if invalidation_bytes_nr:
invalidation_bytes_nr = len(inval_bits)
cycles_nr = len(inval_bits[0])
for _ in range(8 - invalidation_bytes_nr % 8):
inval_bits.append(zeros(cycles_nr, dtype=bool))
inval_bits.reverse()
invalidation_bytes_nr = len(inval_bits) // 8
gp.channel_group.invalidation_bytes_nr = invalidation_bytes_nr
inval_bits = fliplr(
packbits(array(inval_bits).T).reshape(
(cycles_nr, invalidation_bytes_nr)
)
)
if self.version < "4.20":
fields.append(inval_bits)
types.append(
("invalidation_bytes", inval_bits.dtype, inval_bits.shape[1:])
)
samples = fromarrays(fields, dtype=types)
del fields
del types
stream.seek(0, 2)
addr = stream.tell()
size = len(samples) * samples.itemsize
if size:
if self.version < "4.20":
data = samples.tobytes()
raw_size = len(data)
data = lz_compress(data)
size = len(data)
stream.write(data)
gp.data_blocks.append(
DataBlockInfo(
address=addr,
block_type=v4c.DZ_BLOCK_LZ,
raw_size=raw_size,
size=size,
param=0,
)
)
gp.channel_group.cycles_nr += added_cycles
self.virtual_groups[index].cycles_nr += added_cycles
else:
data = samples.tobytes()
raw_size = len(data)
data = lz_compress(data)
size = len(data)
stream.write(data)
gp.data_blocks.append(
DataBlockInfo(
address=addr,
block_type=v4c.DT_BLOCK_LZ,
raw_size=raw_size,
size=size,
param=0,
)
)
gp.channel_group.cycles_nr += added_cycles
self.virtual_groups[index].cycles_nr += added_cycles
if invalidation_bytes_nr:
addr = stream.tell()
data = inval_bits.tobytes()
raw_size = len(data)
data = lz_compress(data)
size = len(data)
stream.write(data)
gp.data_blocks[-1].invalidation_block(
InvalidationBlockInfo(
address=addr,
block_type=v4c.DT_BLOCK_LZ,
raw_size=raw_size,
size=size,
param=None,
)
)
def _extend_column_oriented(self, index, signals):
"""
Extend a group with new samples. *signals* contains (values, invalidation_bits)
pairs for each extended signal. The first pair is the master channel's pair, and the
next pairs must respect the same order in which the signals were appended. The samples must have raw
or physical values according to the *Signals* used for the initial append.
Parameters
----------
index : int
group index
signals : list
list of (numpy.ndarray, numpy.ndarray) objects
Examples
--------
>>> # case 1 conversion type None
>>> s1 = np.array([1, 2, 3, 4, 5])
>>> s2 = np.array([-1, -2, -3, -4, -5])
>>> s3 = np.array([0.1, 0.04, 0.09, 0.16, 0.25])
>>> t = np.array([0.001, 0.002, 0.003, 0.004, 0.005])
>>> names = ['Positive', 'Negative', 'Float']
>>> units = ['+', '-', '.f']
>>> s1 = Signal(samples=s1, timestamps=t, unit='+', name='Positive')
>>> s2 = Signal(samples=s2, timestamps=t, unit='-', name='Negative')
>>> s3 = Signal(samples=s3, timestamps=t, unit='flts', name='Floats')
>>> mdf = MDF4('new.mdf')
>>> mdf.append([s1, s2, s3], comment='created by asammdf v1.1.0')
>>> t = np.array([0.006, 0.007, 0.008, 0.009, 0.010])
>>> # extend without invalidation bits
>>> mdf.extend(0, [(t, None), (s1.samples, None), (s2.samples, None), (s3.samples, None)])
>>> # extend with invalidation bits
>>> s1_inv = np.array([0, 0, 0, 1, 1], dtype=bool)
>>> mdf.extend(0, [(t, None), (s1.samples, s1_inv), (s2.samples, None), (s3.samples, None)])
"""
gp = self.groups[index]
if not signals:
message = '"append" requires a non-empty list of Signal objects'
raise MdfException(message)
stream = self._tempfile
stream.seek(0, 2)
write = stream.write
tell = stream.tell
added_cycles = len(signals[0][0])
self.virtual_groups[index].cycles_nr += added_cycles
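# in column storage every appended signal lives in its own group, so the
# new samples of signal *i* go to group ``index + i``; each chunk is
# written as an LZ-compressed block, with the invalidation bits (if any)
# compressed into a separate invalidation block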
for i, (signal, invalidation_bits) in enumerate(signals):
gp = self.groups[index + i]
sig_type = gp.signal_types[0]
# first add the signals in the simple signal list
if sig_type == v4c.SIGNAL_TYPE_SCALAR:
samples = signal
elif sig_type == v4c.SIGNAL_TYPE_CANOPEN:
names = signal.dtype.names
if names == v4c.CANOPEN_TIME_FIELDS:
samples = signal
else:
vals = []
for field in ("ms", "min", "hour", "day", "month", "year"):
vals.append(signal[field])
samples = fromarrays(vals)
elif sig_type == v4c.SIGNAL_TYPE_STRUCTURE_COMPOSITION:
samples = signal
elif sig_type == v4c.SIGNAL_TYPE_ARRAY:
samples = signal
else:
cur_offset = sum(blk.size for blk in gp.signal_data[0])
offsets = arange(len(signal), dtype=uint64) * (signal.itemsize + 4)
values = [full(len(signal), signal.itemsize, dtype=uint32), signal]
types_ = [("", uint32), ("", signal.dtype)]
values = fromarrays(values, dtype=types_)
addr = tell()
block_size = len(values) * values.itemsize
if block_size:
info = SignalDataBlockInfo(
address=addr,
size=block_size,
count=len(values),
offsets=offsets,
)
gp.signal_data[i].append(info)
write(values.tobytes())
offsets += cur_offset
samples = offsets
addr = tell()
if added_cycles:
data = samples.tobytes()
raw_size = len(data)
data = lz_compress(data)
size = len(data)
write(data)
gp.data_blocks.append(
DataBlockInfo(
address=addr,
block_type=v4c.DZ_BLOCK_LZ,
raw_size=raw_size,
size=size,
param=0,
)
)
gp.channel_group.cycles_nr += added_cycles
if invalidation_bits is not None:
addr = tell()
data = invalidation_bits.tobytes()
raw_size = len(data)
data = lz_compress(data)
size = len(data)
write(data)
gp.data_blocks[-1].invalidation_block(
InvalidationBlockInfo(
address=addr,
block_type=v4c.DZ_BLOCK_LZ,
raw_size=raw_size,
size=size,
param=None,
)
)
def attach(
self,
data,
file_name=None,
hash_sum=None,
comment=None,
compression=True,
mime=r"application/octet-stream",
embedded=True,
encryption_function=None,
):
"""attach embedded attachment as application/octet-stream.
Parameters
----------
data : bytes
data to be attached
file_name : str
string file name
hash_sum : bytes
md5 of the data
comment : str
attachment comment
compression : bool
use compression for embedded attachment data
mime : str
mime type string
embedded : bool
attachment is embedded in the file
encryption_function : callable, default None
function used to encrypt the data. The function should take a single bytes object as its
only argument and return the encrypted bytes object. This is only valid for embedded attachments
.. versionadded:: 6.2.0
Returns
-------
index : int
new attachment index
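Examples
--------
Illustrative sketch only; ``mdf`` is an open MDF4 object and
``vehicle.dbc`` is a hypothetical file name:
>>> with open('vehicle.dbc', 'rb') as f:
...     data = f.read()
>>> index = mdf.attach(data, file_name='vehicle.dbc', comment='bus database')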
"""
if hash_sum is None:
worker = md5()
worker.update(data)
hash_sum = worker.hexdigest()
hash_sum_encrypted = hash_sum
if hash_sum in self._attachments_cache:
return self._attachments_cache[hash_sum]
else:
creator_index = len(self.file_history)
fh = FileHistory()
fh.comment = """<FHcomment>
<TX>Added new embedded attachment from {}</TX>
<tool_id>asammdf</tool_id>
<tool_vendor>asammdf</tool_vendor>
<tool_version>{}</tool_version>
</FHcomment>""".format(
file_name if file_name else "bin.bin", __version__
)
self.file_history.append(fh)
file_name = file_name or "bin.bin"
encrypted = False
encryption_function = encryption_function or self._encryption_function
if encryption_function is not None:
try:
data = encryption_function(data)
worker = md5()
worker.update(data)
hash_sum_encrypted = worker.hexdigest()
if hash_sum_encrypted in self._attachments_cache:
return self._attachments_cache[hash_sum_encrypted]
encrypted = True
except:
pass
at_block = AttachmentBlock(
data=data,
compression=compression,
embedded=embedded,
file_name=file_name,
)
at_block["creator_index"] = creator_index
if encrypted:
at_block.flags |= v4c.FLAG_AT_ENCRYPTED
self.attachments.append(at_block)
suffix = Path(file_name).suffix.lower().strip(".")
if suffix == "a2l":
mime = "application/A2L"
else:
mime = f"application/x-{suffix}"
at_block.mime = mime
at_block.comment = comment
index = len(self.attachments) - 1
self._attachments_cache[hash_sum] = index
self._attachments_cache[hash_sum_encrypted] = index
return index
def close(self):
"""if the MDF was created with memory=False and new
channels have been appended, then this must be called just before the
object is not used anymore to clean-up the temporary file"""
self._parent = None
if self._tempfile is not None:
self._tempfile.close()
if not self._from_filelike and self._file is not None:
self._file.close()
for gp in self.groups:
gp.clear()
self.groups.clear()
self.header = None
self.identification = None
self.file_history.clear()
self.channels_db.clear()
self.masters_db.clear()
self.attachments.clear()
self._attachments_cache.clear()
self.file_comment = None
self.events.clear()
self._ch_map.clear()
self._master_channel_metadata.clear()
self._invalidation_cache.clear()
self._external_dbc_cache.clear()
self._si_map.clear()
self._file_si_map.clear()
self._cc_map.clear()
self._file_cc_map.clear()
self._cg_map.clear()
self._cn_data_map.clear()
self._dbc_cache.clear()
self.virtual_groups.clear()
@lru_cache(maxsize=128)
def extract_attachment(self, index=None, decryption_function=None):
"""extract attachment data by index. If it is an embedded attachment,
then this method creates the new file according to the attachment file
name information
Parameters
----------
index : int
attachment index; default *None*
decryption_function : callable, default None
function used to decrypt the data. The function should take a single bytes object as its
only argument and return the decrypted bytes object. This is only valid for embedded attachments
.. versionadded:: 6.2.0
Returns
-------
data : (bytes, pathlib.Path, bytes)
tuple of attachment data, attachment path and md5 digest of the data
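Examples
--------
Illustrative sketch only; ``mdf`` and the attachment index are hypothetical:
>>> data, path, md5_sum = mdf.extract_attachment(index=0)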
"""
if index is None:
return b"", Path(""), md5().digest()
attachment = self.attachments[index]
current_path = Path.cwd()
file_path = Path(attachment.file_name or "embedded")
try:
os.chdir(self.name.resolve().parent)
flags = attachment.flags
# for embedded attachments extract the data and create new files
if flags & v4c.FLAG_AT_EMBEDDED:
data = attachment.extract()
md5_worker = md5()
md5_worker.update(data)
md5_sum = md5_worker.digest()
if attachment.flags & v4c.FLAG_AT_ENCRYPTED and (
decryption_function is not None
or self._decryption_function is not None
):
try:
decryption_function = (
decryption_function or self._decryption_function
)
data = decryption_function(data)
except:
pass
else:
# for external attachments read the file and return the content
if flags & v4c.FLAG_AT_MD5_VALID:
data = open(file_path, "rb").read()
file_path = Path(f"FROM_{file_path}")
md5_worker = md5()
md5_worker.update(data)
md5_sum = md5_worker.digest()
if attachment["md5_sum"] == md5_sum:
if attachment.mime.startswith("text"):
with open(file_path, "r") as f:
data = f.read()
else:
message = (
f'ATBLOCK md5sum="{attachment["md5_sum"]}" does not match '
f"the external attachment data ({file_path}) "
f'md5sum="{md5_sum}"'
)
logger.warning(message)
else:
if attachment.mime.startswith("text"):
mode = "r"
else:
mode = "rb"
with open(file_path, mode) as f:
file_path = Path(f"FROM_{file_path}")
data = f.read()
md5_worker = md5()
md5_worker.update(data)
md5_sum = md5_worker.digest()
except Exception as err:
os.chdir(current_path)
message = f'Exception during attachment "{attachment.file_name}" extraction: {err!r}'
logger.warning(message)
data = b""
md5_sum = md5().digest()
finally:
os.chdir(current_path)
return data, file_path, md5_sum
def get(
self,
name=None,
group=None,
index=None,
raster=None,
samples_only=False,
data=None,
raw=False,
ignore_invalidation_bits=False,
record_offset=0,
record_count=None,
):
"""Gets channel samples. The raw data group samples are not loaded to
memory so it is advised to use ``filter`` or ``select`` instead of
performing several ``get`` calls.
Channel can be specified in two ways:
* using the first positional argument *name*
* if there are multiple occurrences for this channel then the
*group* and *index* arguments can be used to select a specific
group.
* if there are multiple occurrences for this channel and either the
*group* or *index* argument is None then a warning is issued
* using the group number (keyword argument *group*) and the channel
number (keyword argument *index*). Use *info* method for group and
channel numbers
If the *raster* keyword argument is not *None* the output is
interpolated accordingly
Parameters
----------
name : string
name of channel
group : int
0-based group index
index : int
0-based channel index
raster : float
time raster in seconds
samples_only : bool
if *True* return only the channel samples as numpy array; if
*False* return a *Signal* object
data : bytes
prevent redundant data read by providing the raw data group samples
raw : bool
return channel samples without applying the conversion rule; default
`False`
ignore_invalidation_bits : bool
option to ignore invalidation bits
record_offset : int
if *data=None* use this to select the record offset from which the
group data should be loaded
record_count : int
number of records to read; default *None* and in this case all
available records are used
Returns
-------
res : (numpy.array, numpy.array) | Signal
returns *Signal* if *samples_only*=*False* (default option),
otherwise returns a (numpy.array, numpy.array) tuple of samples and
invalidation bits. If invalidation bits are not used or if
*ignore_invalidation_bits* is False, then the second item will be
None.
The *Signal* samples are:
* numpy recarray for channels that have composition/channel
array address or for channel of type
CANOPENDATE, CANOPENTIME
* numpy array for all the rest
Raises
------
MdfException :
* if the channel name is not found
* if the group index is out of range
* if the channel index is out of range
Examples
--------
>>> from asammdf import MDF, Signal
>>> import numpy as np
>>> t = np.arange(5)
>>> s = np.ones(5)
>>> mdf = MDF(version='4.10')
>>> for i in range(4):
... sigs = [Signal(s*(i*10+j), t, name='Sig') for j in range(1, 4)]
... mdf.append(sigs)
...
>>> # first group and channel index of the specified channel name
...
>>> mdf.get('Sig')
UserWarning: Multiple occurrences for channel "Sig". Using first occurrence from data group 4. Provide both "group" and "index" arguments to select another data group
<Signal Sig:
samples=[ 1. 1. 1. 1. 1.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> # first channel index in the specified group
...
>>> mdf.get('Sig', 1)
<Signal Sig:
samples=[ 11. 11. 11. 11. 11.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> # channel named Sig from group 1 channel index 2
...
>>> mdf.get('Sig', 1, 2)
<Signal Sig:
samples=[ 12. 12. 12. 12. 12.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> # channel index 1 of group 2
...
>>> mdf.get(None, 2, 1)
<Signal Sig:
samples=[ 21. 21. 21. 21. 21.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> mdf.get(group=2, index=1)
<Signal Sig:
samples=[ 21. 21. 21. 21. 21.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
"""
gp_nr, ch_nr = self._validate_channel_selection(name, group, index)
grp = self.groups[gp_nr]
# get the channel object
channel = grp.channels[ch_nr]
dependency_list = grp.channel_dependencies[ch_nr]
master_is_required = not samples_only or raster
if dependency_list:
if not isinstance(dependency_list[0], ChannelArrayBlock):
vals, timestamps, invalidation_bits, encoding = self._get_structure(
channel=channel,
group=grp,
group_index=gp_nr,
channel_index=ch_nr,
dependency_list=dependency_list,
raster=raster,
data=data,
ignore_invalidation_bits=ignore_invalidation_bits,
record_offset=record_offset,
record_count=record_count,
master_is_required=master_is_required,
)
else:
vals, timestamps, invalidation_bits, encoding = self._get_array(
channel=channel,
group=grp,
group_index=gp_nr,
channel_index=ch_nr,
dependency_list=dependency_list,
raster=raster,
data=data,
ignore_invalidation_bits=ignore_invalidation_bits,
record_offset=record_offset,
record_count=record_count,
master_is_required=master_is_required,
)
else:
vals, timestamps, invalidation_bits, encoding = self._get_scalar(
channel=channel,
group=grp,
group_index=gp_nr,
channel_index=ch_nr,
dependency_list=dependency_list,
raster=raster,
data=data,
ignore_invalidation_bits=ignore_invalidation_bits,
record_offset=record_offset,
record_count=record_count,
master_is_required=master_is_required,
)
conversion = channel.conversion
if not raw and conversion:
vals = conversion.convert(vals)
conversion = None
if vals.dtype.kind == "S":
encoding = "utf-8"
if not vals.flags.owndata and self.copy_on_get:
vals = vals.copy()
if samples_only:
res = vals, invalidation_bits
else:
# search for unit in conversion texts
channel_type = channel.channel_type
if name is None:
name = channel.name
unit = conversion and conversion.unit or channel.unit
comment = channel.comment
source = channel.source
if source:
source = Source.from_source(source)
else:
cg_source = grp.channel_group.acq_source
if cg_source:
source = Source.from_source(cg_source)
else:
source = None
if channel.attachment is not None:
attachment = self.extract_attachment(
channel.attachment,
decryption_function=self._decryption_function,
)
else:
attachment = None
master_metadata = self._master_channel_metadata.get(gp_nr, None)
stream_sync = channel_type == v4c.CHANNEL_TYPE_SYNC
try:
res = Signal(
samples=vals,
timestamps=timestamps,
unit=unit,
name=name,
comment=comment,
conversion=conversion,
raw=raw,
master_metadata=master_metadata,
attachment=attachment,
source=source,
display_name=channel.display_name,
bit_count=channel.bit_count,
stream_sync=stream_sync,
invalidation_bits=invalidation_bits,
encoding=encoding,
group_index=gp_nr,
channel_index=ch_nr,
)
except:
debug_channel(self, grp, channel, dependency_list)
raise
return res
def _get_structure(
self,
channel,
group,
group_index,
channel_index,
dependency_list,
raster,
data,
ignore_invalidation_bits,
record_offset,
record_count,
master_is_required,
):
grp = group
gp_nr = group_index
# get data group record
parents, dtypes = group.parents, group.types
if parents is None:
parents, dtypes = self._prepare_record(grp)
# get group data
if data is None:
data = self._load_data(
grp, record_offset=record_offset, record_count=record_count
)
else:
data = (data,)
channel_invalidation_present = channel.flags & (
v4c.FLAG_CN_ALL_INVALID | v4c.FLAG_CN_INVALIDATION_PRESENT
)
_dtype = dtype(channel.dtype_fmt)
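# fast path: the structure occupies a contiguous, byte aligned slice of the
# record, so it can be extracted with a single view instead of reading each
# member channel separately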
if _dtype.itemsize == channel.bit_count // 8:
fast_path = True
channel_values = []
timestamps = []
invalidation_bits = []
byte_offset = channel.byte_offset
record_size = (
grp.channel_group.samples_byte_nr
+ grp.channel_group.invalidation_bytes_nr
)
count = 0
for fragment in data:
bts = fragment[0]
types = [
("", f"V{byte_offset}"),
("vals", _dtype),
("", f"V{record_size - _dtype.itemsize - byte_offset}"),
]
channel_values.append(fromstring(bts, types)["vals"].copy())
if master_is_required:
timestamps.append(self.get_master(gp_nr, fragment, one_piece=True))
if channel_invalidation_present:
invalidation_bits.append(
self.get_invalidation_bits(gp_nr, channel, fragment)
)
count += 1
else:
unique_names = UniqueDB()
fast_path = False
names = [
unique_names.get_unique_name(grp.channels[ch_nr].name)
for _, ch_nr in dependency_list
]
channel_values = [[] for _ in dependency_list]
timestamps = []
invalidation_bits = []
count = 0
for fragment in data:
for i, (dg_nr, ch_nr) in enumerate(dependency_list):
vals = self.get(
group=dg_nr,
index=ch_nr,
samples_only=True,
data=fragment,
ignore_invalidation_bits=ignore_invalidation_bits,
record_offset=record_offset,
record_count=record_count,
)[0]
channel_values[i].append(vals)
if master_is_required:
timestamps.append(self.get_master(gp_nr, fragment, one_piece=True))
if channel_invalidation_present:
invalidation_bits.append(
self.get_invalidation_bits(gp_nr, channel, fragment)
)
count += 1
if fast_path:
total_size = sum(len(_) for _ in channel_values)
shape = (total_size,) + channel_values[0].shape[1:]
if count > 1:
out = empty(shape, dtype=channel_values[0].dtype)
vals = concatenate(channel_values, out=out)
else:
vals = channel_values[0]
else:
total_size = sum(len(_) for _ in channel_values[0])
if count > 1:
arrays = [
concatenate(
lst,
out=empty((total_size,) + lst[0].shape[1:], dtype=lst[0].dtype),
)
for lst in channel_values
]
else:
arrays = [lst[0] for lst in channel_values]
types = [
(name_, arr.dtype, arr.shape[1:]) for name_, arr in zip(names, arrays)
]
types = dtype(types)
vals = fromarrays(arrays, dtype=types)
if master_is_required:
if count > 1:
out = empty(total_size, dtype=timestamps[0].dtype)
timestamps = concatenate(timestamps, out=out)
else:
timestamps = timestamps[0]
else:
timestamps = None
if channel_invalidation_present:
if count > 1:
out = empty(total_size, dtype=invalidation_bits[0].dtype)
invalidation_bits = concatenate(invalidation_bits, out=out)
else:
invalidation_bits = invalidation_bits[0]
if not ignore_invalidation_bits:
vals = vals[nonzero(~invalidation_bits)[0]]
if master_is_required:
timestamps = timestamps[nonzero(~invalidation_bits)[0]]
invalidation_bits = None
else:
invalidation_bits = None
if raster and len(timestamps) > 1:
t = arange(timestamps[0], timestamps[-1], raster)
vals = Signal(
vals, timestamps, name="_", invalidation_bits=invalidation_bits
).interp(
t,
integer_interpolation_mode=self._integer_interpolation,
float_interpolation_mode=self._float_interpolation,
)
vals, timestamps, invalidation_bits = (
vals.samples,
vals.timestamps,
vals.invalidation_bits,
)
return vals, timestamps, invalidation_bits, None
def _get_array(
self,
channel,
group,
group_index,
channel_index,
dependency_list,
raster,
data,
ignore_invalidation_bits,
record_offset,
record_count,
master_is_required,
):
grp = group
gp_nr = group_index
ch_nr = channel_index
# get data group record
parents, dtypes = group.parents, group.types
if parents is None:
parents, dtypes = self._prepare_record(grp)
# get group data
if data is None:
data = self._load_data(
grp, record_offset=record_offset, record_count=record_count
)
else:
data = (data,)
channel_invalidation_present = channel.flags & (
v4c.FLAG_CN_ALL_INVALID | v4c.FLAG_CN_INVALIDATION_PRESENT
)
channel_group = grp.channel_group
samples_size = (
channel_group.samples_byte_nr + channel_group.invalidation_bytes_nr
)
channel_values = []
timestamps = []
invalidation_bits = []
count = 0
for fragment in data:
data_bytes, offset, _count, invalidation_bytes = fragment
cycles = len(data_bytes) // samples_size
arrays = []
types = []
try:
parent, bit_offset = parents[ch_nr]
except KeyError:
parent, bit_offset = None, None
if parent is not None:
if grp.record is None:
dtypes = grp.types
if dtypes.itemsize:
record = fromstring(data_bytes, dtype=dtypes)
else:
record = None
else:
record = grp.record
vals = record[parent].copy()
else:
vals = self._get_not_byte_aligned_data(data_bytes, grp, ch_nr)
dep = dependency_list[0]
if dep.flags & v4c.FLAG_CA_INVERSE_LAYOUT:
shape = vals.shape
shape = (shape[0],) + shape[1:][::-1]
vals = vals.reshape(shape)
axes = (0,) + tuple(range(len(shape) - 1, 0, -1))
vals = transpose(vals, axes=axes)
cycles_nr = len(vals)
for ca_block in dependency_list[:1]:
dims_nr = ca_block.dims
if ca_block.ca_type == v4c.CA_TYPE_SCALE_AXIS:
shape = (ca_block.dim_size_0,)
arrays.append(vals)
dtype_pair = channel.name, vals.dtype, shape
types.append(dtype_pair)
elif ca_block.ca_type == v4c.CA_TYPE_LOOKUP:
shape = vals.shape[1:]
arrays.append(vals)
dtype_pair = channel.name, vals.dtype, shape
types.append(dtype_pair)
if ca_block.flags & v4c.FLAG_CA_FIXED_AXIS:
for i in range(dims_nr):
shape = (ca_block[f"dim_size_{i}"],)
axis = []
for j in range(shape[0]):
key = f"axis_{i}_value_{j}"
axis.append(ca_block[key])
axis = array([axis for _ in range(cycles_nr)])
arrays.append(axis)
dtype_pair = (f"axis_{i}", axis.dtype, shape)
types.append(dtype_pair)
else:
for i in range(dims_nr):
axis = ca_block.axis_channels[i]
shape = (ca_block[f"dim_size_{i}"],)
if axis is None:
axisname = f"axis_{i}"
if cycles:
axis_values = array([arange(shape[0])] * cycles)
else:
axis_values = array([], dtype=f"({shape[0]},)f8")
else:
try:
(ref_dg_nr, ref_ch_nr) = ca_block.axis_channels[i]
except:
debug_channel(self, grp, channel, dependency_list)
raise
axisname = (
self.groups[ref_dg_nr].channels[ref_ch_nr].name
)
if ref_dg_nr == gp_nr:
axis_values = self.get(
group=ref_dg_nr,
index=ref_ch_nr,
samples_only=True,
data=fragment,
ignore_invalidation_bits=ignore_invalidation_bits,
record_offset=record_offset,
record_count=cycles,
)[0]
else:
channel_group = grp.channel_group
record_size = channel_group.samples_byte_nr
record_size += channel_group.invalidation_bytes_nr
start = offset // record_size
end = start + len(data_bytes) // record_size + 1
ref = self.get(
group=ref_dg_nr,
index=ref_ch_nr,
samples_only=True,
ignore_invalidation_bits=ignore_invalidation_bits,
record_offset=record_offset,
record_count=cycles,
)[0]
axis_values = ref[start:end].copy()
axis_values = axis_values[axisname]
if len(axis_values) == 0 and cycles:
axis_values = array([arange(shape[0])] * cycles)
arrays.append(axis_values)
dtype_pair = (axisname, axis_values.dtype, shape)
types.append(dtype_pair)
elif ca_block.ca_type == v4c.CA_TYPE_ARRAY:
shape = vals.shape[1:]
arrays.append(vals)
dtype_pair = channel.name, vals.dtype, shape
types.append(dtype_pair)
for ca_block in dependency_list[1:]:
dims_nr = ca_block.dims
if ca_block.flags & v4c.FLAG_CA_FIXED_AXIS:
for i in range(dims_nr):
shape = (ca_block[f"dim_size_{i}"],)
axis = []
for j in range(shape[0]):
key = f"axis_{i}_value_{j}"
axis.append(ca_block[key])
axis = array([axis for _ in range(cycles_nr)])
arrays.append(axis)
types.append((f"axis_{i}", axis.dtype, shape))
else:
for i in range(dims_nr):
axis = ca_block.axis_channels[i]
shape = (ca_block[f"dim_size_{i}"],)
if axis is None:
axisname = f"axis_{i}"
if cycles:
axis_values = array([arange(shape[0])] * cycles)
else:
axis_values = array([], dtype=f"({shape[0]},)f8")
else:
try:
ref_dg_nr, ref_ch_nr = ca_block.axis_channels[i]
except:
debug_channel(self, grp, channel, dependency_list)
raise
axisname = self.groups[ref_dg_nr].channels[ref_ch_nr].name
if ref_dg_nr == gp_nr:
axis_values = self.get(
group=ref_dg_nr,
index=ref_ch_nr,
samples_only=True,
data=fragment,
ignore_invalidation_bits=ignore_invalidation_bits,
record_offset=record_offset,
record_count=cycles,
)[0]
else:
channel_group = grp.channel_group
record_size = channel_group.samples_byte_nr
record_size += channel_group.invalidation_bytes_nr
start = offset // record_size
end = start + len(data_bytes) // record_size + 1
ref = self.get(
group=ref_dg_nr,
index=ref_ch_nr,
samples_only=True,
ignore_invalidation_bits=ignore_invalidation_bits,
record_offset=record_offset,
record_count=cycles,
)[0]
axis_values = ref[start:end].copy()
axis_values = axis_values[axisname]
if len(axis_values) == 0 and cycles:
axis_values = array([arange(shape[0])] * cycles)
arrays.append(axis_values)
dtype_pair = (axisname, axis_values.dtype, shape)
types.append(dtype_pair)
vals = fromarrays(arrays, dtype(types))
if master_is_required:
timestamps.append(self.get_master(gp_nr, fragment, one_piece=True))
if channel_invalidation_present:
invalidation_bits.append(
self.get_invalidation_bits(gp_nr, channel, fragment)
)
channel_values.append(vals)
count += 1
if count > 1:
total_size = sum(len(_) for _ in channel_values)
shape = (total_size,) + channel_values[0].shape[1:]
if count > 1:
out = empty(shape, dtype=channel_values[0].dtype)
vals = concatenate(channel_values, out=out)
elif count == 1:
vals = channel_values[0]
else:
vals = []
if master_is_required:
if count > 1:
out = empty(total_size, dtype=timestamps[0].dtype)
timestamps = concatenate(timestamps, out=out)
else:
timestamps = timestamps[0]
else:
timestamps = None
if channel_invalidation_present:
if count > 1:
out = empty(total_size, dtype=invalidation_bits[0].dtype)
invalidation_bits = concatenate(invalidation_bits, out=out)
else:
invalidation_bits = invalidation_bits[0]
if not ignore_invalidation_bits:
vals = vals[nonzero(~invalidation_bits)[0]]
if master_is_required:
timestamps = timestamps[nonzero(~invalidation_bits)[0]]
invalidation_bits = None
else:
invalidation_bits = None
if raster and len(timestamps) > 1:
t = arange(timestamps[0], timestamps[-1], raster)
vals = Signal(
vals, timestamps, name="_", invalidation_bits=invalidation_bits
).interp(
t,
integer_interpolation_mode=self._integer_interpolation,
float_interpolation_mode=self._float_interpolation,
)
vals, timestamps, invalidation_bits = (
vals.samples,
vals.timestamps,
vals.invalidation_bits,
)
return vals, timestamps, invalidation_bits, None
def _get_scalar(
self,
channel,
group,
group_index,
channel_index,
dependency_list,
raster,
data,
ignore_invalidation_bits,
record_offset,
record_count,
master_is_required,
):
grp = group
gp_nr = group_index
ch_nr = channel_index
# get data group record
parents, dtypes = group.parents, group.types
if parents is None:
parents, dtypes = self._prepare_record(grp)
# get group data
if data is None:
data = self._load_data(
grp, record_offset=record_offset, record_count=record_count
)
one_piece = False
else:
data = (data,)
one_piece = True
channel_invalidation_present = channel.flags & (
v4c.FLAG_CN_ALL_INVALID | v4c.FLAG_CN_INVALIDATION_PRESENT
)
data_type = channel.data_type
channel_type = channel.channel_type
bit_count = channel.bit_count
encoding = None
if channel.dtype_fmt.subdtype:
channel_dtype = channel.dtype_fmt.subdtype[0]
else:
channel_dtype = channel.dtype_fmt
# get channel values
if channel_type in {
v4c.CHANNEL_TYPE_VIRTUAL,
v4c.CHANNEL_TYPE_VIRTUAL_MASTER,
}:
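# virtual channels carry no stored samples; their values are generated
# from the zero-based record index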
if not channel.dtype_fmt:
channel.dtype_fmt = get_fmt_v4(data_type, 64)
ch_dtype = dtype(channel.dtype_fmt)
channel_values = []
timestamps = []
invalidation_bits = []
channel_group = grp.channel_group
record_size = channel_group.samples_byte_nr
record_size += channel_group.invalidation_bytes_nr
count = 0
for fragment in data:
data_bytes, offset, _count, invalidation_bytes = fragment
offset = offset // record_size
vals = arange(len(data_bytes) // record_size, dtype=ch_dtype)
vals += offset
if master_is_required:
timestamps.append(
self.get_master(
gp_nr,
fragment,
record_offset=offset,
record_count=_count,
one_piece=True,
)
)
if channel_invalidation_present:
invalidation_bits.append(
self.get_invalidation_bits(gp_nr, channel, fragment)
)
channel_values.append(vals)
count += 1
if count > 1:
total_size = sum(len(_) for _ in channel_values)
shape = (total_size,) + channel_values[0].shape[1:]
if count > 1:
out = empty(shape, dtype=channel_values[0].dtype)
vals = concatenate(channel_values, out=out)
elif count == 1:
vals = channel_values[0]
else:
vals = []
if master_is_required:
if count > 1:
out = empty(total_size, dtype=timestamps[0].dtype)
timestamps = concatenate(timestamps, out=out)
else:
timestamps = timestamps[0]
if channel_invalidation_present:
if count > 1:
out = empty(total_size, dtype=invalidation_bits[0].dtype)
invalidation_bits = concatenate(invalidation_bits, out=out)
else:
invalidation_bits = invalidation_bits[0]
if not ignore_invalidation_bits:
vals = vals[nonzero(~invalidation_bits)[0]]
if master_is_required:
timestamps = timestamps[nonzero(~invalidation_bits)[0]]
invalidation_bits = None
else:
invalidation_bits = None
if raster and len(timestamps) > 1:
num = float(float32((timestamps[-1] - timestamps[0]) / raster))
if num.is_integer():
t = linspace(timestamps[0], timestamps[-1], int(num))
else:
t = arange(timestamps[0], timestamps[-1], raster)
vals = Signal(
vals, timestamps, name="_", invalidation_bits=invalidation_bits
).interp(
t,
integer_interpolation_mode=self._integer_interpolation,
float_interpolation_mode=self._float_interpolation,
)
vals, timestamps, invalidation_bits = (
vals.samples,
vals.timestamps,
vals.invalidation_bits,
)
else:
record_size = grp.channel_group.samples_byte_nr
if one_piece:
fragment = data[0]
data_bytes, record_start, record_count, invalidation_bytes = fragment
try:
parent, bit_offset = parents[ch_nr]
except KeyError:
parent, bit_offset = None, None
if parent is not None:
if (
len(grp.channels) == 1
and channel.dtype_fmt.itemsize == record_size
):
vals = frombuffer(data_bytes, dtype=channel.dtype_fmt)
else:
record = grp.record
if record is None:
record = fromstring(data_bytes, dtype=dtypes)
vals = record[parent]
dtype_ = vals.dtype
shape_ = vals.shape
size = dtype_.itemsize
for dim in shape_[1:]:
size *= dim
kind_ = dtype_.kind
vals_dtype = vals.dtype.kind
if kind_ == "b":
pass
elif len(shape_) > 1 and data_type not in (
v4c.DATA_TYPE_BYTEARRAY,
v4c.DATA_TYPE_MIME_SAMPLE,
v4c.DATA_TYPE_MIME_STREAM,
):
vals = self._get_not_byte_aligned_data(data_bytes, grp, ch_nr)
elif vals_dtype not in "ui" and (
bit_offset or not bit_count == size * 8
):
vals = self._get_not_byte_aligned_data(data_bytes, grp, ch_nr)
else:
dtype_ = vals.dtype
kind_ = dtype_.kind
if data_type in v4c.INT_TYPES:
if channel_dtype.byteorder == "|" and data_type in (
v4c.DATA_TYPE_SIGNED_MOTOROLA,
v4c.DATA_TYPE_UNSIGNED_MOTOROLA,
):
view = f">u{vals.itemsize}"
else:
view = f"{channel_dtype.byteorder}u{vals.itemsize}"
if dtype(view) != vals.dtype:
vals = vals.view(view)
if bit_offset:
vals = vals >> bit_offset
if bit_count != size * 8:
if data_type in v4c.SIGNED_INT:
vals = as_non_byte_sized_signed_int(vals, bit_count)
else:
mask = (1 << bit_count) - 1
vals = vals & mask
elif data_type in v4c.SIGNED_INT:
view = f"{channel_dtype.byteorder}i{vals.itemsize}"
if dtype(view) != vals.dtype:
vals = vals.view(view)
else:
if bit_count != size * 8:
vals = self._get_not_byte_aligned_data(
data_bytes, grp, ch_nr
)
else:
if kind_ in "ui":
vals = vals.view(channel_dtype)
else:
vals = self._get_not_byte_aligned_data(data_bytes, grp, ch_nr)
if self._single_bit_uint_as_bool and bit_count == 1:
vals = array(vals, dtype=bool)
else:
if vals.dtype != channel_dtype:
vals = vals.astype(channel_dtype)
if master_is_required:
timestamps = self.get_master(gp_nr, fragment, one_piece=True)
else:
timestamps = None
if channel_invalidation_present:
invalidation_bits = self.get_invalidation_bits(
gp_nr, channel, fragment
)
if not ignore_invalidation_bits:
vals = vals[nonzero(~invalidation_bits)[0]]
if master_is_required:
timestamps = timestamps[nonzero(~invalidation_bits)[0]]
invalidation_bits = None
else:
invalidation_bits = None
else:
channel_values = []
timestamps = []
invalidation_bits = []
for count, fragment in enumerate(data, 1):
data_bytes, offset, _count, invalidation_bytes = fragment
if count == 1:
record_start = offset
record_count = _count
try:
parent, bit_offset = parents[ch_nr]
except KeyError:
parent, bit_offset = None, None
if parent is not None:
if (
len(grp.channels) == 1
and channel.dtype_fmt.itemsize == record_size
):
vals = frombuffer(data_bytes, dtype=channel.dtype_fmt)
else:
record = grp.record
if record is None:
record = fromstring(data_bytes, dtype=dtypes)
vals = record[parent]
dtype_ = vals.dtype
shape_ = vals.shape
size = dtype_.itemsize
for dim in shape_[1:]:
size *= dim
kind_ = dtype_.kind
vals_dtype = vals.dtype.kind
if kind_ == "b":
pass
elif len(shape_) > 1 and data_type not in (
v4c.DATA_TYPE_BYTEARRAY,
v4c.DATA_TYPE_MIME_SAMPLE,
v4c.DATA_TYPE_MIME_STREAM,
):
vals = self._get_not_byte_aligned_data(
data_bytes, grp, ch_nr
)
elif vals_dtype not in "ui" and (
bit_offset or not bit_count == size * 8
):
vals = self._get_not_byte_aligned_data(
data_bytes, grp, ch_nr
)
else:
dtype_ = vals.dtype
kind_ = dtype_.kind
if data_type in v4c.INT_TYPES:
if channel_dtype.byteorder == "|" and data_type in (
v4c.DATA_TYPE_SIGNED_MOTOROLA,
v4c.DATA_TYPE_UNSIGNED_MOTOROLA,
):
view = f">u{vals.itemsize}"
else:
view = f"{channel_dtype.byteorder}u{vals.itemsize}"
if dtype(view) != vals.dtype:
vals = vals.view(view)
if bit_offset:
vals = vals >> bit_offset
if bit_count != size * 8:
if data_type in v4c.SIGNED_INT:
vals = as_non_byte_sized_signed_int(
vals, bit_count
)
else:
mask = (1 << bit_count) - 1
vals = vals & mask
elif data_type in v4c.SIGNED_INT:
view = f"{channel_dtype.byteorder}i{vals.itemsize}"
if dtype(view) != vals.dtype:
vals = vals.view(view)
else:
if bit_count != size * 8:
vals = self._get_not_byte_aligned_data(
data_bytes, grp, ch_nr
)
else:
if kind_ in "ui":
vals = vals.view(channel_dtype)
else:
vals = self._get_not_byte_aligned_data(data_bytes, grp, ch_nr)
if bit_count == 1 and self._single_bit_uint_as_bool:
vals = array(vals, dtype=bool)
else:
if vals.dtype != channel_dtype:
vals = vals.astype(channel_dtype)
if master_is_required:
timestamps.append(
self.get_master(gp_nr, fragment, one_piece=True)
)
if channel_invalidation_present:
invalidation_bits.append(
self.get_invalidation_bits(gp_nr, channel, fragment)
)
if vals.flags.owndata:
channel_values.append(vals)
else:
channel_values.append(vals.copy())
if count > 1:
total_size = sum(len(_) for _ in channel_values)
shape = (total_size,) + channel_values[0].shape[1:]
out = empty(shape, dtype=channel_values[0].dtype)
vals = concatenate(channel_values, out=out)
elif count == 1:
vals = channel_values[0]
else:
vals = array([], dtype=channel.dtype_fmt)
if master_is_required:
if count > 1:
out = empty(total_size, dtype=timestamps[0].dtype)
timestamps = concatenate(timestamps, out=out)
elif count == 1:
timestamps = timestamps[0]
else:
timestamps = []
if channel_invalidation_present:
if count > 1:
out = empty(total_size, dtype=invalidation_bits[0].dtype)
invalidation_bits = concatenate(invalidation_bits, out=out)
elif count == 1:
invalidation_bits = invalidation_bits[0]
else:
invalidation_bits = []
if not ignore_invalidation_bits:
vals = vals[nonzero(~invalidation_bits)[0]]
if master_is_required:
timestamps = timestamps[nonzero(~invalidation_bits)[0]]
invalidation_bits = None
else:
invalidation_bits = None
if raster and len(timestamps) > 1:
num = float(float32((timestamps[-1] - timestamps[0]) / raster))
if num.is_integer():
t = linspace(timestamps[0], timestamps[-1], int(num))
else:
t = arange(timestamps[0], timestamps[-1], raster)
vals = Signal(
vals, timestamps, name="_", invalidation_bits=invalidation_bits
).interp(
t,
integer_interpolation_mode=self._integer_interpolation,
float_interpolation_mode=self._float_interpolation,
)
vals, timestamps, invalidation_bits = (
vals.samples,
vals.timestamps,
vals.invalidation_bits,
)
if channel_type == v4c.CHANNEL_TYPE_VLSD:
count_ = len(vals)
signal_data, with_bounds = self._load_signal_data(
group=grp, index=ch_nr, offset=record_start, count=count_
)
if signal_data:
if data_type in (
v4c.DATA_TYPE_BYTEARRAY,
v4c.DATA_TYPE_UNSIGNED_INTEL,
v4c.DATA_TYPE_UNSIGNED_MOTOROLA,
):
vals = extract(signal_data, 1)
else:
vals = extract(signal_data, 0)
if not with_bounds:
vals = vals[record_start : record_start + count_]
if data_type not in (
v4c.DATA_TYPE_BYTEARRAY,
v4c.DATA_TYPE_UNSIGNED_INTEL,
v4c.DATA_TYPE_UNSIGNED_MOTOROLA,
):
if data_type == v4c.DATA_TYPE_STRING_UTF_16_BE:
encoding = "utf-16-be"
elif data_type == v4c.DATA_TYPE_STRING_UTF_16_LE:
encoding = "utf-16-le"
elif data_type == v4c.DATA_TYPE_STRING_UTF_8:
encoding = "utf-8"
elif data_type == v4c.DATA_TYPE_STRING_LATIN_1:
encoding = "latin-1"
else:
raise MdfException(
f'wrong data type "{data_type}" for vlsd channel'
)
else:
if len(vals):
raise MdfException(
f'Wrong signal data block reference (0x{channel.data_block_addr:X}) for VLSD channel "{channel.name}"'
)
# no VLSD signal data samples
if data_type != v4c.DATA_TYPE_BYTEARRAY:
vals = array([], dtype="S")
if data_type == v4c.DATA_TYPE_STRING_UTF_16_BE:
encoding = "utf-16-be"
elif data_type == v4c.DATA_TYPE_STRING_UTF_16_LE:
encoding = "utf-16-le"
elif data_type == v4c.DATA_TYPE_STRING_UTF_8:
encoding = "utf-8"
elif data_type == v4c.DATA_TYPE_STRING_LATIN_1:
encoding = "latin-1"
else:
raise MdfException(
f'wrong data type "{data_type}" for vlsd channel'
)
else:
vals = array(
[],
dtype=get_fmt_v4(data_type, bit_count, v4c.CHANNEL_TYPE_VALUE),
)
elif not (
v4c.DATA_TYPE_STRING_LATIN_1 <= data_type <= v4c.DATA_TYPE_STRING_UTF_16_BE
):
pass
elif channel_type in {v4c.CHANNEL_TYPE_VALUE, v4c.CHANNEL_TYPE_MLSD}:
if data_type == v4c.DATA_TYPE_STRING_UTF_16_BE:
encoding = "utf-16-be"
elif data_type == v4c.DATA_TYPE_STRING_UTF_16_LE:
encoding = "utf-16-le"
elif data_type == v4c.DATA_TYPE_STRING_UTF_8:
encoding = "utf-8"
elif data_type == v4c.DATA_TYPE_STRING_LATIN_1:
encoding = "latin-1"
else:
raise MdfException(f'wrong data type "{data_type}" for string channel')
if (
data_type < v4c.DATA_TYPE_CANOPEN_DATE
or data_type > v4c.DATA_TYPE_CANOPEN_TIME
):
pass
else:
# CANopen date
if data_type == v4c.DATA_TYPE_CANOPEN_DATE:
types = dtype(
[
("ms", "<u2"),
("min", "<u1"),
("hour", "<u1"),
("day", "<u1"),
("month", "<u1"),
("year", "<u1"),
]
)
vals = vals.view(types)
arrays = []
arrays.append(vals["ms"])
# bits 6 and 7 of minutes are reserved
arrays.append(vals["min"] & 0x3F)
# only the first 4 bits of hour are used
arrays.append(vals["hour"] & 0xF)
# the first 4 bits are the day number
arrays.append(vals["day"] & 0xF)
# bits 6 and 7 of month are reserved
arrays.append(vals["month"] & 0x3F)
# bit 7 of year is reserved
arrays.append(vals["year"] & 0x7F)
# add summer or standard time information for hour
arrays.append((vals["hour"] & 0x80) >> 7)
# add day of week information
arrays.append((vals["day"] & 0xF0) >> 4)
names = [
"ms",
"min",
"hour",
"day",
"month",
"year",
"summer_time",
"day_of_week",
]
vals = fromarrays(arrays, names=names)
# CANopen time
elif data_type == v4c.DATA_TYPE_CANOPEN_TIME:
types = dtype([("ms", "<u4"), ("days", "<u2")])
vals = vals.view(types)
arrays = []
# bits 28 to 31 are reserved for ms
arrays.append(vals["ms"] & 0xFFFFFFF)
arrays.append(vals["days"] & 0x3F)
names = ["ms", "days"]
vals = fromarrays(arrays, names=names)
return vals, timestamps, invalidation_bits, encoding
def _get_not_byte_aligned_data(self, data, group, ch_nr):
big_endian_types = (
v4c.DATA_TYPE_UNSIGNED_MOTOROLA,
v4c.DATA_TYPE_REAL_MOTOROLA,
v4c.DATA_TYPE_SIGNED_MOTOROLA,
)
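# channels that do not start and stop on byte boundaries are handled here:
# read the covering bytes, pad them to a standard integer size, then shift
# and mask to recover the value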
if group.uses_ld:
record_size = group.channel_group.samples_byte_nr
else:
record_size = (
group.channel_group.samples_byte_nr
+ group.channel_group.invalidation_bytes_nr
)
channel = group.channels[ch_nr]
bit_offset = channel.bit_offset
byte_offset = channel.byte_offset
bit_count = channel.bit_count
if ch_nr >= 0:
dependencies = group.channel_dependencies[ch_nr]
if dependencies and isinstance(dependencies[0], ChannelArrayBlock):
ca_block = dependencies[0]
size = bit_count // 8
shape = tuple(ca_block[f"dim_size_{i}"] for i in range(ca_block.dims))
if ca_block.byte_offset_base // size > 1 and len(shape) == 1:
shape += (ca_block.byte_offset_base // size,)
dim = 1
for d in shape:
dim *= d
size *= dim
bit_count = size * 8
byte_size = bit_offset + bit_count
if byte_size % 8:
byte_size = (byte_size // 8) + 1
else:
byte_size //= 8
types = [
("", f"a{byte_offset}"),
("vals", f"({byte_size},)u1"),
("", f"a{record_size - byte_size - byte_offset}"),
]
vals = fromstring(data, dtype=dtype(types))
vals = vals["vals"]
if byte_size in {1, 2, 4, 8}:
extra_bytes = 0
else:
extra_bytes = 4 - (byte_size % 4)
std_size = byte_size + extra_bytes
big_endian = channel.data_type in big_endian_types
# prepend or append extra bytes columns
# to get a standard size number of bytes
if extra_bytes:
if big_endian:
vals = column_stack(
[vals, zeros(len(vals), dtype=f"<({extra_bytes},)u1")]
)
try:
vals = vals.view(f">u{std_size}").ravel()
except:
vals = frombuffer(vals.tobytes(), dtype=f">u{std_size}")
vals = vals >> (extra_bytes * 8 + bit_offset)
vals &= (1 << bit_count) - 1
else:
vals = column_stack(
[vals, zeros(len(vals), dtype=f"<({extra_bytes},)u1")]
)
try:
vals = vals.view(f"<u{std_size}").ravel()
except:
vals = frombuffer(vals.tobytes(), dtype=f"<u{std_size}")
vals = vals >> bit_offset
vals &= (1 << bit_count) - 1
else:
if big_endian:
try:
vals = vals.view(f">u{std_size}").ravel()
except:
vals = frombuffer(vals.tobytes(), dtype=f">u{std_size}")
vals = vals >> bit_offset
vals &= (1 << bit_count) - 1
else:
try:
vals = vals.view(f"<u{std_size}").ravel()
except:
vals = frombuffer(vals.tobytes(), dtype=f"<u{std_size}")
vals = vals >> bit_offset
vals &= (1 << bit_count) - 1
data_type = channel.data_type
if data_type in v4c.SIGNED_INT:
return as_non_byte_sized_signed_int(vals, bit_count)
elif data_type in v4c.FLOATS:
return vals.view(get_fmt_v4(data_type, bit_count))
else:
return vals
def included_channels(
self, index=None, channels=None, skip_master=True, minimal=True
):
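# build the mapping of channel indexes that must be read for each group;
# component and referenced channels (structure members, array axis channels)
# are reached through their parent channel, so they are dropped from the selection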
if channels is None:
virtual_channel_group = self.virtual_groups[index]
groups = virtual_channel_group.groups
gps = {}
for gp_index in groups:
group = self.groups[gp_index]
included_channels = set(range(len(group.channels)))
master_index = self.masters_db.get(gp_index, None)
if master_index is not None:
included_channels.remove(master_index)
channels = group.channels
for dependencies in group.channel_dependencies:
if dependencies is None:
continue
if all(
not isinstance(dep, ChannelArrayBlock) for dep in dependencies
):
for _, ch_nr in dependencies:
try:
included_channels.remove(ch_nr)
except KeyError:
pass
else:
for dep in dependencies:
for referenced_channels in (
dep.axis_channels,
dep.dynamic_size_channels,
dep.input_quantity_channels,
):
for gp_nr, ch_nr in referenced_channels:
if gp_nr == gp_index:
try:
included_channels.remove(ch_nr)
except KeyError:
pass
if dep.output_quantity_channel:
gp_nr, ch_nr = dep.output_quantity_channel
if gp_nr == gp_index:
try:
included_channels.remove(ch_nr)
except KeyError:
pass
if dep.comparison_quantity_channel:
gp_nr, ch_nr = dep.comparison_quantity_channel
if gp_nr == gp_index:
try:
included_channels.remove(ch_nr)
except KeyError:
pass
gps[gp_index] = sorted(included_channels)
result = {index: gps}
else:
gps = {}
for item in channels:
if isinstance(item, (list, tuple)):
if len(item) not in (2, 3):
raise MdfException(
"The items used for filtering must be strings, "
"or they must match the first 3 argumens of the get "
"method"
)
else:
group, idx = self._validate_channel_selection(*item)
gps_idx = gps.setdefault(group, set())
gps_idx.add(idx)
else:
name = item
group, idx = self._validate_channel_selection(name)
gps_idx = gps.setdefault(group, set())
gps_idx.add(idx)
result = {}
for gp_index, channels in gps.items():
master = self.virtual_groups_map[gp_index]
group = self.groups[gp_index]
if minimal:
channel_dependencies = [
group.channel_dependencies[ch_nr] for ch_nr in channels
]
for dependencies in channel_dependencies:
if dependencies is None:
continue
if all(
not isinstance(dep, ChannelArrayBlock)
for dep in dependencies
):
for _, ch_nr in dependencies:
try:
channels.remove(ch_nr)
except KeyError:
pass
else:
for dep in dependencies:
for referenced_channels in (
dep.axis_channels,
dep.dynamic_size_channels,
dep.input_quantity_channels,
):
for gp_nr, ch_nr in referenced_channels:
if gp_nr == gp_index:
try:
channels.remove(ch_nr)
except KeyError:
pass
if dep.output_quantity_channel:
gp_nr, ch_nr = dep.output_quantity_channel
if gp_nr == gp_index:
try:
channels.remove(ch_nr)
except KeyError:
pass
if dep.comparison_quantity_channel:
gp_nr, ch_nr = dep.comparison_quantity_channel
if gp_nr == gp_index:
try:
channels.remove(ch_nr)
except KeyError:
pass
gp_master = self.masters_db.get(gp_index, None)
if gp_master is not None and gp_master in channels:
channels.remove(gp_master)
if master not in result:
result[master] = {}
result[master][master] = [self.masters_db.get(master, None)]
result[master][gp_index] = sorted(channels)
return result
def _yield_selected_signals(
self,
index,
groups=None,
record_offset=0,
record_count=None,
skip_master=True,
version=None,
):
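# the first iteration yields full Signal objects; the following iterations
# yield plain (samples, invalidation_bits) tuples to avoid the Signal
# construction overhead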
version = version or self.version
virtual_channel_group = self.virtual_groups[index]
record_size = virtual_channel_group.record_size
if groups is None:
groups = self.included_channels(index, skip_master=skip_master)[index]
record_size = 0
for group_index in groups:
grp = self.groups[group_index]
record_size += (
grp.channel_group.samples_byte_nr
+ grp.channel_group.invalidation_bytes_nr
)
record_size = record_size or 1
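# size the read fragments: honour the configured read fragment size if set,
# otherwise default to roughly 16 MB of records (128 MB for MDF >= 4.20)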
if self._read_fragment_size:
count = self._read_fragment_size // record_size or 1
else:
if version < "4.20":
count = 16 * 1024 * 1024 // record_size or 1
else:
count = 128 * 1024 * 1024 // record_size or 1
data_streams = []
for idx, group_index in enumerate(groups):
grp = self.groups[group_index]
grp.read_split_count = count
data_streams.append(
self._load_data(
grp, record_offset=record_offset, record_count=record_count
)
)
if group_index == index:
master_index = idx
encodings = {group_index: [None] for group_index in groups}
self._set_temporary_master(None)
idx = 0
while True:
try:
fragments = [next(stream) for stream in data_streams]
except:
break
_master = self.get_master(index, data=fragments[master_index])
self._set_temporary_master(_master)
if idx == 0:
signals = []
else:
signals = [(_master, None)]
for fragment, (group_index, channels) in zip(fragments, groups.items()):
grp = self.groups[group_index]
if not grp.single_channel_dtype:
parents, dtypes = self._prepare_record(grp)
if dtypes.itemsize:
grp.record = fromstring(fragment[0], dtype=dtypes)
else:
grp.record = None
continue
if idx == 0:
for channel_index in channels:
signals.append(
self.get(
group=group_index,
index=channel_index,
data=fragment,
raw=True,
ignore_invalidation_bits=True,
samples_only=False,
)
)
else:
for channel_index in channels:
signals.append(
self.get(
group=group_index,
index=channel_index,
data=fragment,
raw=True,
ignore_invalidation_bits=True,
samples_only=True,
)
)
if version < "4.00":
if idx == 0:
for sig, channel_index in zip(signals, channels):
if sig.samples.dtype.kind == "S":
strsig = self.get(
group=group_index,
index=channel_index,
samples_only=True,
ignore_invalidation_bits=True,
)[0]
_dtype = strsig.dtype
sig.samples = sig.samples.astype(_dtype)
encodings[group_index].append((sig.encoding, _dtype))
del strsig
if sig.encoding != "latin-1":
if sig.encoding == "utf-16-le":
sig.samples = (
sig.samples.view(uint16)
.byteswap()
.view(sig.samples.dtype)
)
sig.samples = encode(
decode(sig.samples, "utf-16-be"), "latin-1"
)
else:
sig.samples = encode(
decode(sig.samples, sig.encoding),
"latin-1",
)
sig.samples = sig.samples.astype(_dtype)
else:
encodings[group_index].append(None)
else:
for i, (sig, encoding_tuple) in enumerate(
zip(signals, encodings[group_index])
):
if encoding_tuple:
encoding, _dtype = encoding_tuple
samples = sig[0]
if encoding != "latin-1":
if encoding == "utf-16-le":
samples = (
samples.view(uint16)
.byteswap()
.view(samples.dtype)
)
samples = encode(
decode(samples, "utf-16-be"), "latin-1"
)
else:
samples = encode(
decode(samples, encoding), "latin-1"
)
samples = samples.astype(_dtype)
signals[i] = (samples, sig[1])
grp.record = None
self._set_temporary_master(None)
idx += 1
yield signals
def get_master(
self,
index,
data=None,
raster=None,
record_offset=0,
record_count=None,
one_piece=False,
):
"""returns master channel samples for given group
Parameters
----------
index : int
group index
data : (bytes, int, int, bytes|None)
(data block raw bytes, fragment offset, count, invalidation bytes); default None
raster : float
raster to be used for interpolation; default None
.. deprecated:: 5.13.0
record_offset : int
if *data=None* use this to select the record offset from which the
group data should be loaded
record_count : int
number of records to read; default *None* and in this case all
available records are used
Returns
-------
t : numpy.array
master channel samples
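Examples
--------
Illustrative sketch; the file name used below is hypothetical:
>>> mdf = MDF4('test.mf4')
>>> timestamps = mdf.get_master(0)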
"""
if raster is not None:
logger.warning(
"the argument raster is deprecated since version 5.13.0 "
"and will be removed in a future release"
)
if self._master is not None:
return self._master
group = self.groups[index]
if group.channel_group.flags & v4c.FLAG_CG_REMOTE_MASTER:
if data is not None:
record_offset = data[1]
record_count = data[2]
return self.get_master(
group.channel_group.cg_master_index,
record_offset=record_offset,
record_count=record_count,
)
time_ch_nr = self.masters_db.get(index, None)
channel_group = group.channel_group
record_size = channel_group.samples_byte_nr
record_size += channel_group.invalidation_bytes_nr
if record_count is not None:
cycles_nr = record_count
else:
cycles_nr = group.channel_group.cycles_nr
fragment = data
if fragment:
data_bytes, offset, _count, invalidation_bytes = fragment
cycles_nr = len(data_bytes) // record_size if record_size else 0
else:
offset = 0
_count = record_count
if time_ch_nr is None:
if record_size:
t = arange(cycles_nr, dtype=float64)
t += offset
else:
t = array([], dtype=float64)
metadata = ("timestamps", v4c.SYNC_TYPE_TIME)
else:
time_ch = group.channels[time_ch_nr]
time_conv = time_ch.conversion
time_name = time_ch.name
metadata = (time_name, time_ch.sync_type)
if time_ch.channel_type == v4c.CHANNEL_TYPE_VIRTUAL_MASTER:
time_a = time_conv["a"]
time_b = time_conv["b"]
t = arange(cycles_nr, dtype=float64)
t += offset
t *= time_a
t += time_b
if record_count is None:
t = t[record_offset:]
else:
t = t[record_offset : record_offset + record_count]
else:
# check if the channel group contains just the master channel
# and that there are no padding bytes
if (
len(group.channels) == 1
and time_ch.dtype_fmt.itemsize == record_size
):
if one_piece:
data_bytes, offset, _count, _ = data
t = frombuffer(data_bytes, dtype=time_ch.dtype_fmt)
else:
# get data
if fragment is None:
data = self._load_data(
group,
record_offset=record_offset,
record_count=record_count,
)
else:
data = (fragment,)
time_values = [
frombuffer(fragment[0], dtype=time_ch.dtype_fmt)
for fragment in data
]
if len(time_values) > 1:
total_size = sum(len(_) for _ in time_values)
out = empty(total_size, dtype=time_ch.dtype_fmt)
t = concatenate(time_values, out=out)
else:
t = time_values[0]
else:
# get data group parents and dtypes
parents, dtypes = group.parents, group.types
if parents is None:
parents, dtypes = self._prepare_record(group)
if one_piece:
data_bytes, offset, _count, _ = data
try:
parent, _ = parents[time_ch_nr]
except KeyError:
parent = None
if parent is not None:
if group.record is None:
dtypes = group.types
if dtypes.itemsize:
record = fromstring(data_bytes, dtype=dtypes)
else:
record = None
else:
record = group.record
t = record[parent].copy()
else:
t = self._get_not_byte_aligned_data(
data_bytes, group, time_ch_nr
)
else:
# get data
if fragment is None:
data = self._load_data(
group,
record_offset=record_offset,
record_count=record_count,
)
else:
data = (fragment,)
time_values = []
count = 0
for fragment in data:
data_bytes, offset, _count, invalidation_bytes = fragment
try:
parent, _ = parents[time_ch_nr]
except KeyError:
parent = None
if parent is not None:
if group.record is None:
dtypes = group.types
if dtypes.itemsize:
record = fromstring(data_bytes, dtype=dtypes)
else:
record = None
else:
record = group.record
t = record[parent].copy()
else:
t = self._get_not_byte_aligned_data(
data_bytes, group, time_ch_nr
)
time_values.append(t)
count += 1
if count > 1:
total_size = sum(len(_) for _ in time_values)
if len(time_values) > 1:
out = empty(total_size, dtype=time_values[0].dtype)
t = concatenate(time_values, out=out)
else:
t = time_values[0]
# get timestamps
if time_conv:
t = time_conv.convert(t)
self._master_channel_metadata[index] = metadata
if not t.dtype == float64:
t = t.astype(float64)
if raster and t.size:
timestamps = t
if len(t) > 1:
num = float(float32((timestamps[-1] - timestamps[0]) / raster))
if int(num) == num:
timestamps = linspace(t[0], t[-1], int(num))
else:
timestamps = arange(t[0], t[-1], raster)
else:
timestamps = t
return timestamps
def get_bus_signal(
self,
bus,
name,
database=None,
ignore_invalidation_bits=False,
data=None,
raw=False,
ignore_value2text_conversion=True,
):
"""get a signal decoded from a raw bus logging. The currently supported buses are
CAN and LIN (LDF databases are not supported, they need to be converted to DBC and
fed to this function)
.. versionadded:: 6.0.0
Parameters
----------
bus : str
"CAN" or "LIN"
name : str
signal name
database : str
path of external CAN/LIN database file (.dbc or .arxml) or canmatrix.CanMatrix; default *None*
.. versionchanged:: 6.0.0
`db` and `database` arguments were merged into this single argument
ignore_invalidation_bits : bool
option to ignore invalidation bits
raw : bool
return channel samples without applying the conversion rule; default
`False`
ignore_value2text_conversion : bool
return channel samples without values that have a description in .dbc or .arxml file;
default `True`
Returns
-------
sig : Signal
Signal object with the physical values
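Examples
--------
Illustrative sketch; the file, database and signal names below are hypothetical:
>>> mdf = MDF4('bus_logging.mf4')
>>> sig = mdf.get_bus_signal('CAN', 'Wheels.FL_WheelSpeed', database='powertrain.dbc')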
"""
if bus == "CAN":
return self.get_can_signal(
name,
database=database,
ignore_invalidation_bits=ignore_invalidation_bits,
data=data,
raw=raw,
ignore_value2text_conversion=ignore_value2text_conversion,
)
elif bus == "LIN":
return self.get_lin_signal(
name,
database=database,
ignore_invalidation_bits=ignore_invalidation_bits,
data=data,
raw=raw,
ignore_value2text_conversion=ignore_value2text_conversion,
)
def get_can_signal(
self,
name,
database=None,
ignore_invalidation_bits=False,
data=None,
raw=False,
ignore_value2text_conversion=True,
):
"""get CAN message signal. You can specify an external CAN database (
*database* argument) or a canmatrix database object that has already been
loaded from a file (*db* argument).
The signal name can be specified in the following ways
* ``CAN<ID>.<MESSAGE_NAME>.<SIGNAL_NAME>`` - the `ID` value starts from 1
and must match the ID found in the measurement (the source CAN bus ID)
Example: CAN1.Wheels.FL_WheelSpeed
* ``CAN<ID>.CAN_DataFrame_<MESSAGE_ID>.<SIGNAL_NAME>`` - the `ID` value
starts from 1 and the `MESSAGE_ID` is the decimal message ID as found
in the database. Example: CAN1.CAN_DataFrame_218.FL_WheelSpeed
* ``<MESSAGE_NAME>.<SIGNAL_NAME>`` - in this case the first occurrence of
the message name and signal are returned (the same message could be
found on multiple CAN buses; for example on CAN1 and CAN3)
Example: Wheels.FL_WheelSpeed
* ``CAN_DataFrame_<MESSAGE_ID>.<SIGNAL_NAME>`` - in this case the first
occurrence of the message name and signal are returned (the same
message could be found on multiple CAN buses; for example on CAN1 and
CAN3). Example: CAN_DataFrame_218.FL_WheelSpeed
* ``<SIGNAL_NAME>`` - in this case the first occurrence of the signal
name is returned (the same signal name could be found in multiple
messages and on multiple CAN buses). Example: FL_WheelSpeed
Parameters
----------
name : str
signal name
database : str
path of external CAN database file (.dbc or .arxml) or canmatrix.CanMatrix; default *None*
.. versionchanged:: 6.0.0
`db` and `database` arguments were merged into this single argument
ignore_invalidation_bits : bool
option to ignore invalidation bits
raw : bool
return channel samples without applying the conversion rule; default
`False`
ignore_value2text_conversion : bool
return channel samples without values that have a description in .dbc or .arxml file;
default `True`
Returns
-------
sig : Signal
Signal object with the physical values
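Examples
--------
Illustrative sketch; the file, database and signal names below are hypothetical:
>>> mdf = MDF4('bus_logging.mf4')
>>> sig = mdf.get_can_signal('CAN1.Wheels.FL_WheelSpeed', database='powertrain.dbc')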
"""
if database is None:
return self.get(name)
if isinstance(database, (str, Path)):
database_path = Path(database)
if database_path.suffix.lower() not in (".arxml", ".dbc"):
message = f'Expected .dbc or .arxml file as CAN channel attachment but got "{database_path}"'
logger.exception(message)
raise MdfException(message)
else:
db_string = database_path.read_bytes()
md5_sum = md5(db_string).digest()
if md5_sum in self._external_dbc_cache:
db = self._external_dbc_cache[md5_sum]
else:
db = load_can_database(database_path, contents=db_string)
if db is None:
raise MdfException("failed to load database")
else:
db = database
is_j1939 = db.contains_j1939
name_ = name.split(".")
if len(name_) == 3:
can_id_str, message_id_str, signal = name_
can_id = v4c.CAN_ID_PATTERN.search(can_id_str)
if can_id is None:
raise MdfException(
f'CAN id "{can_id_str}" of signal name "{name}" is not recognised by this library'
)
else:
can_id = int(can_id.group("id"))
message_id = v4c.CAN_DATA_FRAME_PATTERN.search(message_id_str)
if message_id is None:
message_id = message_id_str
else:
message_id = int(message_id)
if isinstance(message_id, str):
message = db.frame_by_name(message_id)
else:
message = db.frame_by_id(message_id)
elif len(name_) == 2:
message_id_str, signal = name_
can_id = None
message_id = v4c.CAN_DATA_FRAME_PATTERN.search(message_id_str)
if message_id is None:
message_id = message_id_str
else:
message_id = int(message_id.group("id"))
if isinstance(message_id, str):
message = db.frame_by_name(message_id)
else:
message = db.frame_by_id(message_id)
else:
message = None
for msg in db:
for signal in msg:
if signal.name == name:
message = msg
can_id = None
signal = name
if message is None:
raise MdfException(f"Could not find signal {name} in {database}")
for sig in message.signals:
if sig.name == signal:
signal = sig
break
else:
raise MdfException(
f'Signal "{signal}" not found in message "{message.name}" of "{database}"'
)
if can_id is None:
index = None
for _can_id, messages in self.bus_logging_map["CAN"].items():
if is_j1939:
test_ids = [
canmatrix.ArbitrationId(id_, extended=True).pgn
for id_ in self.bus_logging_map["CAN"][_can_id]
]
id_ = message.arbitration_id.pgn
else:
id_ = message.arbitration_id.id
test_ids = self.bus_logging_map["CAN"][_can_id]
if id_ in test_ids:
if is_j1939:
for id__, idx in self.bus_logging_map["CAN"][_can_id].items():
if canmatrix.ArbitrationId(id__, extended=True).pgn == id_:
index = idx
break
else:
index = self.bus_logging_map["CAN"][_can_id][
message.arbitration_id.id
]
if index is not None:
break
else:
raise MdfException(
f'Message "{message.name}" (ID={hex(message.arbitration_id.id)}) not found in the measurement'
)
else:
if can_id in self.bus_logging_map["CAN"]:
if is_j1939:
test_ids = [
canmatrix.ArbitrationId(id_, extended=True).pgn
for id_ in self.bus_logging_map["CAN"][can_id]
]
id_ = message.arbitration_id.pgn
else:
id_ = message.arbitration_id.id
test_ids = self.bus_logging_map["CAN"][can_id]
if id_ in test_ids:
if is_j1939:
for id__, idx in self.bus_logging_map["CAN"][can_id].items():
if canmatrix.ArbitrationId(id__, extended=True).pgn == id_:
index = idx
break
else:
index = self.bus_logging_map["CAN"][can_id][
message.arbitration_id.id
]
else:
raise MdfException(
f'Message "{message.name}" (ID={hex(message.arbitration_id.id)}) not found in the measurement'
)
else:
raise MdfException(
f'No logging from "{can_id}" was found in the measurement'
)
can_ids = self.get(
"CAN_DataFrame.ID",
group=index,
ignore_invalidation_bits=ignore_invalidation_bits,
data=data,
)
can_ids.samples = can_ids.samples.astype("<u4") & 0x1FFFFFFF
payload = self.get(
"CAN_DataFrame.DataBytes",
group=index,
samples_only=True,
ignore_invalidation_bits=ignore_invalidation_bits,
data=data,
)[0]
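# for J1939 frames the match is done on the PGN (built from the PF and PS
# fields of the 29-bit identifier) instead of the raw arbitration id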
if is_j1939:
ps = (can_ids.samples >> 8) & 0xFF
pf = (can_ids.samples >> 16) & 0xFF
_pgn = pf << 8
_pgn = where(pf >= 240, _pgn + ps, _pgn)
idx = argwhere(_pgn == message.arbitration_id.pgn).ravel()
else:
idx = argwhere(can_ids.samples == message.arbitration_id.id).ravel()
payload = payload[idx]
t = can_ids.timestamps[idx].copy()
if can_ids.invalidation_bits is not None:
invalidation_bits = can_ids.invalidation_bits[idx]
else:
invalidation_bits = None
if not ignore_invalidation_bits and invalidation_bits is not None:
payload = payload[nonzero(~invalidation_bits)[0]]
t = t[nonzero(~invalidation_bits)[0]]
extracted_signals = extract_mux(
payload,
message,
None,
None,
t,
original_message_id=None,
ignore_value2text_conversion=ignore_value2text_conversion,
raw=raw,
)
comment = signal.comment or ""
for entry, signals in extracted_signals.items():
for name_, sig in signals.items():
if name_ == signal.name:
sig = Signal(
samples=sig["samples"],
timestamps=sig["t"],
name=name,
unit=signal.unit or "",
comment=comment,
)
if len(sig):
return sig
else:
raise MdfException(
f'No logging from "{signal}" was found in the measurement'
)
raise MdfException(f'No logging from "{signal}" was found in the measurement')
def get_lin_signal(
self,
name,
database=None,
ignore_invalidation_bits=False,
data=None,
raw=False,
ignore_value2text_conversion=True,
):
"""get LIN message signal. You can specify an external LIN database (
*database* argument) or a canmatrix database object that has already been
loaded from a file (*db* argument).
The signal name can be specified in the following ways
* ``LIN_Frame_<MESSAGE_ID>.<SIGNAL_NAME>`` - Example: LIN_Frame_218.FL_WheelSpeed
* ``<MESSAGE_NAME>.<SIGNAL_NAME>`` - Example: Wheels.FL_WheelSpeed
* ``<SIGNAL_NAME>`` - Example: FL_WheelSpeed
.. versionadded:: 6.0.0
Parameters
----------
name : str
signal name
database : str
path of external LIN database file (.dbc or .arxml) or canmatrix.CanMatrix; default *None*
ignore_invalidation_bits : bool
option to ignore invalidation bits
raw : bool
return channel samples without applying the conversion rule; default
`False`
ignore_value2text_conversion : bool
return channel samples without values that have a description in .dbc or .arxml file;
default `True`
Returns
-------
sig : Signal
Signal object with the physical values
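Examples
--------
Illustrative sketch; the file, database and signal names below are hypothetical:
>>> mdf = MDF4('lin_logging.mf4')
>>> sig = mdf.get_lin_signal('LIN_Frame_218.FL_WheelSpeed', database='body.dbc')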
"""
if database is None:
return self.get(name)
if isinstance(database, (str, Path)):
database_path = Path(database)
if database_path.suffix.lower() not in (".arxml", ".dbc"):
message = f'Expected .dbc or .arxml file as LIN channel attachment but got "{database_path}"'
logger.exception(message)
raise MdfException(message)
else:
db_string = database_path.read_bytes()
md5_sum = md5(db_string).digest()
if md5_sum in self._external_dbc_cache:
db = self._external_dbc_cache[md5_sum]
else:
db = load_can_database(database_path, contents=db_string)
if db is None:
raise MdfException("failed to load database")
else:
db = database
name_ = name.split(".")
if len(name_) == 2:
message_id_str, signal = name_
message_id = v4c.LIN_DATA_FRAME_PATTERN.search(message_id_str)
if message_id is None:
message_id = message_id_str
else:
message_id = int(message_id.group("id"))
if isinstance(message_id, str):
message = db.frame_by_name(message_id)
else:
message = db.frame_by_id(message_id)
else:
message = None
for msg in db:
for signal in msg:
if signal.name == name:
message = msg
signal = name
if message is None:
raise MdfException(f"Could not find signal {name} in {database}")
for sig in message.signals:
if sig.name == signal:
signal = sig
break
else:
raise MdfException(
f'Signal "{signal}" not found in message "{message.name}" of "{database}"'
)
id_ = message.arbitration_id.id
if id_ in self.bus_logging_map["LIN"]:
index = self.bus_logging_map["LIN"][id_]
else:
raise MdfException(
f'Message "{message.name}" (ID={hex(message.arbitration_id.id)}) not found in the measurement'
)
can_ids = self.get(
"LIN_Frame.ID",
group=index,
ignore_invalidation_bits=ignore_invalidation_bits,
data=data,
)
can_ids.samples = can_ids.samples.astype("<u4") & 0x1FFFFFFF
payload = self.get(
"LIN_Frame.DataBytes",
group=index,
samples_only=True,
ignore_invalidation_bits=ignore_invalidation_bits,
data=data,
)[0]
idx = argwhere(can_ids.samples == message.arbitration_id.id).ravel()
payload = payload[idx]
t = can_ids.timestamps[idx].copy()
if can_ids.invalidation_bits is not None:
invalidation_bits = can_ids.invalidation_bits[idx]
else:
invalidation_bits = None
if not ignore_invalidation_bits and invalidation_bits is not None:
payload = payload[nonzero(~invalidation_bits)[0]]
t = t[nonzero(~invalidation_bits)[0]]
extracted_signals = extract_mux(
payload,
message,
None,
None,
t,
original_message_id=None,
ignore_value2text_conversion=ignore_value2text_conversion,
raw=raw,
)
comment = signal.comment or ""
for entry, signals in extracted_signals.items():
for name_, sig in signals.items():
if name_ == signal.name:
sig = Signal(
samples=sig["samples"],
timestamps=sig["t"],
name=name,
unit=signal.unit or "",
comment=comment,
)
if len(sig):
return sig
else:
raise MdfException(
f'No logging from "{signal}" was found in the measurement'
)
raise MdfException(f'No logging from "{signal}" was found in the measurement')
def info(self):
"""get MDF information as a dict
Examples
--------
>>> mdf = MDF4('test.mdf')
>>> mdf.info()
"""
info = {
"version": self.version,
"program": self.identification.program_identification.decode("utf-8").strip(
" \0\n\r\t"
),
"comment": self.header.comment,
}
info["groups"] = len(self.groups)
for i, gp in enumerate(self.groups):
inf = {}
info[f"group {i}"] = inf
inf["cycles"] = gp.channel_group.cycles_nr
inf["comment"] = gp.channel_group.comment
inf["channels count"] = len(gp.channels)
for j, channel in enumerate(gp.channels):
name = channel.name
ch_type = v4c.CHANNEL_TYPE_TO_DESCRIPTION[channel.channel_type]
inf[f"channel {j}"] = f'name="{name}" type={ch_type}'
return info
def save(self, dst, overwrite=False, compression=0):
"""Save MDF to *dst*. If overwrite is *True* then the destination file
is overwritten, otherwise the file name is appended with '.<cntr>', where
'<cntr>' is the first counter that produces a new file name
(that does not already exist in the filesystem)
Parameters
----------
dst : str
destination file name, Default ''
overwrite : bool
overwrite flag, default *False*
compression : int
use compressed data blocks, default 0; valid since version 4.10
* 0 - no compression
* 1 - deflate (slower, but produces smaller files)
* 2 - transposition + deflate (slowest, but produces
the smallest files)
Returns
-------
output_file : pathlib.Path
path to saved file
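Examples
--------
Illustrative sketch; the file names below are hypothetical:
>>> mdf = MDF4('test.mf4')
>>> output_path = mdf.save('output.mf4', overwrite=True, compression=2)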
"""
if is_file_like(dst):
dst_ = dst
file_like = True
if hasattr(dst, "name"):
dst = Path(dst.name)
else:
dst = Path("__file_like.mf4")
dst_.seek(0)
else:
file_like = False
dst = Path(dst).with_suffix(".mf4")
destination_dir = dst.parent
destination_dir.mkdir(parents=True, exist_ok=True)
if overwrite is False:
if dst.is_file():
cntr = 0
while True:
name = dst.with_suffix(f".{cntr}.mf4")
if not name.exists():
break
else:
cntr += 1
message = (
f'Destination file "{dst}" already exists '
f'and "overwrite" is False. Saving MDF file as "{name}"'
)
logger.warning(message)
dst = name
if dst == self.name:
destination = dst.with_suffix(".savetemp")
else:
destination = dst
dst_ = open(destination, "wb+")
if not self.file_history:
comment = "created"
else:
comment = "updated"
fh = FileHistory()
fh.comment = f"""<FHcomment>
<TX>{comment}</TX>
<tool_id>asammdf</tool_id>
<tool_vendor>asammdf</tool_vendor>
<tool_version>{__version__}</tool_version>
</FHcomment>"""
self.file_history.append(fh)
cg_map = {}
try:
defined_texts = {}
cc_map = {}
si_map = {}
groups_nr = len(self.groups)
write = dst_.write
tell = dst_.tell
seek = dst_.seek
blocks = []
write(bytes(self.identification))
self.header.to_blocks(dst_.tell(), blocks)
for block in blocks:
write(bytes(block))
original_data_addresses = []
if compression == 1:
zip_type = v4c.FLAG_DZ_DEFLATE
else:
zip_type = v4c.FLAG_DZ_TRANPOSED_DEFLATE
# write DataBlocks first
for gp_nr, gp in enumerate(self.groups):
original_data_addresses.append(gp.data_group.data_block_addr)
if gp.channel_group.flags & v4c.FLAG_CG_VLSD:
continue
address = tell()
total_size = (
gp.channel_group.samples_byte_nr
+ gp.channel_group.invalidation_bytes_nr
) * gp.channel_group.cycles_nr
if total_size:
if self._write_fragment_size:
samples_size = (
gp.channel_group.samples_byte_nr
+ gp.channel_group.invalidation_bytes_nr
)
if samples_size:
split_size = self._write_fragment_size // samples_size
split_size *= samples_size
if split_size == 0:
split_size = samples_size
chunks = float(total_size) / split_size
chunks = int(ceil(chunks))
else:
chunks = 1
else:
chunks = 1
self.configure(read_fragment_size=split_size)
data = self._load_data(gp)
if chunks == 1:
data_, _1, _2, inval_ = next(data)
if self.version >= "4.20" and gp.uses_ld:
if compression:
if compression == 1:
param = 0
else:
param = gp.channel_group.samples_byte_nr
kwargs = {
"data": data_,
"zip_type": zip_type,
"param": param,
"original_type": b"DV",
}
data_block = DataZippedBlock(**kwargs)
else:
data_block = DataBlock(data=data_, type="DV")
write(bytes(data_block))
data_address = address
align = data_block.block_len % 8
if align:
write(b"\0" * (8 - align))
if inval_ is not None:
inval_address = address = tell()
if compression:
if compression == 1:
param = 0
else:
param = gp.channel_group.invalidation_bytes_nr
kwargs = {
"data": inval_,
"zip_type": zip_type,
"param": param,
"original_type": b"DI",
}
inval_block = DataZippedBlock(**kwargs)
else:
inval_block = DataBlock(data=inval_, type="DI")
write(bytes(inval_block))
align = inval_block.block_len % 8
if align:
write(b"\0" * (8 - align))
address = tell()
kwargs = {
"flags": v4c.FLAG_LD_EQUAL_LENGHT,
"data_block_nr": 1,
"data_block_len": len(data_),
"data_block_addr_0": data_address,
}
if inval_:
kwargs["flags"] |= v4c.FLAG_LD_INVALIDATION_PRESENT
kwargs["invalidation_bits_addr_0"] = inval_address
ld_block = ListData(**kwargs)
write(bytes(ld_block))
align = ld_block.block_len % 8
if align:
write(b"\0" * (8 - align))
if gp.channel_group.cycles_nr:
gp.data_group.data_block_addr = address
else:
gp.data_group.data_block_addr = 0
else:
if compression and self.version >= "4.10":
if compression == 1:
param = 0
else:
param = (
gp.channel_group.samples_byte_nr
+ gp.channel_group.invalidation_bytes_nr
)
kwargs = {
"data": data_,
"zip_type": zip_type,
"param": param,
}
data_block = DataZippedBlock(**kwargs)
else:
data_block = DataBlock(data=data_)
write(bytes(data_block))
align = data_block.block_len % 8
if align:
write(b"\0" * (8 - align))
if gp.channel_group.cycles_nr:
gp.data_group.data_block_addr = address
else:
gp.data_group.data_block_addr = 0
else:
if self.version >= "4.20" and gp.uses_ld:
dv_addr = []
di_addr = []
block_size = 0
for i, (data_, _1, _2, inval_) in enumerate(data):
if i == 0:
block_size = len(data_)
if compression:
if compression == 1:
param = 0
else:
param = gp.channel_group.samples_byte_nr
kwargs = {
"data": data_,
"zip_type": zip_type,
"param": param,
"original_type": b"DV",
}
data_block = DataZippedBlock(**kwargs)
else:
data_block = DataBlock(data=data_, type="DV")
dv_addr.append(tell())
write(bytes(data_block))
align = data_block.block_len % 8
if align:
write(b"\0" * (8 - align))
if inval_ is not None:
if compression:
if compression == 1:
param = 0
else:
param = (
gp.channel_group.invalidation_bytes_nr
)
kwargs = {
"data": inval_,
"zip_type": zip_type,
"param": param,
"original_type": b"DI",
}
inval_block = DataZippedBlock(**kwargs)
else:
inval_block = DataBlock(data=inval_, type="DI")
di_addr.append(tell())
write(bytes(inval_block))
align = inval_block.block_len % 8
if align:
write(b"\0" * (8 - align))
address = tell()
kwargs = {
"flags": v4c.FLAG_LD_EQUAL_LENGHT,
"data_block_nr": len(dv_addr),
"data_block_len": block_size,
}
for i, addr in enumerate(dv_addr):
kwargs[f"data_block_addr_{i}"] = addr
if di_addr:
kwargs["flags"] |= v4c.FLAG_LD_INVALIDATION_PRESENT
for i, addr in enumerate(di_addr):
kwargs[f"invalidation_bits_addr_{i}"] = addr
ld_block = ListData(**kwargs)
write(bytes(ld_block))
align = ld_block.block_len % 8
if align:
write(b"\0" * (8 - align))
if gp.channel_group.cycles_nr:
gp.data_group.data_block_addr = address
else:
gp.data_group.data_block_addr = 0
else:
kwargs = {
"flags": v4c.FLAG_DL_EQUAL_LENGHT,
"zip_type": zip_type,
}
hl_block = HeaderList(**kwargs)
kwargs = {
"flags": v4c.FLAG_DL_EQUAL_LENGHT,
"links_nr": chunks + 1,
"data_block_nr": chunks,
"data_block_len": split_size,
}
dl_block = DataList(**kwargs)
for i, data__ in enumerate(data):
data_ = data__[0]
if compression and self.version >= "4.10":
if compression == 1:
zip_type = v4c.FLAG_DZ_DEFLATE
else:
zip_type = v4c.FLAG_DZ_TRANPOSED_DEFLATE
if compression == 1:
param = 0
else:
param = (
gp.channel_group.samples_byte_nr
+ gp.channel_group.invalidation_bytes_nr
)
kwargs = {
"data": data_,
"zip_type": zip_type,
"param": param,
}
block = DataZippedBlock(**kwargs)
else:
block = DataBlock(data=data_)
address = tell()
block.address = address
write(bytes(block))
align = block.block_len % 8
if align:
write(b"\0" * (8 - align))
dl_block[f"data_block_addr{i}"] = address
address = tell()
dl_block.address = address
write(bytes(dl_block))
if compression and self.version != "4.00":
hl_block.first_dl_addr = address
address = tell()
hl_block.address = address
write(bytes(hl_block))
gp.data_group.data_block_addr = address
else:
gp.data_group.data_block_addr = 0
if self._callback:
self._callback(int(50 * (gp_nr + 1) / groups_nr), 100)
if self._terminate:
dst_.close()
self.close()
return
address = tell()
blocks = []
# file history blocks
for fh in self.file_history:
address = fh.to_blocks(address, blocks, defined_texts)
for i, fh in enumerate(self.file_history[:-1]):
fh.next_fh_addr = self.file_history[i + 1].address
self.file_history[-1].next_fh_addr = 0
# data groups
gp_rec_ids = []
valid_data_groups = []
for gp in self.groups:
if gp.channel_group.flags & v4c.FLAG_CG_VLSD:
continue
valid_data_groups.append(gp.data_group)
gp_rec_ids.append(gp.data_group.record_id_len)
address = gp.data_group.to_blocks(address, blocks, defined_texts)
if valid_data_groups:
for i, dg in enumerate(valid_data_groups[:-1]):
addr_ = valid_data_groups[i + 1].address
dg.next_dg_addr = addr_
valid_data_groups[-1].next_dg_addr = 0
# go through each data group and append the rest of the blocks
for i, gp in enumerate(self.groups):
channels = gp.channels
for j, channel in enumerate(channels):
if channel.attachment is not None:
channel.attachment_addr = self.attachments[
channel.attachment
].address
elif channel.attachment_nr:
channel.attachment_addr = 0
address = channel.to_blocks(
address, blocks, defined_texts, cc_map, si_map
)
if channel.channel_type == v4c.CHANNEL_TYPE_SYNC:
if channel.attachment is not None:
channel.data_block_addr = self.attachments[
channel.attachment
].address
else:
sdata, with_bounds = self._load_signal_data(group=gp, index=j)
if sdata:
split_size = self._write_fragment_size
if self._write_fragment_size:
chunks = float(len(sdata)) / split_size
chunks = int(ceil(chunks))
else:
chunks = 1
if chunks == 1:
if compression and self.version > "4.00":
signal_data = DataZippedBlock(
data=sdata,
zip_type=v4c.FLAG_DZ_DEFLATE,
original_type=b"SD",
)
signal_data.address = address
address += signal_data.block_len
blocks.append(signal_data)
align = signal_data.block_len % 8
if align:
blocks.append(b"\0" * (8 - align))
address += 8 - align
else:
signal_data = DataBlock(data=sdata, type="SD")
signal_data.address = address
address += signal_data.block_len
blocks.append(signal_data)
align = signal_data.block_len % 8
if align:
blocks.append(b"\0" * (8 - align))
address += 8 - align
channel.data_block_addr = signal_data.address
else:
kwargs = {
"flags": v4c.FLAG_DL_EQUAL_LENGHT,
"links_nr": chunks + 1,
"data_block_nr": chunks,
"data_block_len": self._write_fragment_size,
}
dl_block = DataList(**kwargs)
for k in range(chunks):
data_ = sdata[k * split_size : (k + 1) * split_size]
if compression and self.version > "4.00":
zip_type = v4c.FLAG_DZ_DEFLATE
param = 0
kwargs = {
"data": data_,
"zip_type": zip_type,
"param": param,
"original_type": b"SD",
}
block = DataZippedBlock(**kwargs)
else:
block = DataBlock(data=data_, type="SD")
blocks.append(block)
block.address = address
address += block.block_len
align = block.block_len % 8
if align:
blocks.append(b"\0" * (8 - align))
address += 8 - align
dl_block[f"data_block_addr{k}"] = block.address
dl_block.address = address
blocks.append(dl_block)
address += dl_block.block_len
if compression and self.version > "4.00":
kwargs = {
"flags": v4c.FLAG_DL_EQUAL_LENGHT,
"zip_type": v4c.FLAG_DZ_DEFLATE,
"first_dl_addr": dl_block.address,
}
hl_block = HeaderList(**kwargs)
hl_block.address = address
address += hl_block.block_len
blocks.append(hl_block)
channel.data_block_addr = hl_block.address
else:
channel.data_block_addr = dl_block.address
else:
channel.data_block_addr = 0
dep_list = gp.channel_dependencies[j]
if dep_list:
if all(isinstance(dep, ChannelArrayBlock) for dep in dep_list):
for dep in dep_list:
dep.address = address
address += dep.block_len
blocks.append(dep)
for k, dep in enumerate(dep_list[:-1]):
dep.composition_addr = dep_list[k + 1].address
dep_list[-1].composition_addr = 0
channel.component_addr = dep_list[0].address
else:
index = dep_list[0][1]
addr_ = gp.channels[index].address
group_channels = gp.channels
if group_channels:
for j, channel in enumerate(group_channels[:-1]):
channel.next_ch_addr = group_channels[j + 1].address
group_channels[-1].next_ch_addr = 0
# channel dependecies
j = len(channels) - 1
while j >= 0:
dep_list = gp.channel_dependencies[j]
if dep_list and all(isinstance(dep, tuple) for dep in dep_list):
index = dep_list[0][1]
channels[j].component_addr = channels[index].address
index = dep_list[-1][1]
channels[j].next_ch_addr = channels[index].next_ch_addr
channels[index].next_ch_addr = 0
for _, ch_nr in dep_list:
channels[ch_nr].source_addr = 0
j -= 1
# channel group
if gp.channel_group.flags & v4c.FLAG_CG_VLSD:
continue
gp.channel_group.first_sample_reduction_addr = 0
if channels:
gp.channel_group.first_ch_addr = gp.channels[0].address
else:
gp.channel_group.first_ch_addr = 0
gp.channel_group.next_cg_addr = 0
address = gp.channel_group.to_blocks(
address, blocks, defined_texts, si_map
)
gp.data_group.first_cg_addr = gp.channel_group.address
cg_map[i] = gp.channel_group.address
if self._callback:
self._callback(int(50 * (i + 1) / groups_nr) + 25, 100)
if self._terminate:
dst_.close()
self.close()
return
for gp in self.groups:
for dep_list in gp.channel_dependencies:
if dep_list:
if all(isinstance(dep, ChannelArrayBlock) for dep in dep_list):
for dep in dep_list:
for i, (gp_nr, ch_nr) in enumerate(
dep.dynamic_size_channels
):
grp = self.groups[gp_nr]
ch = grp.channels[ch_nr]
dep[
f"dynamic_size_{i}_dg_addr"
] = grp.data_group.address
dep[
f"dynamic_size_{i}_cg_addr"
] = grp.channel_group.address
dep[f"dynamic_size_{i}_ch_addr"] = ch.address
for i, (gp_nr, ch_nr) in enumerate(
dep.input_quantity_channels
):
grp = self.groups[gp_nr]
ch = grp.channels[ch_nr]
dep[
f"input_quantity_{i}_dg_addr"
] = grp.data_group.address
dep[
f"input_quantity_{i}_cg_addr"
] = grp.channel_group.address
dep[f"input_quantity_{i}_ch_addr"] = ch.address
for i, conversion in enumerate(dep.axis_conversions):
if conversion:
address = conversion.to_blocks(
address, blocks, defined_texts, cc_map
)
dep[f"axis_conversion_{i}"] = conversion.address
else:
dep[f"axis_conversion_{i}"] = 0
if dep.output_quantity_channel:
gp_nr, ch_nr = dep.output_quantity_channel
grp = self.groups[gp_nr]
ch = grp.channels[ch_nr]
dep[
f"output_quantity_dg_addr"
] = grp.data_group.address
dep[
f"output_quantity_cg_addr"
] = grp.channel_group.address
dep[f"output_quantity_ch_addr"] = ch.address
if dep.comparison_quantity_channel:
gp_nr, ch_nr = dep.comparison_quantity_channel
grp = self.groups[gp_nr]
ch = grp.channels[ch_nr]
dep[
f"comparison_quantity_dg_addr"
] = grp.data_group.address
dep[
f"comparison_quantity_cg_addr"
] = grp.channel_group.address
dep[f"comparison_quantity_ch_addr"] = ch.address
for i, (gp_nr, ch_nr) in enumerate(dep.axis_channels):
grp = self.groups[gp_nr]
ch = grp.channels[ch_nr]
dep[
f"scale_axis_{i}_dg_addr"
] = grp.data_group.address
dep[
f"scale_axis_{i}_cg_addr"
] = grp.channel_group.address
dep[f"scale_axis_{i}_ch_addr"] = ch.address
position = tell()
for gp in self.groups:
gp.data_group.record_id_len = 0
cg_master_index = gp.channel_group.cg_master_index
if cg_master_index is not None:
gp.channel_group.cg_master_addr = cg_map[cg_master_index]
seek(gp.channel_group.address)
write(bytes(gp.channel_group))
seek(position)
ev_map = []
if self.events:
for event in self.events:
for i, ref in enumerate(event.scopes):
try:
dg_cntr, ch_cntr = ref
event[f"scope_{i}_addr"] = (
self.groups[dg_cntr].channels[ch_cntr].address
)
except TypeError:
dg_cntr = ref
event[f"scope_{i}_addr"] = self.groups[
dg_cntr
].channel_group.address
blocks.append(event)
ev_map.append(address)
event.address = address
address += event.block_len
if event.name:
tx_block = TextBlock(text=event.name)
tx_block.address = address
blocks.append(tx_block)
address += tx_block.block_len
event.name_addr = tx_block.address
else:
event.name_addr = 0
if event.comment:
meta = event.comment.startswith("<EVcomment")
tx_block = TextBlock(text=event.comment, meta=meta)
tx_block.address = address
blocks.append(tx_block)
address += tx_block.block_len
event.comment_addr = tx_block.address
else:
event.comment_addr = 0
if event.parent is not None:
event.parent_ev_addr = ev_map[event.parent]
if event.range_start is not None:
event.range_start_ev_addr = ev_map[event.range_start]
for i in range(len(self.events) - 1):
self.events[i].next_ev_addr = self.events[i + 1].address
self.events[-1].next_ev_addr = 0
self.header.first_event_addr = self.events[0].address
if self._terminate:
dst_.close()
self.close()
return
# attachments
at_map = {}
if self.attachments:
# put the attachment texts before the attachments
for at_block in self.attachments:
for text in (at_block.file_name, at_block.mime, at_block.comment):
if text not in defined_texts:
tx_block = TextBlock(text=str(text))
defined_texts[text] = address
tx_block.address = address
address += tx_block.block_len
blocks.append(tx_block)
for at_block in self.attachments:
address = at_block.to_blocks(address, blocks, defined_texts)
for i in range(len(self.attachments) - 1):
at_block = self.attachments[i]
at_block.next_at_addr = self.attachments[i + 1].address
self.attachments[-1].next_at_addr = 0
if self.events:
for event in self.events:
for i in range(event.attachment_nr):
key = f"attachment_{i}_addr"
addr = event[key]
event[key] = at_map[addr]
for i, gp in enumerate(self.groups):
for j, channel in enumerate(gp.channels):
if channel.attachment is not None:
channel.attachment_addr = self.attachments[
channel.attachment
].address
elif channel.attachment_nr:
channel.attachment_addr = 0
if channel.channel_type == v4c.CHANNEL_TYPE_SYNC and channel.attachment is not None:
channel.data_block_addr = self.attachments[
channel.attachment
].address
if self._callback:
blocks_nr = len(blocks)
threshold = blocks_nr / 25
count = 1
for i, block in enumerate(blocks):
write(bytes(block))
if i >= threshold:
self._callback(75 + count, 100)
count += 1
threshold += blocks_nr / 25
else:
for block in blocks:
write(bytes(block))
for gp, rec_id in zip(self.groups, gp_rec_ids):
gp.data_group.record_id_len = rec_id
if valid_data_groups:
addr_ = valid_data_groups[0].address
self.header.first_dg_addr = addr_
else:
self.header.first_dg_addr = 0
self.header.file_history_addr = self.file_history[0].address
if self.attachments:
first_attachment = self.attachments[0]
addr_ = first_attachment.address
self.header.first_attachment_addr = addr_
else:
self.header.first_attachment_addr = 0
seek(v4c.IDENTIFICATION_BLOCK_SIZE)
write(bytes(self.header))
for orig_addr, gp in zip(original_data_addresses, self.groups):
gp.data_group.data_block_addr = orig_addr
at_map = {value: key for key, value in at_map.items()}
for event in self.events:
for i in range(event.attachment_nr):
key = f"attachment_{i}_addr"
addr = event[key]
event[key] = at_map[addr]
except:
if not file_like:
dst_.close()
raise
else:
if not file_like:
dst_.close()
if dst == self.name:
self.close()
try:
Path.unlink(self.name)
Path.rename(destination, self.name)
except:
pass
self.groups.clear()
self.header = None
self.identification = None
self.file_history.clear()
self.channels_db.clear()
self.masters_db.clear()
self.attachments.clear()
self.file_comment = None
self._ch_map.clear()
self._tempfile = TemporaryFile()
self._file = open(self.name, "rb")
self._read()
if self._callback:
self._callback(100, 100)
if self.name == Path("__new__.mf4"):
self.name = dst
return dst
def get_channel_name(self, group, index):
"""Gets channel name.
Parameters
----------
group : int
0-based group index
index : int
0-based channel index
Returns
-------
name : str
found channel name
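        A short illustration (indices and the returned name are hypothetical):
        >>> mdf.get_channel_name(group=0, index=1)
        'EngineSpeed'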
"""
gp_nr, ch_nr = self._validate_channel_selection(None, group, index)
return self.groups[gp_nr].channels[ch_nr].name
def get_channel_metadata(self, name=None, group=None, index=None):
gp_nr, ch_nr = self._validate_channel_selection(name, group, index)
grp = self.groups[gp_nr]
channel = grp.channels[ch_nr]
return channel
def get_channel_unit(self, name=None, group=None, index=None):
"""Gets channel unit.
Channel can be specified in two ways:
* using the first positional argument *name*
* if there are multiple occurrences for this channel then the
*group* and *index* arguments can be used to select a specific
group.
* if there are multiple occurrences for this channel and either the
*group* or *index* arguments is None then a warning is issued
* using the group number (keyword argument *group*) and the channel
number (keyword argument *index*). Use *info* method for group and
channel numbers
Parameters
----------
name : string
name of channel
group : int
0-based group index
index : int
0-based channel index
Returns
-------
unit : str
found channel unit
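        A short illustration (channel name and returned unit are hypothetical):
        >>> mdf.get_channel_unit("EngineSpeed")
        'rpm'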
"""
gp_nr, ch_nr = self._validate_channel_selection(name, group, index)
grp = self.groups[gp_nr]
channel = grp.channels[ch_nr]
conversion = channel.conversion
unit = conversion and conversion.unit or channel.unit or ""
return unit
def get_channel_comment(self, name=None, group=None, index=None):
"""Gets channel comment.
Channel can be specified in two ways:
* using the first positional argument *name*
* if there are multiple occurrences for this channel then the
*group* and *index* arguments can be used to select a specific
group.
* if there are multiple occurrences for this channel and either the
*group* or *index* arguments is None then a warning is issued
* using the group number (keyword argument *group*) and the channel
number (keyword argument *index*). Use *info* method for group and
channel numbers
Parameters
----------
name : string
name of channel
group : int
0-based group index
index : int
0-based channel index
Returns
-------
comment : str
found channel comment
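        A short illustration (channel name and returned comment are hypothetical):
        >>> mdf.get_channel_comment("EngineSpeed")
        'crankshaft speed'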
"""
gp_nr, ch_nr = self._validate_channel_selection(name, group, index)
grp = self.groups[gp_nr]
channel = grp.channels[ch_nr]
return extract_cncomment_xml(channel.comment)
def _finalize(self):
"""
Attempt finalization of the file.
:return: None
"""
flags = self.identification.unfinalized_standard_flags
stream = self._file
blocks, addresses = all_blocks_addresses(stream)
stream.seek(0, 2)
limit = stream.tell()
mapped = self._mapped
if flags & v4c.FLAG_UNFIN_UPDATE_LAST_DL:
for dg_addr in blocks[b"##DG"]:
group = DataGroup(address=dg_addr, stream=stream, mapped=mapped)
data_addr = group.data_block_addr
if not data_addr:
continue
stream.seek(data_addr)
blk_id = stream.read(4)
if blk_id == b"##DT":
continue
elif blk_id == b"##DL":
while True:
dl = DataList(address=data_addr, stream=stream, mapped=mapped)
if not dl.next_dl_addr:
break
count = dl.links_nr - 1
valid_count = 0
for i in range(count):
dt_addr = dl[f"data_block_addr{i}"]
if dt_addr:
valid_count += 1
else:
break
kwargs = {
f"data_block_addr{i}": dl[f"data_block_addr{i}"]
for i in range(valid_count)
}
kwargs["links_nr"] = valid_count + 1
kwargs["flags"] = dl.flags
                        if dl.flags & v4c.FLAG_DL_EQUAL_LENGHT:
                            kwargs["data_block_len"] = dl.data_block_len
                        else:
                            for i in range(valid_count):
kwargs[f"offset_{i}"] = dl[f"offset_{i}"]
stream.seek(data_addr)
stream.write(bytes(DataList(**kwargs)))
elif blk_id == b"##HL":
hl = HeaderList(address=data_addr, stream=stream, mapped=mapped)
data_addr = hl.first_dl_addr
while True:
dl = DataList(address=data_addr, stream=stream, mapped=mapped)
if not dl.next_dl_addr:
break
count = dl.links_nr - 1
valid_count = 0
for i in range(count):
dt_addr = dl[f"data_block_addr{i}"]
if dt_addr:
valid_count += 1
else:
break
kwargs = {
f"data_block_addr{i}": dl[f"data_block_addr{i}"]
for i in range(valid_count)
}
kwargs["links_nr"] = valid_count + 1
kwargs["flags"] = dl.flags
                        if dl.flags & v4c.FLAG_DL_EQUAL_LENGHT:
                            kwargs["data_block_len"] = dl.data_block_len
                        else:
                            for i in range(valid_count):
kwargs[f"offset_{i}"] = dl[f"offset_{i}"]
stream.seek(data_addr)
stream.write(bytes(DataList(**kwargs)))
self.identification[
"unfinalized_standard_flags"
] -= v4c.FLAG_UNFIN_UPDATE_LAST_DL
if flags & v4c.FLAG_UNFIN_UPDATE_LAST_DT_LENGTH:
try:
for dg_addr in blocks[b"##DG"]:
group = DataGroup(address=dg_addr, stream=stream, mapped=mapped)
data_addr = group.data_block_addr
if not data_addr:
continue
stream.seek(data_addr)
blk_id = stream.read(4)
if blk_id == b"##DT":
blk = DataBlock(address=data_addr, stream=stream, mapped=mapped)
elif blk_id == b"##DL":
while True:
dl = DataList(
address=data_addr, stream=stream, mapped=mapped
)
if not dl.next_dl_addr:
break
data_addr = dl[f"data_block_addr{dl.links_nr - 2}"]
blk = DataBlock(address=data_addr, stream=stream, mapped=mapped)
elif blk_id == b"##HL":
hl = HeaderList(address=data_addr, stream=stream, mapped=mapped)
data_addr = hl.first_dl_addr
while True:
dl = DataList(
address=data_addr, stream=stream, mapped=mapped
)
if not dl.next_dl_addr:
break
data_addr = dl[f"data_block_addr{dl.links_nr - 2}"]
blk = DataBlock(address=data_addr, stream=stream, mapped=mapped)
next_block = bisect.bisect_right(addresses, data_addr)
if next_block == len(addresses):
block_len = limit - data_addr
else:
block_len = addresses[next_block] - data_addr
blk.block_len = block_len
stream.seek(data_addr)
stream.write(bytes(blk))
except:
print(format_exc())
raise
self.identification.unfinalized_standard_flags -= (
v4c.FLAG_UNFIN_UPDATE_LAST_DT_LENGTH
)
self.identification.file_identification = b"MDF "
def _sort(self):
if self._file is None:
return
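        # sorting strategy: unsorted groups that share the same raw data blocks
        # are collected together, each record is dispatched to its channel
        # group based on the record id, and the per-group byte streams are
        # written to the temporary file as new, sorted blocks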
flags = self.identification["unfinalized_standard_flags"]
common = defaultdict(list)
for i, group in enumerate(self.groups):
if group.sorted:
continue
if group.data_blocks:
address = group.data_blocks[0].address
common[address].append((i, group.channel_group.record_id))
read = self._file.read
seek = self._file.seek
self._tempfile.seek(0, 2)
tell = self._tempfile.tell
write = self._tempfile.write
for address, groups in common.items():
cg_map = {
rec_id: self.groups[index_].channel_group for index_, rec_id in groups
}
final_records = {id_: [] for (_, id_) in groups}
for rec_id, channel_group in cg_map.items():
if channel_group.address in self._cn_data_map:
dg_cntr, ch_cntr = self._cn_data_map[channel_group.address]
self.groups[dg_cntr].signal_data[ch_cntr] = []
group = self.groups[groups[0][0]]
record_id_nr = group.data_group.record_id_len
cg_size = group.record_size
if record_id_nr == 1:
_unpack_stuct = UINT8_uf
elif record_id_nr == 2:
_unpack_stuct = UINT16_uf
elif record_id_nr == 4:
_unpack_stuct = UINT32_uf
elif record_id_nr == 8:
_unpack_stuct = UINT64_uf
else:
message = f"invalid record id size {record_id_nr}"
raise MdfException(message)
rem = b""
for info in group.data_blocks:
dtblock_address, dtblock_raw_size, dtblock_size, block_type, param = (
info.address,
info.raw_size,
info.size,
info.block_type,
info.param,
)
if block_type != v4c.DT_BLOCK:
partial_records = {id_: [] for _, id_ in groups}
new_data = read(dtblock_size)
if block_type == v4c.DZ_BLOCK_DEFLATE:
new_data = decompress(new_data, 0, dtblock_raw_size)
elif block_type == v4c.DZ_BLOCK_TRANSPOSED:
new_data = decompress(new_data, 0, dtblock_raw_size)
cols = param
lines = dtblock_raw_size // cols
nd = fromstring(new_data[: lines * cols], dtype=uint8)
nd = nd.reshape((cols, lines))
new_data = nd.T.tobytes() + new_data[lines * cols :]
new_data = rem + new_data
try:
rem = sort_data_block(
new_data,
partial_records,
cg_size,
record_id_nr,
_unpack_stuct,
)
except:
print(format_exc())
raise
for rec_id, new_data in partial_records.items():
channel_group = cg_map[rec_id]
if channel_group.address in self._cn_data_map:
dg_cntr, ch_cntr = self._cn_data_map[channel_group.address]
else:
dg_cntr, ch_cntr = None, None
if new_data:
tempfile_address = tell()
dtblock_temp_size = write(b"".join(new_data))
if dg_cntr is not None:
offsets, size = get_vlsd_offsets(new_data)
if dtblock_temp_size:
info = SignalDataBlockInfo(
address=tempfile_address,
size=dtblock_temp_size,
count=len(offsets),
offsets=offsets,
)
self.groups[dg_cntr].signal_data[ch_cntr].append(
info
)
else:
if dtblock_temp_size:
block_info = DataBlockInfo(
address=tempfile_address,
block_type=v4c.DT_BLOCK,
raw_size=dtblock_temp_size,
size=dtblock_temp_size,
param=0,
)
final_records[rec_id].append(block_info)
dtblock_temp_size = 0
else: # DTBLOCK
seek(dtblock_address)
limit = 32 * 1024 * 1024 # 32MB
while dtblock_size:
if dtblock_size > limit:
dtblock_size -= limit
new_data = rem + read(limit)
else:
new_data = rem + read(dtblock_size)
dtblock_size = 0
partial_records = {id_: [] for _, id_ in groups}
rem = sort_data_block(
new_data,
partial_records,
cg_size,
record_id_nr,
_unpack_stuct,
)
for rec_id, new_data in partial_records.items():
channel_group = cg_map[rec_id]
if channel_group.address in self._cn_data_map:
dg_cntr, ch_cntr = self._cn_data_map[
channel_group.address
]
else:
dg_cntr, ch_cntr = None, None
if new_data:
if dg_cntr is not None:
tempfile_address = tell()
size = write(b"".join(new_data))
offsets, size = get_vlsd_offsets(new_data)
if size:
info = SignalDataBlockInfo(
address=tempfile_address,
size=size,
count=len(offsets),
offsets=offsets,
)
self.groups[dg_cntr].signal_data[
ch_cntr
].append(info)
else:
if dtblock_raw_size:
tempfile_address = tell()
new_data = b"".join(new_data)
raw_size = len(new_data)
new_data = lz_compress(new_data)
compressed_size = write(new_data)
block_info = DataBlockInfo(
address=tempfile_address,
block_type=v4c.DZ_BLOCK_LZ,
raw_size=raw_size,
size=compressed_size,
param=None,
)
final_records[rec_id].append(block_info)
raw_size = 0
# after we read all DTBLOCKs in the original file,
# we assign freshly created blocks from temporary file to
# corresponding groups.
for idx, rec_id in groups:
group = self.groups[idx]
group.data_location = v4c.LOCATION_TEMPORARY_FILE
group.set_blocks_info(final_records[rec_id])
group.sorted = True
for i, group in enumerate(self.groups):
if flags & v4c.FLAG_UNFIN_UPDATE_CG_COUNTER:
channel_group = group.channel_group
if channel_group.flags & v4c.FLAG_CG_VLSD:
continue
if (
self.version >= "4.20"
and channel_group.flags & v4c.FLAG_CG_REMOTE_MASTER
):
index = channel_group.cg_master_index
else:
index = i
if group.uses_ld:
samples_size = channel_group.samples_byte_nr
else:
samples_size = (
channel_group.samples_byte_nr
+ channel_group.invalidation_bytes_nr
)
total_size = sum(blk.raw_size for blk in group.data_blocks)
cycles_nr = total_size // samples_size
virtual_channel_group = self.virtual_groups[index]
virtual_channel_group.cycles_nr = cycles_nr
channel_group.cycles_nr = cycles_nr
if (
self.identification["unfinalized_standard_flags"]
& v4c.FLAG_UNFIN_UPDATE_CG_COUNTER
):
self.identification[
"unfinalized_standard_flags"
] -= v4c.FLAG_UNFIN_UPDATE_CG_COUNTER
if (
self.identification["unfinalized_standard_flags"]
& v4c.FLAG_UNFIN_UPDATE_VLSD_BYTES
):
self.identification[
"unfinalized_standard_flags"
] -= v4c.FLAG_UNFIN_UPDATE_VLSD_BYTES
def _process_bus_logging(self):
groups_count = len(self.groups)
for index in range(groups_count):
group = self.groups[index]
if group.channel_group.flags & v4c.FLAG_CG_BUS_EVENT:
source = group.channel_group.acq_source
if (
source
and source.bus_type in (v4c.BUS_TYPE_CAN, v4c.BUS_TYPE_OTHER)
and "CAN_DataFrame" in [ch.name for ch in group.channels]
):
self._process_can_logging(index, group)
if (
source
and source.bus_type in (v4c.BUS_TYPE_LIN, v4c.BUS_TYPE_OTHER)
and "LIN_Frame" in [ch.name for ch in group.channels]
):
self._process_lin_logging(index, group)
def _process_can_logging(self, group_index, grp):
channels = grp.channels
group = grp
dbc = None
for i, channel in enumerate(channels):
if channel.name == "CAN_DataFrame":
attachment_addr = channel.attachment
if attachment_addr is not None:
if attachment_addr not in self._dbc_cache:
attachment, at_name, md5_sum = self.extract_attachment(
index=attachment_addr,
decryption_function=self._decryption_function,
)
if at_name.suffix.lower() not in (".arxml", ".dbc"):
message = f'Expected .dbc or .arxml file as CAN channel attachment but got "{at_name}"'
logger.warning(message)
elif not attachment:
message = f'Attachment "{at_name}" not found'
logger.warning(message)
else:
dbc = load_can_database(at_name, contents=attachment)
if dbc:
self._dbc_cache[attachment_addr] = dbc
else:
dbc = self._dbc_cache[attachment_addr]
break
if dbc is None:
parents, dtypes = self._prepare_record(group)
data = self._load_data(group, optimize_read=False)
for fragment_index, fragment in enumerate(data):
if dtypes.itemsize:
group.record = fromstring(fragment[0], dtype=dtypes)
else:
group.record = None
return
self._set_temporary_master(None)
self._set_temporary_master(self.get_master(group_index, data=fragment))
bus_ids = self.get(
"CAN_DataFrame.BusChannel",
group=group_index,
data=fragment,
samples_only=True,
)[0].astype("<u1")
msg_ids = (
self.get(
"CAN_DataFrame.ID",
group=group_index,
data=fragment,
samples_only=True,
)[0].astype("<u4")
& 0x1FFFFFFF
)
if len(bus_ids) == 0:
continue
buses = unique(bus_ids)
for bus in buses:
bus_msg_ids = msg_ids[bus_ids == bus]
unique_ids = sorted(unique(bus_msg_ids).astype("<u8"))
bus_map = self.bus_logging_map["CAN"].setdefault(bus, {})
for msg_id in unique_ids:
bus_map[int(msg_id)] = group_index
self._set_temporary_master(None)
group.record = None
else:
is_j1939 = dbc.contains_j1939
if is_j1939:
messages = {message.arbitration_id.pgn: message for message in dbc}
else:
messages = {message.arbitration_id.id: message for message in dbc}
msg_map = {}
parents, dtypes = self._prepare_record(group)
data = self._load_data(group, optimize_read=False)
for fragment_index, fragment in enumerate(data):
if dtypes.itemsize:
group.record = fromstring(fragment[0], dtype=dtypes)
else:
group.record = None
return
self._set_temporary_master(None)
self._set_temporary_master(self.get_master(group_index, data=fragment))
bus_ids = self.get(
"CAN_DataFrame.BusChannel",
group=group_index,
data=fragment,
samples_only=True,
)[0].astype("<u1")
msg_ids = (
self.get(
"CAN_DataFrame.ID", group=group_index, data=fragment
).astype("<u4")
& 0x1FFFFFFF
)
if is_j1939:
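                    # J1939: build the PGN from the 29-bit identifier; for PDU2
                    # frames (PF >= 240) the PS byte is part of the PGN, for
                    # PDU1 frames it is the destination address and is dropped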
ps = (msg_ids.samples >> 8) & 0xFF
pf = (msg_ids.samples >> 16) & 0xFF
_pgn = pf << 8
msg_ids.samples = where(pf >= 240, _pgn + ps, _pgn)
data_bytes = self.get(
"CAN_DataFrame.DataBytes",
group=group_index,
data=fragment,
samples_only=True,
)[0]
buses = unique(bus_ids)
if len(bus_ids) == 0:
continue
for bus in buses:
idx_ = bus_ids == bus
bus_msg_ids = msg_ids.samples[idx_]
bus_t = msg_ids.timestamps[idx_]
bus_data_bytes = data_bytes[idx_]
unique_ids = sorted(unique(bus_msg_ids).astype("<u8"))
bus_map = self.bus_logging_map["CAN"].setdefault(bus, {})
for msg_id in unique_ids:
bus_map[int(msg_id)] = group_index
for msg_id in unique_ids:
message = messages.get(msg_id, None)
if message is None:
continue
idx = bus_msg_ids == msg_id
payload = bus_data_bytes[idx]
t = bus_t[idx]
extracted_signals = extract_mux(
payload, message, msg_id, bus, t
)
for entry, signals in extracted_signals.items():
if len(next(iter(signals.values()))["samples"]) == 0:
continue
if entry not in msg_map:
sigs = []
for name_, signal in signals.items():
sig = Signal(
samples=signal["samples"],
timestamps=signal["t"],
name=signal["name"],
comment=signal["comment"],
unit=signal["unit"],
invalidation_bits=signal["invalidation_bits"],
display_name=f"CAN{bus}.{message.name}.{signal['name']}",
)
sigs.append(sig)
cg_nr = self.append(
sigs,
acq_name=f"from CAN{bus} message ID=0x{msg_id:X}",
comment=f"{message} 0x{msg_id:X}",
common_timebase=True,
)
msg_map[entry] = cg_nr
for ch_index, ch in enumerate(
self.groups[cg_nr].channels
):
if ch_index == 0:
continue
entry = cg_nr, ch_index
name_ = f"{message}.{ch.name}"
self.channels_db.add(name_, entry)
name_ = f"CAN{bus}.{message}.{ch.name}"
self.channels_db.add(name_, entry)
name_ = f"CAN_DataFrame_{msg_id}.{ch.name}"
self.channels_db.add(name_, entry)
name_ = f"CAN{bus}.CAN_DataFrame_{msg_id}.{ch.name}"
self.channels_db.add(name_, entry)
else:
index = msg_map[entry]
sigs = []
for name_, signal in signals.items():
sigs.append(
(signal["samples"], signal["invalidation_bits"])
)
t = signal["t"]
sigs.insert(0, (t, None))
self.extend(index, sigs)
self._set_temporary_master(None)
group.record = None
def _process_lin_logging(self, group_index, grp):
channels = grp.channels
group = grp
dbc = None
for i, channel in enumerate(channels):
if channel.name == "LIN_Frame":
attachment_addr = channel.attachment
if attachment_addr is not None:
if attachment_addr not in self._dbc_cache:
attachment, at_name, md5_sum = self.extract_attachment(
index=attachment_addr,
decryption_function=self._decryption_function,
)
if at_name.suffix.lower() not in (".arxml", ".dbc"):
message = f'Expected .dbc or .arxml file as LIN channel attachment but got "{at_name}"'
logger.warning(message)
elif not attachment:
message = f'Attachment "{at_name}" not found'
logger.warning(message)
else:
dbc = load_can_database(at_name, contents=attachment)
if dbc:
self._dbc_cache[attachment_addr] = dbc
else:
dbc = self._dbc_cache[attachment_addr]
break
if dbc is None:
parents, dtypes = self._prepare_record(group)
data = self._load_data(group, optimize_read=False)
for fragment_index, fragment in enumerate(data):
if dtypes.itemsize:
group.record = fromstring(fragment[0], dtype=dtypes)
else:
group.record = None
return
self._set_temporary_master(None)
self._set_temporary_master(self.get_master(group_index, data=fragment))
msg_ids = (
self.get(
"LIN_Frame.ID",
group=group_index,
data=fragment,
samples_only=True,
)[0].astype("<u4")
& 0x1FFFFFFF
)
unique_ids = sorted(unique(msg_ids).astype("<u8"))
lin_map = self.bus_logging_map["LIN"]
for msg_id in unique_ids:
lin_map[int(msg_id)] = group_index
self._set_temporary_master(None)
group.record = None
else:
messages = {message.arbitration_id.id: message for message in dbc}
msg_map = {}
parents, dtypes = self._prepare_record(group)
data = self._load_data(group, optimize_read=False)
for fragment_index, fragment in enumerate(data):
if dtypes.itemsize:
group.record = fromstring(fragment[0], dtype=dtypes)
else:
group.record = None
return
self._set_temporary_master(None)
self._set_temporary_master(self.get_master(group_index, data=fragment))
msg_ids = (
self.get("LIN_Frame.ID", group=group_index, data=fragment).astype(
"<u4"
)
& 0x1FFFFFFF
)
data_bytes = self.get(
"LIN_Frame.DataBytes",
group=group_index,
data=fragment,
samples_only=True,
)[0]
bus_msg_ids = msg_ids.samples
bus_t = msg_ids.timestamps
bus_data_bytes = data_bytes
unique_ids = sorted(unique(bus_msg_ids).astype("<u8"))
lin_map = self.bus_logging_map["LIN"]
for msg_id in unique_ids:
lin_map[int(msg_id)] = group_index
for msg_id in unique_ids:
message = messages.get(msg_id, None)
if message is None:
continue
idx = bus_msg_ids == msg_id
payload = bus_data_bytes[idx]
t = bus_t[idx]
extracted_signals = extract_mux(payload, message, msg_id, 0, t)
for entry, signals in extracted_signals.items():
if len(next(iter(signals.values()))["samples"]) == 0:
continue
if entry not in msg_map:
sigs = []
for name_, signal in signals.items():
sig = Signal(
samples=signal["samples"],
timestamps=signal["t"],
name=signal["name"],
comment=signal["comment"],
unit=signal["unit"],
invalidation_bits=signal["invalidation_bits"],
display_name=f"LIN.{message.name}.{signal['name']}",
)
sigs.append(sig)
cg_nr = self.append(
sigs,
acq_name=f"from LIN message ID=0x{msg_id:X}",
comment=f"{message} 0x{msg_id:X}",
common_timebase=True,
)
msg_map[entry] = cg_nr
for ch_index, ch in enumerate(self.groups[cg_nr].channels):
if ch_index == 0:
continue
entry = cg_nr, ch_index
name_ = f"{message}.{ch.name}"
self.channels_db.add(name_, entry)
name_ = f"LIN.{message}.{ch.name}"
self.channels_db.add(name_, entry)
name_ = f"LIN_Frame_{msg_id}.{ch.name}"
self.channels_db.add(name_, entry)
name_ = f"LIN.LIN_Frame_{msg_id}.{ch.name}"
self.channels_db.add(name_, entry)
else:
index = msg_map[entry]
sigs = []
for name_, signal in signals.items():
sigs.append(
(signal["samples"], signal["invalidation_bits"])
)
t = signal["t"]
sigs.insert(0, (t, None))
self.extend(index, sigs)
self._set_temporary_master(None)
group.record = None
|
lgpl-3.0
|
spmjc/plugin.video.freplay
|
resources/lib/channels/allocine2.py
|
2
|
5177
|
#-*- coding: utf-8 -*-
import urllib2
import re
import CommonFunctions
common = CommonFunctions
from resources.lib import utils
from resources.lib import globalvar
title=['Allocine']
img=['allocine']
readyForUse=True
def list_shows(channel,folder):
shows=[]
if folder=='none' :
shows.append( [channel,'ba', 'Bandes Annonces'.encode('utf-8') , '','folder'] )
shows.append( [channel,'158001|1', 'Webseries'.encode('utf-8') , '','folder'] )
shows.append( [channel,'158002|1', 'Mangas'.encode('utf-8') , '','folder'] )
shows.append( [channel,'158003|1', 'Parodies'.encode('utf-8') , '','folder'] )
shows.append( [channel,'158004|1', 'Emissions dActu'.encode('utf-8') , '','folder'] )
shows.append( [channel,'158005|1', 'Emissions Bonus'.encode('utf-8') , '','folder'] )
shows.append( [channel,'158006|1', 'Stars'.encode('utf-8') , '','folder'] )
else:
if folder=='ba':
shows.append( [channel,'video/bandes-annonces/|1', 'A ne pas manquer'.encode('utf-8') , '','shows'] )
shows.append( [channel,'/bandes-annonces/plus-recentes/|1', 'Les plus recentes'.encode('utf-8') , '','shows'] )
shows.append( [channel,'/bandes-annonces/prochainement/|1', 'Bientot au cinema'.encode('utf-8') , '','shows'] )
else:
if 'programme' in folder:
filePath=utils.downloadCatalog('http://www.allocine.fr/' + folder ,'allocine' + folder.replace('\\','') +'.html',False,{})
html=open(filePath).read().replace('\xe9', 'e').replace('\xe0', 'a').replace('\n', ' ').replace('\r', '')
match = re.compile(r'<a class="button btn-primary btn-large" href="(.*?)">(.*?)</a>',re.DOTALL).findall(html)
for url,title in match:
shows.append( [channel,url + '|1', title.replace("<i class='icon-sign-plus'></i>","") ,'' ,'shows'] )
else:
cat,page=folder.split('|')
filePath=utils.downloadCatalog('http://www.allocine.fr/video/prgcat-' + cat + '/?page=' + page ,'allocine' + cat + '-' + page +'.html',False,{})
html=open(filePath).read().replace('\xe9', 'e').replace('\xe0', 'a').replace('\n', ' ').replace('\r', '')
match = re.compile(r'btn-primary btn-large (.*?)">(.*?)<i class="icon-arrow-(.*?)"></i>',re.DOTALL).findall(html)
prev=False
next=False
for status,empty,arrow in match:
if arrow=='left':
prev=('disabled' not in status)
if arrow=='right':
next=('disabled' not in status)
if prev:
shows.append( [channel,cat + '|' + str(int(page)-1), '<<Page Precedente' ,'' ,'folder'] )
match = re.compile(r'<h2 class="title "> <span > <a href="(.*?)">(.*?)</a> </span> </h2>',re.DOTALL).findall(html)
for url,title in match:
shows.append( [channel,url, title ,'' ,'folder'] )
if next :
shows.append( [channel,cat + '|' + str(int(page)+1), 'Page Suivante>>' ,'' ,'folder'] )
return shows
def list_videos(channel,folder):
cat,page=folder.split('|')
videos=[]
filePath=utils.downloadCatalog('http://www.allocine.fr/' + cat + '/?page=' + page ,'allocine' + cat + '-' + page +'.html',False,{})
html=open(filePath).read().replace('\xe9', 'e').replace('\xe0', 'a').replace('\n', ' ').replace('\r', '')
match = re.compile(r'btn-primary btn-large (.*?)">(.*?)<i class="icon-arrow-(.*?)"></i>',re.DOTALL).findall(html)
prev=False
next=False
for status,empty,arrow in match:
if arrow=='left':
prev=('disabled' not in status)
if arrow=='right':
next=('disabled' not in status)
if prev:
    videos.append( [channel,cat + '-' + str(int(page)-1), '<<Page Precedente' ,'',{} ,'folder'] )
match = re.compile(r'<div class="layer-link-holder"><a href="/video/player_gen_cmedia=(.*?)&cfilm=(.*?).html" class="layer-link">(.*?)</a></div>',re.DOTALL).findall(html)
if match:
for id,movie,title in match:
title=title.replace('<strong>','').replace('</strong>','')
infoLabels={ "Title": title}
videos.append( [channel, id , title , '',infoLabels,'play'] )
match = re.compile(r'<h3 class="title "> <span > <a href="/video/video-(.*?)/" itemprop="url">(.*?)</a> </span> </h3>',re.DOTALL).findall(html)
if match:
for idVideo,title in match:
title=title.replace('<strong>','').replace('</strong>','')
infoLabels={ "Title": title}
videos.append( [channel, idVideo , title , '',infoLabels,'play'] )
if next :
    videos.append( [channel,cat + '-' + str(int(page)+1), 'Page Suivante>>' ,'',{} ,'folder'] )
return videos
def getVideoURL(channel,idVideo):
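  # download the AcVisiondata XML descriptor for this media id and return the
  # HD stream path (the ld/md paths are parsed but not used)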
filePath=utils.downloadCatalog('http://www.allocine.fr/ws/AcVisiondataV4.ashx?media=%s' % (idVideo),'allocine%s.xml' % (idVideo),False,{})
xml=open(filePath).read()
ld=re.findall('ld_path="(.*?)"', xml)[0]
md=re.findall('md_path="(.*?)"', xml)[0]
hd=re.findall('hd_path="(.*?)"', xml)[0]
return hd
|
gpl-2.0
|
kopchik/qtile
|
libqtile/widget/notify.py
|
10
|
4444
|
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2011 Mounier Florian
# Copyright (c) 2012 roger
# Copyright (c) 2012-2014 Tycho Andersen
# Copyright (c) 2012-2013 Craig Barnes
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 Adi Sieker
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -*- coding: utf-8 -*-
from . import base
from .. import bar, utils, pangocffi
from libqtile.notify import notifier
class Notify(base._TextBox):
"""
A notify widget.
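    A rough configuration sketch (hedged; the surrounding bar/screen setup in a
    user config is assumed, only the widget call comes from this module):
        widget.Notify(default_timeout=10)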
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
("foreground_urgent", "ff0000", "Foreground urgent priority colour"),
("foreground_low", "dddddd", "Foreground low priority colour"),
(
"default_timeout",
None,
"Default timeout (seconds) for notifications"
),
]
def __init__(self, width=bar.CALCULATED, **config):
base._TextBox.__init__(self, "", width, **config)
self.add_defaults(Notify.defaults)
notifier.register(self.update)
self.current_id = 0
def _configure(self, qtile, bar):
base._TextBox._configure(self, qtile, bar)
self.layout = self.drawer.textlayout(
self.text,
self.foreground,
self.font,
self.fontsize,
self.fontshadow,
markup=True
)
def set_notif_text(self, notif):
self.text = pangocffi.markup_escape_text(notif.summary)
urgency = notif.hints.get('urgency', 1)
if urgency != 1:
self.text = '<span color="%s">%s</span>' % (
utils.hex(
self.foreground_urgent if urgency == 2
else self.foreground_low
),
self.text
)
if notif.body:
self.text = '<span weight="bold">%s</span> - %s' % (
self.text, pangocffi.markup_escape_text(notif.body)
)
def update(self, notif):
self.qtile.call_soon_threadsafe(self.real_update, notif)
def real_update(self, notif):
self.set_notif_text(notif)
self.current_id = notif.id - 1
if notif.timeout and notif.timeout > 0:
self.timeout_add(notif.timeout / 1000, self.clear)
elif self.default_timeout:
self.timeout_add(self.default_timeout, self.clear)
self.bar.draw()
return True
def display(self):
self.set_notif_text(notifier.notifications[self.current_id])
self.bar.draw()
def clear(self):
self.text = ''
self.current_id = len(notifier.notifications) - 1
self.bar.draw()
def prev(self):
if self.current_id > 0:
self.current_id -= 1
self.display()
def next(self):
if self.current_id < len(notifier.notifications) - 1:
self.current_id += 1
self.display()
def button_press(self, x, y, button):
if button == 1:
self.clear()
elif button == 4:
self.prev()
elif button == 5:
self.next()
def cmd_display(self):
self.display()
def cmd_clear(self):
self.clear()
def cmd_toggle(self):
if self.text == '':
self.display()
else:
self.clear()
def cmd_prev(self):
self.prev()
def cmd_next(self):
self.next()
|
mit
|
alexdglover/shill-isms
|
venv/lib/python2.7/site-packages/pip/commands/__init__.py
|
344
|
2244
|
"""
Package containing all pip commands
"""
from __future__ import absolute_import
from pip.commands.completion import CompletionCommand
from pip.commands.download import DownloadCommand
from pip.commands.freeze import FreezeCommand
from pip.commands.hash import HashCommand
from pip.commands.help import HelpCommand
from pip.commands.list import ListCommand
from pip.commands.check import CheckCommand
from pip.commands.search import SearchCommand
from pip.commands.show import ShowCommand
from pip.commands.install import InstallCommand
from pip.commands.uninstall import UninstallCommand
from pip.commands.wheel import WheelCommand
commands_dict = {
CompletionCommand.name: CompletionCommand,
FreezeCommand.name: FreezeCommand,
HashCommand.name: HashCommand,
HelpCommand.name: HelpCommand,
SearchCommand.name: SearchCommand,
ShowCommand.name: ShowCommand,
InstallCommand.name: InstallCommand,
UninstallCommand.name: UninstallCommand,
DownloadCommand.name: DownloadCommand,
ListCommand.name: ListCommand,
CheckCommand.name: CheckCommand,
WheelCommand.name: WheelCommand,
}
commands_order = [
InstallCommand,
DownloadCommand,
UninstallCommand,
FreezeCommand,
ListCommand,
ShowCommand,
CheckCommand,
SearchCommand,
WheelCommand,
HashCommand,
CompletionCommand,
HelpCommand,
]
def get_summaries(ordered=True):
"""Yields sorted (command name, command summary) tuples."""
if ordered:
cmditems = _sort_commands(commands_dict, commands_order)
else:
cmditems = commands_dict.items()
for name, command_class in cmditems:
yield (name, command_class.summary)
def get_similar_commands(name):
"""Command name auto-correct."""
from difflib import get_close_matches
name = name.lower()
close_commands = get_close_matches(name, commands_dict.keys())
if close_commands:
return close_commands[0]
else:
return False
def _sort_commands(cmddict, order):
def keyfn(key):
try:
return order.index(key[1])
except ValueError:
# unordered items should come last
return 0xff
return sorted(cmddict.items(), key=keyfn)
|
mit
|
garaden/flask
|
flask/views.py
|
149
|
5644
|
# -*- coding: utf-8 -*-
"""
flask.views
~~~~~~~~~~~
This module provides class-based views inspired by the ones in Django.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from .globals import request
from ._compat import with_metaclass
http_method_funcs = frozenset(['get', 'post', 'head', 'options',
'delete', 'put', 'trace', 'patch'])
class View(object):
"""Alternative way to use view functions. A subclass has to implement
:meth:`dispatch_request` which is called with the view arguments from
the URL routing system. If :attr:`methods` is provided the methods
do not have to be passed to the :meth:`~flask.Flask.add_url_rule`
method explicitly::
class MyView(View):
methods = ['GET']
def dispatch_request(self, name):
return 'Hello %s!' % name
app.add_url_rule('/hello/<name>', view_func=MyView.as_view('myview'))
When you want to decorate a pluggable view you will have to either do that
when the view function is created (by wrapping the return value of
:meth:`as_view`) or you can use the :attr:`decorators` attribute::
class SecretView(View):
methods = ['GET']
decorators = [superuser_required]
def dispatch_request(self):
...
The decorators stored in the decorators list are applied one after another
when the view function is created. Note that you can *not* use the class
based decorators since those would decorate the view class and not the
generated view function!
"""
    #: A list of methods this pluggable view can handle.
methods = None
#: The canonical way to decorate class-based views is to decorate the
    #: return value of as_view(). However, this moves parts of the
    #: logic from the class declaration to the place where it's hooked
    #: into the routing system.
#:
#: You can place one or more decorators in this list and whenever the
#: view function is created the result is automatically decorated.
#:
#: .. versionadded:: 0.8
decorators = ()
def dispatch_request(self):
"""Subclasses have to override this method to implement the
actual view function code. This method is called with all
the arguments from the URL rule.
"""
raise NotImplementedError()
@classmethod
def as_view(cls, name, *class_args, **class_kwargs):
"""Converts the class into an actual view function that can be used
with the routing system. Internally this generates a function on the
fly which will instantiate the :class:`View` on each request and call
the :meth:`dispatch_request` method on it.
The arguments passed to :meth:`as_view` are forwarded to the
constructor of the class.
"""
def view(*args, **kwargs):
self = view.view_class(*class_args, **class_kwargs)
return self.dispatch_request(*args, **kwargs)
if cls.decorators:
view.__name__ = name
view.__module__ = cls.__module__
for decorator in cls.decorators:
view = decorator(view)
# We attach the view class to the view function for two reasons:
# first of all it allows us to easily figure out what class-based
# view this thing came from, secondly it's also used for instantiating
# the view class so you can actually replace it with something else
# for testing purposes and debugging.
view.view_class = cls
view.__name__ = name
view.__doc__ = cls.__doc__
view.__module__ = cls.__module__
view.methods = cls.methods
return view
class MethodViewType(type):
def __new__(cls, name, bases, d):
rv = type.__new__(cls, name, bases, d)
if 'methods' not in d:
methods = set(rv.methods or [])
for key in d:
if key in http_method_funcs:
methods.add(key.upper())
# If we have no method at all in there we don't want to
# add a method list. (This is for instance the case for
# the base class or another subclass of a base method view
# that does not introduce new methods).
if methods:
rv.methods = sorted(methods)
return rv
class MethodView(with_metaclass(MethodViewType, View)):
"""Like a regular class-based view but that dispatches requests to
particular methods. For instance if you implement a method called
    :meth:`get` it means you will respond to ``'GET'`` requests and
the :meth:`dispatch_request` implementation will automatically
forward your request to that. Also :attr:`options` is set for you
automatically::
class CounterAPI(MethodView):
def get(self):
return session.get('counter', 0)
def post(self):
session['counter'] = session.get('counter', 0) + 1
return 'OK'
app.add_url_rule('/counter', view_func=CounterAPI.as_view('counter'))
"""
def dispatch_request(self, *args, **kwargs):
meth = getattr(self, request.method.lower(), None)
# If the request method is HEAD and we don't have a handler for it
# retry with GET.
if meth is None and request.method == 'HEAD':
meth = getattr(self, 'get', None)
assert meth is not None, 'Unimplemented method %r' % request.method
return meth(*args, **kwargs)
|
bsd-3-clause
|
ff0000/red-fab-deploy
|
fab_deploy/joyent/postgres.py
|
1
|
5514
|
import os
import sys
import tempfile
from fabric.api import run, sudo, env, local, hide, settings
from fabric.contrib.files import append, sed, exists, contains
from fabric.context_managers import prefix
from fabric.operations import get, put
from fabric.context_managers import cd
from fabric.tasks import Task
from fab_deploy.functions import random_password
from fab_deploy.base import postgres as base_postgres
class JoyentMixin(object):
version_directory_join = ''
def _get_data_dir(self, db_version):
# Try to get from svc first
output = run('svcprop -p config/data postgresql')
if output.stdout and exists(output.stdout, use_sudo=True):
return output.stdout
return base_postgres.PostgresInstall._get_data_dir(self, db_version)
def _install_package(self, db_version):
sudo("pkg_add postgresql%s-server" %db_version)
sudo("pkg_add postgresql%s-replicationtools" %db_version)
sudo("svcadm enable postgresql")
def _restart_db_server(self, db_version):
sudo('svcadm restart postgresql')
def _stop_db_server(self, db_version):
sudo('svcadm disable postgresql')
def _start_db_server(self, db_version):
sudo('svcadm enable postgresql')
class PostgresInstall(JoyentMixin, base_postgres.PostgresInstall):
"""
    Install postgresql on the server:
    install the postgresql package;
    enable postgres access from localhost without a password;
    enable access for all other users from other machines with a password;
    set up a few parameters related to streaming replication;
    make the database server listen on all interfaces ('*');
    create a database user with a password.
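    A rough invocation sketch (hedged; the exact command depends on how the
    project's fabfile exposes this task): ``fab master_setup`` against the
    configured database host.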
"""
name = 'master_setup'
db_version = '9.1'
class SlaveSetup(JoyentMixin, base_postgres.SlaveSetup):
"""
Set up master-slave streaming replication: slave node
"""
name = 'slave_setup'
class PGBouncerInstall(Task):
"""
Set up PGBouncer on a database server
"""
name = 'setup_pgbouncer'
pgbouncer_src = 'http://pkgsrc.smartos.org/packages/SmartOS/2012Q2/databases/pgbouncer-1.4.2.tgz'
pkg_name = 'pgbouncer-1.4.2.tgz'
config_dir = '/etc/opt/pkg'
config = {
'*': 'host=127.0.0.1',
'logfile': '/var/log/pgbouncer/pgbouncer.log',
'listen_addr': '*',
'listen_port': '6432',
'unix_socket_dir': '/tmp',
'auth_type': 'md5',
'auth_file': '%s/pgbouncer.userlist' %config_dir,
'pool_mode': 'session',
'admin_users': 'postgres',
'stats_users': 'postgres',
}
def install_package(self):
sudo('pkg_add libevent')
with cd('/tmp'):
run('wget %s' %self.pgbouncer_src)
sudo('pkg_add %s' %self.pkg_name)
def _setup_parameter(self, file_name, **kwargs):
for key, value in kwargs.items():
origin = "%s =" %key
new = "%s = %s" %(key, value)
sudo('sed -i "/%s/ c\%s" %s' %(origin, new, file_name))
def _get_passwd(self, username):
with hide('output'):
string = run('echo "select usename, passwd from pg_shadow where '
'usename=\'%s\' order by 1" | sudo su postgres -c '
'"psql"' %username)
user, passwd = string.split('\n')[2].split('|')
user = user.strip()
passwd = passwd.strip()
__, tmp_name = tempfile.mkstemp()
fn = open(tmp_name, 'w')
fn.write('"%s" "%s" ""\n' %(user, passwd))
fn.close()
put(tmp_name, '%s/pgbouncer.userlist'%self.config_dir, use_sudo=True)
local('rm %s' %tmp_name)
def _get_username(self, section=None):
try:
names = env.config_object.get_list(section, env.config_object.USERNAME)
username = names[0]
except:
print ('You must first set up a database server on this machine, '
'and create a database user')
raise
return username
def run(self, section=None):
"""
"""
sudo('mkdir -p /opt/pkg/bin')
sudo("ln -sf /opt/local/bin/awk /opt/pkg/bin/nawk")
sudo("ln -sf /opt/local/bin/sed /opt/pkg/bin/nbsed")
self.install_package()
svc_method = os.path.join(env.configs_dir, 'pgbouncer.xml')
put(svc_method, self.config_dir, use_sudo=True)
home = run('bash -c "echo ~postgres"')
bounce_home = os.path.join(home, 'pgbouncer')
pidfile = os.path.join(bounce_home, 'pgbouncer.pid')
self._setup_parameter('%s/pgbouncer.ini' %self.config_dir,
pidfile=pidfile, **self.config)
if not section:
section = 'db-server'
username = self._get_username(section)
self._get_passwd(username)
# postgres should be the owner of these config files
sudo('chown -R postgres:postgres %s' %self.config_dir)
sudo('mkdir -p %s' % bounce_home)
sudo('chown postgres:postgres %s' % bounce_home)
sudo('mkdir -p /var/log/pgbouncer')
sudo('chown postgres:postgres /var/log/pgbouncer')
# set up log
sudo('logadm -C 3 -p1d -c -w /var/log/pgbouncer/pgbouncer.log -z 1')
run('svccfg import %s/pgbouncer.xml' %self.config_dir)
# start pgbouncer
sudo('svcadm enable pgbouncer')
setup = PostgresInstall()
slave_setup = SlaveSetup()
setup_pgbouncer = PGBouncerInstall()
|
mit
|
atlassian/boto
|
boto/ec2/launchspecification.py
|
170
|
3829
|
# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a launch specification for Spot instances.
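A rough access sketch (hedged; assumes an existing boto EC2 connection):
    requests = conn.get_all_spot_instance_requests()
    spec = requests[0].launch_specification
    print(spec.image_id, spec.instance_type)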
"""
from boto.ec2.ec2object import EC2Object
from boto.resultset import ResultSet
from boto.ec2.blockdevicemapping import BlockDeviceMapping
from boto.ec2.group import Group
from boto.ec2.instance import SubParse
class GroupList(list):
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'groupId':
self.append(value)
class LaunchSpecification(EC2Object):
def __init__(self, connection=None):
super(LaunchSpecification, self).__init__(connection)
self.key_name = None
self.instance_type = None
self.image_id = None
self.groups = []
self.placement = None
self.kernel = None
self.ramdisk = None
self.monitored = False
self.subnet_id = None
self._in_monitoring_element = False
self.block_device_mapping = None
self.instance_profile = None
self.ebs_optimized = False
def __repr__(self):
return 'LaunchSpecification(%s)' % self.image_id
def startElement(self, name, attrs, connection):
if name == 'groupSet':
self.groups = ResultSet([('item', Group)])
return self.groups
elif name == 'monitoring':
self._in_monitoring_element = True
elif name == 'blockDeviceMapping':
self.block_device_mapping = BlockDeviceMapping()
return self.block_device_mapping
elif name == 'iamInstanceProfile':
self.instance_profile = SubParse('iamInstanceProfile')
return self.instance_profile
else:
return None
def endElement(self, name, value, connection):
if name == 'imageId':
self.image_id = value
elif name == 'keyName':
self.key_name = value
elif name == 'instanceType':
self.instance_type = value
elif name == 'availabilityZone':
self.placement = value
elif name == 'placement':
pass
elif name == 'kernelId':
self.kernel = value
elif name == 'ramdiskId':
self.ramdisk = value
elif name == 'subnetId':
self.subnet_id = value
elif name == 'state':
if self._in_monitoring_element:
if value == 'enabled':
self.monitored = True
self._in_monitoring_element = False
elif name == 'ebsOptimized':
self.ebs_optimized = (value == 'true')
else:
setattr(self, name, value)
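# Minimal, hypothetical sketch (not part of boto): drive the SAX-style
# callbacks above by hand to show how a LaunchSpecification gets populated.
# boto normally invokes these from its XML response parser; the AMI id and
# instance type below are made-up values.
if __name__ == '__main__':
    spec = LaunchSpecification()
    spec.endElement('imageId', 'ami-12345678', None)
    spec.endElement('instanceType', 'm1.small', None)
    spec.startElement('monitoring', {}, None)
    spec.endElement('state', 'enabled', None)  # inside <monitoring>
    print('%s type=%s monitored=%s' % (spec, spec.instance_type, spec.monitored))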
|
mit
|
sporkchops81/titleplant
|
lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/euckrprober.py
|
2931
|
1675
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import EUCKRSMModel
class EUCKRProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCKRSMModel)
self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "EUC-KR"
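# Illustrative usage sketch (not part of chardet). The sample bytes are
# assumed to be EUC-KR encoded Korean text; any EUC-KR input would do.
#
#     prober = EUCKRProber()
#     prober.feed(b'\xbe\xc8\xb3\xe7\xc7\xcf\xbc\xbc\xbf\xe4')
#     prober.get_charset_name()   # -> 'EUC-KR'
#     prober.get_confidence()     # -> a float between 0 and 1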
|
gpl-3.0
|
dlab-projects/python-taq
|
marketflow/clean_dsenames.py
|
3
|
12852
|
import pandas as pd
class Permno_Map(object):
"""1. Reads in dsenames file from crsp
2. Subsets """
def __init__(self, dsefile='crsp/dsenames.csv'):
self.dsenames = pd.read_csv(dsefile)
# Once everything is working, perhaps make this automatic.
# For now, it's easier to debug with smaller functions run one at a
# time.
# self.process(dsenames)
self.nasdaq = self.dsenames.PRIMEXCH == "Q"
def get_permno(self, cd, root, date):
'''Get the permno for a given symbol root.
Remember, permno does not change with suffix.'''
that_permno = cd[cd.SYM_ROOT == root]
permno_on_date = that_permno.loc[(cd.BEGDATE <= date)
& (cd.ENDDATE >= date)
, 'PERMNO']
if len(permno_on_date) > 0:
return permno_on_date
else:
raise NotImplementedError
def process(self, dsenames):
'''Run all processing steps in a reasonable order'''
dsenames = self.dse_subset(dsenames)
dsenames = self.dse_rootsplit(dsenames)
dsenames = self.drop_dups(dsenames)
self.clean_dsenames = dsenames
def dse_subset(self, dsenames, date=20100101, regular=True,
active=True, beneficial=False, when_issued=False):
'''Limit to our "good" set of securities.
Default settings include securities that are actively trading in normal
fashion on some exchange.
date : int
Not really an int, but the naïve conversion from the datestring.
regular : bool
Limit to "regular" stocks (i.e. the security is past the "When-Issued"
stage and the company is not going through bankruptcy proceedings)
active : bool
Limit to entries for stocks that are actively trading
beneficial : bool
If =False, we exclude stocks that are "shares of beneficial interest",
which indicates that the stocks are not trading normally due to their
inclusion in some sort of trust.
when_issued : bool
If =False, we exclude when_issued shares, which have been approved for
trading but have not yet begun trading actively
'''
bad_permnos = [14209, 15141, 91845]
# Here, we index out any securities whose records don't make sense
dsenames = dsenames[~dsenames['PERMNO'].isin(bad_permnos)]
# By default, we only include securities that were trading at some
# point from January 2010 onwards
dsenames = dsenames[dsenames['NAMEENDT'] >= date]
if regular:
# SECSTAT == "R" indicates that the security is "Regular" (i.e. the
# security is past the "When-Issued" stage and the company is not
# going through bankruptcy proceedings)
dsenames = dsenames[dsenames['SECSTAT'] == "R"]
if active:
# TRDSTAT == "A" indicates that the security is actively trading
dsenames = dsenames[dsenames['TRDSTAT'] == "A"]
dsenames['SYMBOL_LEN'] = dsenames['TSYMBOL'].str.len()
dsenames['LAST'] = dsenames['TSYMBOL'].str[-1]
dsenames['LAST2'] = dsenames['TSYMBOL'].str[-2:]
# The 'S' suffix indicates "Shares of Beneficial Interest", which do
# not trade like regular securities. The 'V' and 'WI' suffixes indicate
# "When-Issued" shares, which have been authorized to trade, but have
# not actually begun trading
# XXX Maybe make this attribute defined at a "higher level"?
if beneficial == False:
ben = (dsenames['LAST'] == "S") & (dsenames['SYMBOL_LEN'] == 5)
dsenames = dsenames[~ben]
if when_issued == False:
whenissued_nasdaq = ((dsenames['LAST'] == "V") &
(dsenames['SYMBOL_LEN'] == 5) & self.nasdaq)
whenissued_nonnasdaq = ((dsenames['LAST2'] == "WI") &
(dsenames['SYMBOL_LEN'] > 3) & ~self.nasdaq)
dsenames = dsenames[~(whenissued_nasdaq | whenissued_nonnasdaq)]  # drop either kind of when-issued share
return dsenames
def dse_rootsplit(self, dsenames):
'''Splits the root and the suffix into two separate variables,
SYM_ROOT and SYM_SUFFIX and flags suffix extraction cases
FLAG index:
=0 : base case, symbol has no suffix
=1 : NASDAQ, share class
=2 : NASDAQ, foreign shares or voting/non-voting shares
=3 : NASDAQ, reverse stock split
=4 : non-NASDAQ, share class suffix
Includes manual adjustments for idiosyncratic securities, should
be re-evaluated from time to time
'''
# Flag = 0 is our base case (i.e. the ticker symbol has no suffix)
dsenames['FLAG'] = 0
# When the ticker has no suffix, the root is just the ticker symbol, and the
# suffix is an empty string
dsenames['SYM_ROOT'] = dsenames['TSYMBOL']
dsenames['SYM_SUFFIX'] = ""
dsenames['TICKER_LEN'] = dsenames['TICKER'].str.len()
class_equal_last = dsenames.SHRCLS == dsenames.LAST
# nasdaq_long is True for NASDAQ securities with a ticker symbol
# longer than 4 characters. 4 is the maximum number of characters for
# a ticker symbol on the NASDAQ.
nasdaq_long = self.nasdaq & (dsenames.SYMBOL_LEN > 4)
# flag1 denotes securities with share class suffixes,
# e.g. a company on the NASDAQ that has Class A and Class B shares
flag1 = nasdaq_long & class_equal_last
# flag2 denotes two cases:
# - Suffixes Y and F denote shares in foreign companies
# - Suffixes J and K denote voting and non-voting shares, respectively
flag2 = ~flag1 & nasdaq_long & dsenames.LAST.isin(["Y", "J", "F", "K"])
# flag3 denotes stocks going through reverse stock split
# these securities keep this ticker symbol for ~3 weeks post-split
flag3 = ~flag1 & nasdaq_long & (dsenames.LAST == "D")
# flag4 denotes non-NASDAQ stocks w/ share class suffix
flag4 = ~self.nasdaq & (dsenames.SYMBOL_LEN > 3) & class_equal_last
# There is a fifth set of suffixed ticker symbols that do not fit into
# the above categories, but they do have a unifying manual adjustment.
# We denote this set as "funny" (not "funny" ha ha).
funny_permnos = [85254, 29938, 29946, 93093, 92118, 83275, 82924,
82932, 77158, 46950, 90655]
funny = (dsenames.PERMNO.isin(funny_permnos) &
(dsenames.SYMBOL_LEN - dsenames.TICKER_LEN == 1) &
dsenames.LAST.isin(["A", "B", "C", "S"])
)
dsenames.loc[flag4, "FLAG"] = 4
dsenames.loc[flag3, "FLAG"] = 3
dsenames.loc[flag2, "FLAG"] = 2
dsenames.loc[flag1, "FLAG"] = 1
# Here, we group together the symboled suffixes to make the final
# root-suffix separation cleaner. `sym5_with_suffix` is the set of
# special cases with more than 4 characters in the symbol
sym5_with_suffix = flag1 | flag2 | flag3
symbol_with_suffix = flag4 | funny | sym5_with_suffix
# Finally, the big enchilada, the separation of each ticker symbol into
# its root and its symbol. Since we are only dealing with suffixes of
# length 1, the root will consist of all but the last character, and
# the suffix will be the ticker symbol's last character
dsenames.loc[symbol_with_suffix, "SYM_ROOT"] = \
dsenames.loc[symbol_with_suffix, "TSYMBOL"].str[0:-1]
dsenames.loc[symbol_with_suffix, "SYM_SUFFIX"] = \
dsenames.loc[symbol_with_suffix, "TSYMBOL"].str[-1]
# There were a few wonky observations, so we do some additional manual
# adjustments
dsenames.loc[dsenames.PERMNO == 14461, "SYM_ROOT"] = \
dsenames.loc[dsenames.PERMNO == 14461, "TSYMBOL"].str[0:-1]
dsenames.loc[dsenames.PERMNO == 14461, "SYM_SUFFIX"] = \
dsenames.loc[dsenames.PERMNO == 14461, "TSYMBOL"].str[-1]
dsenames.loc[dsenames.PERMNO == 13914, "SYM_ROOT"] = \
dsenames.loc[dsenames.PERMNO == 13914, "TSYMBOL"]
dsenames.loc[dsenames.PERMNO == 13914, "SYM_SUFFIX"] = ""
dsenames.loc[dsenames.PERMNO == 92895, "SYM_ROOT"] = "SAPX"
dsenames.loc[dsenames.PERMNO == 92895, "SYM_SUFFIX"] = ""
return dsenames
def drop_dups(self, dsenames):
'''Consolidates multiple records for same ticker symbol into one
by collapsing trading date range
'''
# Finally, we want to ensure that, when the same information is
# recorded, the date range listed for the record reflects the entire
# range over which the security was actively trading.
# For instance, if a security stopped trading for a six month period,
# it has two entries in this file. We want both of those entries to
# include beginning date for the security's trading before the six
# month break and the end date for the security's trading after the six
# month break.
# To do this, we first want to reset the index in the dsenames dataframe
dsenames = dsenames.reset_index(drop=True)
# When we say that we want to adjust the dates 'when the same
# information is recorded,' we make that adjustment based on the
# following seven variables in the data frame:
# - Permno, the two components of the ticker symbol, the name of the
# company, the CUSIP number (current and historical), and
# - the primary exchange on which the security trades
# We first create a new data frame sorted on these 7 columns, which
# only includes said 7 columns
levels_sort = ['PERMNO', 'SYM_ROOT', 'SYM_SUFFIX', 'COMNAM', 'CUSIP',
'NCUSIP', 'PRIMEXCH']
dsenames_sort = dsenames.sort_values(by=levels_sort).loc[:, levels_sort]
dsenames = dsenames.sort_values(by=levels_sort)
# We create two new variables, begdate and enddate, to capture the full
# range of dates for which each security trades. The default case, when
# a security only matches with itself based on the 7 sort levels, is
# that the beginning date is the same as the beginning effective name
# date, and the end date is the same as the end effective name date.
dsenames['BEGDATE'] = dsenames.NAMEDT
dsenames['ENDDATE'] = dsenames.NAMEENDT
# We create a new dataframe that only includes the sort variables
dsenames_sort_squish = dsenames_sort.loc[:, levels_sort]
# Here, we create two copies of the dataframe:
# 1. One without the first record, and
# 2. one without the last
dsenames_nofirst = dsenames_sort_squish.iloc[1:].reset_index(drop=True)
dsenames_nolast = dsenames_sort_squish.iloc[:-1].reset_index(drop=True)
# We then create a boolean matrix based on whether the entries of each
# matrix match
compare_matrix = (dsenames_nofirst == dsenames_nolast)
# If the i-th record matches the next record for all 7 variables, then
# the i-th row of the compare matrix will be all true. We extract the
# index for subsetting purposes
same_as_below = compare_matrix.all(axis=1)
same_as_below_index = same_as_below.index[same_as_below]
# In order to collapse the end dates, we will also need an index to
# indicate if a record is the same as the one above. This is simply
# captured by adding 1 to the first index we found
same_as_above_index = same_as_below_index + 1
# Finally, we loop through the first Int64Index we found to bring the
# earliest `BEGDATE` for a record down to all of its matches. Doing
# this matching iteratively mitigates the issue of a particular
# security having more than 2 records match based on the 7 variables.
for i in same_as_above_index:
dsenames['BEGDATE'].iat[i] = dsenames['BEGDATE'].iat[i-1]
# Similar logic is used to bring the latest ENDDATE up - we just do it
# backwards
for i in same_as_below_index[::-1]:
dsenames['ENDDATE'].iat[i] = dsenames['ENDDATE'].iat[i+1]
# Finally, we output a final dataframe that includes only the columns
# we sorted on and our new date variables. Since the same information
# is recorded for these files now, we drop the duplicates
final_columns = levels_sort + ['BEGDATE', 'ENDDATE']
return dsenames.drop_duplicates(subset=final_columns).loc[:, final_columns]
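# Hypothetical usage sketch -- the CSV path, symbol root and lookup date are
# assumptions, not values shipped with this module.
if __name__ == '__main__':
    pm = Permno_Map(dsefile='crsp/dsenames.csv')
    pm.process(pm.dsenames)
    print(pm.get_permno(pm.clean_dsenames, 'IBM', 20120301))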
|
bsd-2-clause
|
yglazko/socorro
|
socorro/unittest/testlib/testCreateJsonDumpStore.py
|
11
|
5682
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import socorro.unittest.testlib.createJsonDumpStore as createJDS
import os
import shutil
from nose.tools import *
def testGetSlot():
testData = [
(0,1,0),
(0,30,0),
(1,5,0),
(1,12,0),
(4,5,0),
(5,5,5),
(29,30,0),
(30,30,30),
(59,5,55),
(59,12,48),
]
for minutes, size, expected in testData:
got = createJDS.getSlot(size,minutes)
assert expected == got, 'expected %s from getSlot(%s,%s), got %s'%(expected,size,minutes,got)
assert_raises(ZeroDivisionError, createJDS.getSlot, 0, 12)
def testMinimalJsonFileContents():
testMap = {'first':'a%d'}
gen = createJDS.minimalJsonFileContents(testMap)
for i in range(3):
expected = '{"first": "a%d"}'%i
got = gen.next()
assert expected == got
gen = createJDS.minimalJsonFileContents()
for i in range(3):
expected = '{"BuildID": "bogusBuildID-%02d", "Version": "bogusVersion-%02d", "ProductName": "bogusName-%02d"}'%(i,i,i)
got = gen.next()
assert expected == got
def testCreateTestSet():
testDir = "./TEST_CREATE_DIR"
try:
shutil.rmtree(testDir)
except:
pass
assert not os.path.exists(testDir)
try:
createJDS.createTestSet({},{},testDir)
assert os.path.isdir(testDir)
finally:
try:
shutil.rmtree(testDir)
except:
pass
expected = {
'%s/20071025/date/05'%testDir:(set(['04']), set([])),
'%s/20071025/date'%testDir:(set(['05']), set([])),
'%s/20071025/name/0b/ba/61/c5'%testDir:(set(['0bba61c5-dfc3-43e7-effe-8afd20071025']), set(['0bba61c5-dfc3-43e7-effe-8afd20071025.dump', '0bba61c5-dfc3-43e7-effe-8afd20071025.json'])),
'%s/20071025/name/0b'%testDir:(set(['ba']), set([])),
'%s/20071025/date/05/04'%testDir:(set(['webhead02_0']), set([])),
'%s/20071025/name/0b/ba/61'%testDir:(set(['c5']), set([])),
'%s/20071025'%testDir:(set(['date', 'name']), set([])),
'%s/20071025/date/05/04/webhead02_0'%testDir:(set(['0bba61c5-dfc3-43e7-effe-8afd20071025']), set([])),
'%s/20071025/name'%testDir:(set(['0b']), set([])),
'%s'%testDir:(set(['20071025']), set([])),
'%s/20071025/name/0b/ba'%testDir:(set(['61']), set([])),
}
minSet = {'0bba61c5-dfc3-43e7-effe-8afd20071025': ('2007-10-25-05-04','webhead02','0b/ba/61/c5','2007/10/25/05/00/webhead02_0')}
try:
createJDS.createTestSet(minSet,{},testDir)
got = {}
for dirpath, dirs, files in os.walk(testDir):
got[dirpath] = (set(dirs),set(files))
if expected != got:
print
for k, v in expected.items():
print ' X %s: %s'%(k,v)
if k in got:
if got[k] == expected[k]:
print ' G %s: %s'%(k,got[k])
else:
print 'xx G %s: %s'%(k,got[k])
else:
print 'xx G %s: (IS MISSING)'%(k)
for k,v in got.items():
if not k in expected:
print '++ G %s: %s'%(k,v)
assert expected == got
f = open(os.path.join(testDir,'20071025/name/0b/ba/61/c5/0bba61c5-dfc3-43e7-effe-8afd20071025.dump'))
data = f.readlines()
assert 1 == len(data)
assert 'dump test of 0bba61c5-dfc3-43e7-effe-8afd20071025' == data[0].strip()
f.close()
f = open(os.path.join(testDir,'20071025/name/0b/ba/61/c5/0bba61c5-dfc3-43e7-effe-8afd20071025.json'))
data = f.readlines()
assert 1 == len(data)
assert 'json test of 0bba61c5-dfc3-43e7-effe-8afd20071025' == data[0].strip()
f.close()
finally:
try:
shutil.rmtree(testDir)
except:
pass
try:
createJDS.createTestSet(minSet,{'jsonIsEmpty':True},testDir)
f = open(os.path.join(testDir,'20071025/name/0b/ba/61/c5/0bba61c5-dfc3-43e7-effe-8afd20071025.dump'))
data = f.readlines()
assert 1 == len(data)
assert 'dump test of 0bba61c5-dfc3-43e7-effe-8afd20071025' == data[0].strip()
f.close()
f = open(os.path.join(testDir,'20071025/name/0b/ba/61/c5/0bba61c5-dfc3-43e7-effe-8afd20071025.json'))
data = f.readlines()
assert 0 == len(data)
f.close()
finally:
try:
shutil.rmtree(testDir)
except:
pass
try:
createJDS.createTestSet(minSet,{'jsonIsBogus':False, 'jsonFileGenerator':'default'},testDir)
f = open(os.path.join(testDir,'20071025/name/0b/ba/61/c5/0bba61c5-dfc3-43e7-effe-8afd20071025.dump'))
data = f.readlines()
assert 1 == len(data)
assert 'dump test of 0bba61c5-dfc3-43e7-effe-8afd20071025' == data[0].strip()
f.close()
f = open(os.path.join(testDir,'20071025/name/0b/ba/61/c5/0bba61c5-dfc3-43e7-effe-8afd20071025.json'))
data = f.readlines()
assert 1 == len(data)
expect='{"BuildID": "bogusBuildID-00", "Version": "bogusVersion-00", "ProductName": "bogusName-00"}'
assert expect == data[0].strip()
f.close()
finally:
try:
shutil.rmtree(testDir)
except:
pass
try:
createJDS.createTestSet(minSet,{'jsonIsBogus':False},testDir)
f = open(os.path.join(testDir,'20071025/name/0b/ba/61/c5/0bba61c5-dfc3-43e7-effe-8afd20071025.dump'))
data = f.readlines()
assert 1 == len(data)
assert 'dump test of 0bba61c5-dfc3-43e7-effe-8afd20071025' == data[0].strip()
f.close()
f = open(os.path.join(testDir,'20071025/name/0b/ba/61/c5/0bba61c5-dfc3-43e7-effe-8afd20071025.json'))
data = f.readlines()
assert 1 == len(data)
expect='{"what": "legal json, bad contents", "uuid": "0bba61c5-dfc3-43e7-effe-8afd20071025"}'
assert expect == data[0].strip()
f.close()
finally:
try:
shutil.rmtree(testDir)
except:
pass
|
mpl-2.0
|
quanvm009/codev7
|
openerp/addons/sale_margin/sale_margin.py
|
17
|
4272
|
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class sale_order_line(osv.osv):
_inherit = "sale.order.line"
def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False, context=None):
res = super(sale_order_line, self).product_id_change(cr, uid, ids, pricelist, product, qty=qty,
uom=uom, qty_uos=qty_uos, uos=uos, name=name, partner_id=partner_id,
lang=lang, update_tax=update_tax, date_order=date_order, packaging=packaging, fiscal_position=fiscal_position, flag=flag, context=context)
if not pricelist:
return res
if context is None:
context = {}
frm_cur = self.pool.get('res.users').browse(cr, uid, uid).company_id.currency_id.id
to_cur = self.pool.get('product.pricelist').browse(cr, uid, [pricelist])[0].currency_id.id
if product:
product = self.pool['product.product'].browse(cr, uid, product, context=context)
purchase_price = product.standard_price
to_uom = res.get('product_uom', uom)
if to_uom != product.uom_id.id:
purchase_price = self.pool['product.uom']._compute_price(cr, uid, product.uom_id.id, purchase_price, to_uom)
ctx = context.copy()
ctx['date'] = date_order
price = self.pool.get('res.currency').compute(cr, uid, frm_cur, to_cur, purchase_price, round=False, context=ctx)
res['value'].update({'purchase_price': price})
return res
def _product_margin(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = 0
if line.product_id:
res[line.id] = round(line.price_subtotal - ((line.purchase_price or line.product_id.standard_price) * line.product_uos_qty), 2)
return res
_columns = {
'margin': fields.function(_product_margin, string='Margin',
store = True),
'purchase_price': fields.float('Cost Price', digits=(16,2))
}
sale_order_line()
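# Worked example of the margin formula above, with made-up figures: a line
# with price_subtotal = 150.0, purchase_price = 12.0 and product_uos_qty = 10
# stores margin = round(150.0 - 12.0 * 10, 2) == 30.0.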
class sale_order(osv.osv):
_inherit = "sale.order"
def _product_margin(self, cr, uid, ids, field_name, arg, context=None):
result = {}
for sale in self.browse(cr, uid, ids, context=context):
result[sale.id] = 0.0
for line in sale.order_line:
result[sale.id] += line.margin or 0.0
return result
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('sale.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
_columns = {
'margin': fields.function(_product_margin, string='Margin', help="It gives profitability by calculating the difference between the Unit Price and the cost price.", store={
'sale.order.line': (_get_order, ['margin'], 20),
'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 20),
}),
}
sale_order()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
halberom/ansible
|
lib/ansible/modules/network/iosxr/iosxr_config.py
|
2
|
11001
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = """
---
module: iosxr_config
version_added: "2.1"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage Cisco IOS XR configuration sections
description:
- Cisco IOS XR configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with IOS XR configuration sections in
a deterministic way.
extends_documentation_fragment: iosxr
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
required: false
default: null
aliases: ['commands']
parents:
description:
- The ordered set of parents that uniquely identify the section
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
required: false
default: null
src:
description:
- Specifies the source path to the file that contains the configuration
or configuration template to load. The path to the source file can
either be the full path on the Ansible control host or a relative
path from the playbook or role root directory. This argument is mutually
exclusive with I(lines).
required: false
default: null
version_added: "2.2"
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
required: false
default: null
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
required: false
default: null
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
required: false
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct.
required: false
default: line
choices: ['line', 'block', 'config']
force:
description:
- The force argument instructs the module to not consider the
current device's running-config. When set to true, this will
cause the module to push the contents of I(src) into the device
without first checking if already configured.
- Note this argument should be considered deprecated. To achieve
the equivalent, set the C(match=none) which is idempotent. This argument
will be removed in a future release.
required: false
default: false
choices: [ "yes", "no" ]
version_added: "2.2"
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
required: false
default: null
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. The backup file is written to the C(backup)
folder in the playbook root directory. If the directory does not
exist, it is created.
required: false
default: no
choices: ['yes', 'no']
version_added: "2.2"
comment:
description:
- Allows a commit description to be specified to be included
when the configuration is committed. If the configuration is
not changed or committed, this argument is ignored.
required: false
default: 'configured by iosxr_config'
version_added: "2.2"
"""
EXAMPLES = """
- name: configure top level configuration
iosxr_config:
lines: hostname {{ inventory_hostname }}
- name: configure interface settings
iosxr_config:
lines:
- description test interface
- ip address 172.31.1.1 255.255.255.0
parents: interface GigabitEthernet0/0/0/0
- name: load a config from disk and replace the current config
iosxr_config:
src: config.cfg
replace: config
backup: yes
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: Only when lines is specified.
type: list
sample: ['...', '...']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: path
sample: /playbooks/ansible/backup/iosxr01.2016-07-16@22:28:34
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import NetworkConfig, dumps
from ansible.module_utils.iosxr import load_config, get_config
from ansible.module_utils.iosxr import iosxr_argument_spec
DEFAULT_COMMIT_COMMENT = 'configured by iosxr_config'
def check_args(module, warnings):
if module.params['comment']:
if len(module.params['comment']) > 60:
module.fail_json(msg='comment argument cannot be more than 60 characters')
if module.params['force']:
warnings.append('The force argument is deprecated, please use '
'match=none instead. This argument will be '
'removed in the future')
def get_running_config(module):
contents = module.params['config']
if not contents:
contents = get_config(module)
return NetworkConfig(indent=1, contents=contents)
def get_candidate(module):
candidate = NetworkConfig(indent=1)
if module.params['src']:
candidate.load(module.params['src'])
elif module.params['lines']:
parents = module.params['parents'] or list()
candidate.add(module.params['lines'], parents=parents)
return candidate
def run(module, result):
match = module.params['match']
replace = module.params['replace']
replace_config = replace == 'config'
path = module.params['parents']
comment = module.params['comment']
check_mode = module.check_mode
candidate = get_candidate(module)
if match != 'none' and replace != 'config':
contents = get_running_config(module)
configobj = NetworkConfig(contents=contents, indent=1)
commands = candidate.difference(configobj, path=path, match=match,
replace=replace)
else:
commands = candidate.items
if commands:
commands = dumps(commands, 'commands').split('\n')
if any((module.params['lines'], module.params['src'])):
if module.params['before']:
commands[:0] = module.params['before']
if module.params['after']:
commands.extend(module.params['after'])
result['commands'] = commands
diff = load_config(module, commands, not check_mode,
replace_config, comment)
if diff:
result['diff'] = dict(prepared=diff)
result['changed'] = True
def main():
"""main entry point for module execution
"""
argument_spec = dict(
src=dict(type='path'),
lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
before=dict(type='list'),
after=dict(type='list'),
match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
replace=dict(default='line', choices=['line', 'block', 'config']),
# this argument is deprecated in favor of setting match: none
# it will be removed in a future version
force=dict(default=False, type='bool'),
config=dict(),
backup=dict(type='bool', default=False),
comment=dict(default=DEFAULT_COMMIT_COMMENT),
)
argument_spec.update(iosxr_argument_spec)
mutually_exclusive = [('lines', 'src')]
required_if = [('match', 'strict', ['lines']),
('match', 'exact', ['lines']),
('replace', 'block', ['lines']),
('replace', 'config', ['src'])]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
required_if=required_if,
supports_check_mode=True)
if module.params['force'] is True:
module.params['match'] = 'none'
warnings = list()
check_args(module, warnings)
result = dict(changed=False, warnings=warnings)
if module.params['backup']:
result['__backup__'] = get_config(module)
run(module, result)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
zero-rp/miniblink49
|
v8_7_5/tools/gen-inlining-tests.py
|
14
|
15813
|
#!/usr/bin/env python
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# for py2/py3 compatibility
from __future__ import print_function
from collections import namedtuple
import textwrap
import sys
SHARD_FILENAME_TEMPLATE = "test/mjsunit/compiler/inline-exception-{shard}.js"
# Generates 2 files. Found by trial and error.
SHARD_SIZE = 97
PREAMBLE = """
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --no-always-opt
// This test file was generated by tools/gen-inlining-tests.py .
// Global variables
var deopt = undefined; // either true or false
var counter = 0;
function resetState() {
counter = 0;
}
function warmUp(f) {
try {
f();
} catch (ex) {
// ok
}
try {
f();
} catch (ex) {
// ok
}
}
function resetOptAndAssertResultEquals(expected, f) {
warmUp(f);
resetState();
// %DebugPrint(f);
eval("'dont optimize this function itself please, but do optimize f'");
%OptimizeFunctionOnNextCall(f);
assertEquals(expected, f());
}
function resetOptAndAssertThrowsWith(expected, f) {
warmUp(f);
resetState();
// %DebugPrint(f);
eval("'dont optimize this function itself please, but do optimize f'");
%OptimizeFunctionOnNextCall(f);
try {
var result = f();
fail("resetOptAndAssertThrowsWith",
"exception: " + expected,
"result: " + result);
} catch (ex) {
assertEquals(expected, ex);
}
}
function increaseAndReturn15() {
if (deopt) %DeoptimizeFunction(f);
counter++;
return 15;
}
function increaseAndThrow42() {
if (deopt) %DeoptimizeFunction(f);
counter++;
throw 42;
}
function increaseAndReturn15_noopt_inner() {
if (deopt) %DeoptimizeFunction(f);
counter++;
return 15;
}
%NeverOptimizeFunction(increaseAndReturn15_noopt_inner);
function increaseAndThrow42_noopt_inner() {
if (deopt) %DeoptimizeFunction(f);
counter++;
throw 42;
}
%NeverOptimizeFunction(increaseAndThrow42_noopt_inner);
// Alternative 1
function returnOrThrow(doReturn) {
if (doReturn) {
return increaseAndReturn15();
} else {
return increaseAndThrow42();
}
}
// Alternative 2
function increaseAndReturn15_calls_noopt() {
return increaseAndReturn15_noopt_inner();
}
function increaseAndThrow42_calls_noopt() {
return increaseAndThrow42_noopt_inner();
}
// Alternative 3.
// When passed either {increaseAndReturn15} or {increaseAndThrow42}, it acts
// as the other one.
function invertFunctionCall(f) {
var result;
try {
result = f();
} catch (ex) {
return ex - 27;
}
throw result + 27;
}
// Alternative 4: constructor
function increaseAndStore15Constructor() {
if (deopt) %DeoptimizeFunction(f);
++counter;
this.x = 15;
}
function increaseAndThrow42Constructor() {
if (deopt) %DeoptimizeFunction(f);
++counter;
this.x = 42;
throw this.x;
}
// Alternative 5: property
var magic = {};
Object.defineProperty(magic, 'prop', {
get: function () {
if (deopt) %DeoptimizeFunction(f);
return 15 + 0 * ++counter;
},
set: function(x) {
// argument should be 37
if (deopt) %DeoptimizeFunction(f);
counter -= 36 - x; // increments counter
throw 42;
}
})
// Generate type feedback.
assertEquals(15, increaseAndReturn15_calls_noopt());
assertThrowsEquals(function() { return increaseAndThrow42_noopt_inner() }, 42);
assertEquals(15, (new increaseAndStore15Constructor()).x);
assertThrowsEquals(function() {
return (new increaseAndThrow42Constructor()).x;
},
42);
function runThisShard() {
""".strip()
def booltuples(n):
"""booltuples(2) yields 4 tuples: (False, False), (False, True),
(True, False), (True, True)."""
assert isinstance(n, int)
if n <= 0:
yield ()
else:
for initial in booltuples(n-1):
yield initial + (False,)
yield initial + (True,)
def fnname(flags):
assert len(FLAGLETTERS) == len(flags)
return "f_" + ''.join(
FLAGLETTERS[i] if b else '_'
for (i, b) in enumerate(flags))
NUM_TESTS_PRINTED = 0
NUM_TESTS_IN_SHARD = 0
def printtest(flags):
"""Print a test case. Takes a couple of boolean flags, on which the
printed Javascript code depends."""
assert all(isinstance(flag, bool) for flag in flags)
# The alternative flags are in reverse order so that if we take all possible
# tuples, ordered lexicographically from false to true, we get first the
# default, then alternative 1, then 2, etc.
(
alternativeFn5, # use alternative #5 for returning/throwing:
# return/throw using property
alternativeFn4, # use alternative #4 for returning/throwing:
# return/throw using constructor
alternativeFn3, # use alternative #3 for returning/throwing:
# return/throw indirectly, based on function argument
alternativeFn2, # use alternative #2 for returning/throwing:
# return/throw indirectly in unoptimized code,
# no branching
alternativeFn1, # use alternative #1 for returning/throwing:
# return/throw indirectly, based on boolean arg
tryThrows, # in try block, call throwing function
tryReturns, # in try block, call returning function
tryFirstReturns, # in try block, returning goes before throwing
tryResultToLocal, # in try block, result goes to local variable
doCatch, # include catch block
catchReturns, # in catch block, return
catchWithLocal, # in catch block, modify or return the local variable
catchThrows, # in catch block, throw
doFinally, # include finally block
finallyReturns, # in finally block, return local variable
finallyThrows, # in finally block, throw
endReturnLocal, # at very end, return variable local
deopt, # deopt inside inlined function
) = flags
# BASIC RULES
# Only one alternative can be applied at any time.
if (alternativeFn1 + alternativeFn2 + alternativeFn3 + alternativeFn4
+ alternativeFn5 > 1):
return
# In try, return or throw, or both.
if not (tryReturns or tryThrows): return
# Either doCatch or doFinally.
if not doCatch and not doFinally: return
# Catch flags only make sense when catching
if not doCatch and (catchReturns or catchWithLocal or catchThrows):
return
# Finally flags only make sense when finallying
if not doFinally and (finallyReturns or finallyThrows):
return
# tryFirstReturns is only relevant when both tryReturns and tryThrows are
# true.
if tryFirstReturns and not (tryReturns and tryThrows): return
# From the try and finally block, we can return or throw, but not both.
if catchReturns and catchThrows: return
if finallyReturns and finallyThrows: return
# If at the end we return the local, we need to have touched it.
if endReturnLocal and not (tryResultToLocal or catchWithLocal): return
# PRUNING
anyAlternative = any([alternativeFn1, alternativeFn2, alternativeFn3,
alternativeFn4, alternativeFn5])
specificAlternative = any([alternativeFn2, alternativeFn3])
rareAlternative = not specificAlternative
# If try returns and throws, then don't catchWithLocal, endReturnLocal, or
# deopt, or do any alternative.
if (tryReturns and tryThrows and
(catchWithLocal or endReturnLocal or deopt or anyAlternative)):
return
# We don't do any alternative if we do a finally.
if doFinally and anyAlternative: return
# We only use the local variable if we do alternative #2 or #3.
if ((tryResultToLocal or catchWithLocal or endReturnLocal) and
not specificAlternative):
return
# We don't need to test deopting into a finally.
if doFinally and deopt: return
# We're only interested in alternative #2 if we have endReturnLocal, no
# catchReturns, and no catchThrows, and deopt.
if (alternativeFn2 and
(not endReturnLocal or catchReturns or catchThrows or not deopt)):
return
# Flag check succeeded.
trueFlagNames = [name for (name, value) in flags._asdict().items() if value]
flagsMsgLine = " // Variant flags: [{}]".format(', '.join(trueFlagNames))
write(textwrap.fill(flagsMsgLine, subsequent_indent=' // '))
write("")
if not anyAlternative:
fragments = {
'increaseAndReturn15': 'increaseAndReturn15()',
'increaseAndThrow42': 'increaseAndThrow42()',
}
elif alternativeFn1:
fragments = {
'increaseAndReturn15': 'returnOrThrow(true)',
'increaseAndThrow42': 'returnOrThrow(false)',
}
elif alternativeFn2:
fragments = {
'increaseAndReturn15': 'increaseAndReturn15_calls_noopt()',
'increaseAndThrow42': 'increaseAndThrow42_calls_noopt()',
}
elif alternativeFn3:
fragments = {
'increaseAndReturn15': 'invertFunctionCall(increaseAndThrow42)',
'increaseAndThrow42': 'invertFunctionCall(increaseAndReturn15)',
}
elif alternativeFn4:
fragments = {
'increaseAndReturn15': '(new increaseAndStore15Constructor()).x',
'increaseAndThrow42': '(new increaseAndThrow42Constructor()).x',
}
else:
assert alternativeFn5
fragments = {
'increaseAndReturn15': 'magic.prop /* returns 15 */',
'increaseAndThrow42': '(magic.prop = 37 /* throws 42 */)',
}
# As we print code, we also maintain what the result should be. Variable
# {result} can be one of three things:
#
# - None, indicating returning JS null
# - ("return", n) with n an integer
# - ("throw", n), with n an integer
result = None
# We also maintain what the counter should be at the end.
# The counter is reset just before f is called.
counter = 0
write( " f = function {} () {{".format(fnname(flags)))
write( " var local = 888;")
write( " deopt = {};".format("true" if deopt else "false"))
local = 888
write( " try {")
write( " counter++;")
counter += 1
resultTo = "local +=" if tryResultToLocal else "return"
if tryReturns and not (tryThrows and not tryFirstReturns):
write( " {} 4 + {increaseAndReturn15};".format(resultTo, **fragments))
if result == None:
counter += 1
if tryResultToLocal:
local += 19
else:
result = ("return", 19)
if tryThrows:
write( " {} 4 + {increaseAndThrow42};".format(resultTo, **fragments))
if result == None:
counter += 1
result = ("throw", 42)
if tryReturns and tryThrows and not tryFirstReturns:
write( " {} 4 + {increaseAndReturn15};".format(resultTo, **fragments))
if result == None:
counter += 1
if tryResultToLocal:
local += 19
else:
result = ("return", 19)
write( " counter++;")
if result == None:
counter += 1
if doCatch:
write( " } catch (ex) {")
write( " counter++;")
if isinstance(result, tuple) and result[0] == 'throw':
counter += 1
if catchThrows:
write(" throw 2 + ex;")
if isinstance(result, tuple) and result[0] == "throw":
result = ('throw', 2 + result[1])
elif catchReturns and catchWithLocal:
write(" return 2 + local;")
if isinstance(result, tuple) and result[0] == "throw":
result = ('return', 2 + local)
elif catchReturns and not catchWithLocal:
write(" return 2 + ex;");
if isinstance(result, tuple) and result[0] == "throw":
result = ('return', 2 + result[1])
elif catchWithLocal:
write(" local += ex;");
if isinstance(result, tuple) and result[0] == "throw":
local += result[1]
result = None
counter += 1
else:
if isinstance(result, tuple) and result[0] == "throw":
result = None
counter += 1
write( " counter++;")
if doFinally:
write( " } finally {")
write( " counter++;")
counter += 1
if finallyThrows:
write(" throw 25;")
result = ('throw', 25)
elif finallyReturns:
write(" return 3 + local;")
result = ('return', 3 + local)
elif not finallyReturns and not finallyThrows:
write(" local += 2;")
local += 2
counter += 1
else: assert False # unreachable
write( " counter++;")
write( " }")
write( " counter++;")
if result == None:
counter += 1
if endReturnLocal:
write( " return 5 + local;")
if result == None:
result = ('return', 5 + local)
write( " }")
if result == None:
write( " resetOptAndAssertResultEquals(undefined, f);")
else:
tag, value = result
if tag == "return":
write( " resetOptAndAssertResultEquals({}, f);".format(value))
else:
assert tag == "throw"
write( " resetOptAndAssertThrowsWith({}, f);".format(value))
write( " assertEquals({}, counter);".format(counter))
write( "")
global NUM_TESTS_PRINTED, NUM_TESTS_IN_SHARD
NUM_TESTS_PRINTED += 1
NUM_TESTS_IN_SHARD += 1
FILE = None # to be initialised to an open file
SHARD_NUM = 1
def write(*args):
return print(*args, file=FILE)
def rotateshard():
global FILE, NUM_TESTS_IN_SHARD, SHARD_SIZE
if MODE != 'shard':
return
if FILE != None and NUM_TESTS_IN_SHARD < SHARD_SIZE:
return
if FILE != None:
finishshard()
assert FILE == None
FILE = open(SHARD_FILENAME_TEMPLATE.format(shard=SHARD_NUM), 'w')
write_shard_header()
NUM_TESTS_IN_SHARD = 0
def finishshard():
global FILE, SHARD_NUM, MODE
assert FILE
write_shard_footer()
if MODE == 'shard':
print("Wrote shard {}.".format(SHARD_NUM))
FILE.close()
FILE = None
SHARD_NUM += 1
def write_shard_header():
if MODE == 'shard':
write("// Shard {}.".format(SHARD_NUM))
write("")
write(PREAMBLE)
write("")
def write_shard_footer():
write("}")
write("%NeverOptimizeFunction(runThisShard);")
write("")
write("// {} tests in this shard.".format(NUM_TESTS_IN_SHARD))
write("// {} tests up to here.".format(NUM_TESTS_PRINTED))
write("")
write("runThisShard();")
FLAGLETTERS="54321trflcrltfrtld"
flagtuple = namedtuple('flagtuple', (
"alternativeFn5",
"alternativeFn4",
"alternativeFn3",
"alternativeFn2",
"alternativeFn1",
"tryThrows",
"tryReturns",
"tryFirstReturns",
"tryResultToLocal",
"doCatch",
"catchReturns",
"catchWithLocal",
"catchThrows",
"doFinally",
"finallyReturns",
"finallyThrows",
"endReturnLocal",
"deopt"
))
emptyflags = flagtuple(*((False,) * len(flagtuple._fields)))
f1 = emptyflags._replace(tryReturns=True, doCatch=True)
# You can test function printtest with f1.
allFlagCombinations = [
flagtuple(*bools)
for bools in booltuples(len(flagtuple._fields))
]
if __name__ == '__main__':
global MODE
if sys.argv[1:] == []:
MODE = 'stdout'
print("// Printing all shards together to stdout.")
print("")
write_shard_header()
FILE = sys.stdout
elif sys.argv[1:] == ['--shard-and-overwrite']:
MODE = 'shard'
else:
print("Usage:")
print("")
print(" python {}".format(sys.argv[0]))
print(" print all tests to standard output")
print(" python {} --shard-and-overwrite".format(sys.argv[0]))
print(" print all tests to {}".format(SHARD_FILENAME_TEMPLATE))
print("")
print(sys.argv[1:])
print("")
sys.exit(1)
rotateshard()
for flags in allFlagCombinations:
printtest(flags)
rotateshard()
finishshard()
if MODE == 'shard':
print("Total: {} tests.".format(NUM_TESTS_PRINTED))
|
apache-2.0
|
ojengwa/talk
|
venv/lib/python2.7/site-packages/markdown/extensions/abbr.py
|
13
|
2786
|
'''
Abbreviation Extension for Python-Markdown
==========================================
This extension adds abbreviation handling to Python-Markdown.
See <https://pythonhosted.org/Markdown/extensions/abbreviations.html>
for documentation.
Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/) and
[Seemant Kulleen](http://www.kulleen.org/)
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
'''
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..preprocessors import Preprocessor
from ..inlinepatterns import Pattern
from ..util import etree, AtomicString
import re
# Global Vars
ABBR_REF_RE = re.compile(r'[*]\[(?P<abbr>[^\]]*)\][ ]?:\s*(?P<title>.*)')
class AbbrExtension(Extension):
""" Abbreviation Extension for Python-Markdown. """
def extendMarkdown(self, md, md_globals):
""" Insert AbbrPreprocessor before ReferencePreprocessor. """
md.preprocessors.add('abbr', AbbrPreprocessor(md), '<reference')
class AbbrPreprocessor(Preprocessor):
""" Abbreviation Preprocessor - parse text for abbr references. """
def run(self, lines):
'''
Find and remove all Abbreviation references from the text.
Each reference is set as a new AbbrPattern in the markdown instance.
'''
new_text = []
for line in lines:
m = ABBR_REF_RE.match(line)
if m:
abbr = m.group('abbr').strip()
title = m.group('title').strip()
self.markdown.inlinePatterns['abbr-%s'%abbr] = \
AbbrPattern(self._generate_pattern(abbr), title)
else:
new_text.append(line)
return new_text
def _generate_pattern(self, text):
'''
Given a string, returns a regex pattern to match that string.
'HTML' -> r'(?P<abbr>[H][T][M][L])'
Note: we force each char as a literal match (in brackets) as we don't
know what they will be beforehand.
'''
chars = list(text)
for i in range(len(chars)):
chars[i] = r'[%s]' % chars[i]
return r'(?P<abbr>\b%s\b)' % (r''.join(chars))
class AbbrPattern(Pattern):
""" Abbreviation inline pattern. """
def __init__(self, pattern, title):
super(AbbrPattern, self).__init__(pattern)
self.title = title
def handleMatch(self, m):
abbr = etree.Element('abbr')
abbr.text = AtomicString(m.group('abbr'))
abbr.set('title', self.title)
return abbr
def makeExtension(*args, **kwargs):
return AbbrExtension(*args, **kwargs)
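# Minimal usage sketch (illustrative; assumes Python-Markdown 2.x extension
# name resolution):
#
#     import markdown
#     text = "The HTML spec\n\n*[HTML]: Hyper Text Markup Language"
#     markdown.markdown(text, extensions=['markdown.extensions.abbr'])
#     # -> '<p>The <abbr title="Hyper Text Markup Language">HTML</abbr> spec</p>'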
|
mit
|
stuntman723/rap-analyzer
|
rap_analyzer/lib/python2.7/site-packages/django/template/base.py
|
29
|
52382
|
"""
This is the Django template system.
How it works:
The Lexer.tokenize() function converts a template string (i.e., a string containing
markup with custom template tags) to tokens, which can be either plain text
(TOKEN_TEXT), variables (TOKEN_VAR) or block statements (TOKEN_BLOCK).
The Parser() class takes a list of tokens in its constructor, and its parse()
method returns a compiled template -- which is, under the hood, a list of
Node objects.
Each Node is responsible for creating some sort of output -- e.g. simple text
(TextNode), variable values in a given context (VariableNode), results of basic
logic (IfNode), results of looping (ForNode), or anything else. The core Node
types are TextNode, VariableNode, IfNode and ForNode, but plugin modules can
define their own custom node types.
Each Node has a render() method, which takes a Context and returns a string of
the rendered node. For example, the render() method of a Variable Node returns
the variable's value as a string. The render() method of a ForNode returns the
rendered output of whatever was inside the loop, recursively.
The Template class is a convenient wrapper that takes care of template
compilation and rendering.
Usage:
The only thing you should ever use directly in this file is the Template class.
Create a compiled template object with a template_string, then call render()
with a context. In the compilation stage, the TemplateSyntaxError exception
will be raised if the template doesn't have proper syntax.
Sample code:
>>> from django import template
>>> s = u'<html>{% if test %}<h1>{{ varvalue }}</h1>{% endif %}</html>'
>>> t = template.Template(s)
(t is now a compiled template, and its render() method can be called multiple
times with multiple contexts)
>>> c = template.Context({'test':True, 'varvalue': 'Hello'})
>>> t.render(c)
u'<html><h1>Hello</h1></html>'
>>> c = template.Context({'test':False, 'varvalue': 'Hello'})
>>> t.render(c)
u'<html></html>'
"""
from __future__ import unicode_literals
import re
import warnings
from functools import partial
from importlib import import_module
from inspect import getargspec, getcallargs
from django.apps import apps
from django.template.context import ( # NOQA: imported for backwards compatibility
BaseContext, Context, ContextPopException, RequestContext,
)
from django.utils import lru_cache, six
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import (
force_str, force_text, python_2_unicode_compatible,
)
from django.utils.formats import localize
from django.utils.html import conditional_escape
from django.utils.itercompat import is_iterable
from django.utils.module_loading import module_has_submodule
from django.utils.safestring import (
EscapeData, SafeData, mark_for_escaping, mark_safe,
)
from django.utils.text import (
get_text_list, smart_split, unescape_string_literal,
)
from django.utils.timezone import template_localtime
from django.utils.translation import pgettext_lazy, ugettext_lazy
TOKEN_TEXT = 0
TOKEN_VAR = 1
TOKEN_BLOCK = 2
TOKEN_COMMENT = 3
TOKEN_MAPPING = {
TOKEN_TEXT: 'Text',
TOKEN_VAR: 'Var',
TOKEN_BLOCK: 'Block',
TOKEN_COMMENT: 'Comment',
}
# template syntax constants
FILTER_SEPARATOR = '|'
FILTER_ARGUMENT_SEPARATOR = ':'
VARIABLE_ATTRIBUTE_SEPARATOR = '.'
BLOCK_TAG_START = '{%'
BLOCK_TAG_END = '%}'
VARIABLE_TAG_START = '{{'
VARIABLE_TAG_END = '}}'
COMMENT_TAG_START = '{#'
COMMENT_TAG_END = '#}'
TRANSLATOR_COMMENT_MARK = 'Translators'
SINGLE_BRACE_START = '{'
SINGLE_BRACE_END = '}'
ALLOWED_VARIABLE_CHARS = ('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_.')
# what to report as the origin for templates that come from non-loader sources
# (e.g. strings)
UNKNOWN_SOURCE = '<unknown source>'
# match a variable or block tag and capture the entire tag, including start/end
# delimiters
tag_re = (re.compile('(%s.*?%s|%s.*?%s|%s.*?%s)' %
(re.escape(BLOCK_TAG_START), re.escape(BLOCK_TAG_END),
re.escape(VARIABLE_TAG_START), re.escape(VARIABLE_TAG_END),
re.escape(COMMENT_TAG_START), re.escape(COMMENT_TAG_END))))
# global dictionary of libraries that have been loaded using get_library
libraries = {}
# global list of libraries to load by default for a new parser
builtins = []
class TemplateSyntaxError(Exception):
pass
class TemplateDoesNotExist(Exception):
pass
class TemplateEncodingError(Exception):
pass
@python_2_unicode_compatible
class VariableDoesNotExist(Exception):
def __init__(self, msg, params=()):
self.msg = msg
self.params = params
def __str__(self):
return self.msg % tuple(force_text(p, errors='replace') for p in self.params)
class InvalidTemplateLibrary(Exception):
pass
class Origin(object):
def __init__(self, name):
self.name = name
def reload(self):
raise NotImplementedError('subclasses of Origin must provide a reload() method')
def __str__(self):
return self.name
class StringOrigin(Origin):
def __init__(self, source):
super(StringOrigin, self).__init__(UNKNOWN_SOURCE)
self.source = source
def reload(self):
return self.source
class Template(object):
def __init__(self, template_string, origin=None, name=None, engine=None):
try:
template_string = force_text(template_string)
except UnicodeDecodeError:
raise TemplateEncodingError("Templates can only be constructed "
"from unicode or UTF-8 strings.")
# If Template is instantiated directly rather than from an Engine and
# exactly one Django template engine is configured, use that engine.
# This is required to preserve backwards-compatibility for direct use
# e.g. Template('...').render(Context({...}))
if engine is None:
from .engine import Engine
engine = Engine.get_default()
if engine.debug and origin is None:
origin = StringOrigin(template_string)
self.nodelist = engine.compile_string(template_string, origin)
self.name = name
self.origin = origin
self.engine = engine
def __iter__(self):
for node in self.nodelist:
for subnode in node:
yield subnode
def _render(self, context):
return self.nodelist.render(context)
def render(self, context):
"Display stage -- can be called many times"
context.render_context.push()
try:
if context.template is None:
with context.bind_template(self):
return self._render(context)
else:
return self._render(context)
finally:
context.render_context.pop()
class Token(object):
def __init__(self, token_type, contents):
# token_type must be TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK or
# TOKEN_COMMENT.
self.token_type, self.contents = token_type, contents
self.lineno = None
def __str__(self):
token_name = TOKEN_MAPPING[self.token_type]
return ('<%s token: "%s...">' %
(token_name, self.contents[:20].replace('\n', '')))
def split_contents(self):
split = []
bits = iter(smart_split(self.contents))
for bit in bits:
# Handle translation-marked template pieces
if bit.startswith('_("') or bit.startswith("_('"):
sentinel = bit[2] + ')'
trans_bit = [bit]
while not bit.endswith(sentinel):
bit = next(bits)
trans_bit.append(bit)
bit = ' '.join(trans_bit)
split.append(bit)
return split
class Lexer(object):
def __init__(self, template_string, origin):
self.template_string = template_string
self.origin = origin
self.lineno = 1
self.verbatim = False
def tokenize(self):
"""
Return a list of tokens from a given template_string.
"""
in_tag = False
result = []
for bit in tag_re.split(self.template_string):
if bit:
result.append(self.create_token(bit, in_tag))
in_tag = not in_tag
return result
def create_token(self, token_string, in_tag):
"""
Convert the given token string into a new Token object and return it.
If in_tag is True, we are processing something that matched a tag,
otherwise it should be treated as a literal string.
"""
if in_tag and token_string.startswith(BLOCK_TAG_START):
# The [2:-2] ranges below strip off *_TAG_START and *_TAG_END.
# We could do len(BLOCK_TAG_START) to be more "correct", but we've
# hard-coded the 2s here for performance. And it's not like
# the TAG_START values are going to change anytime, anyway.
block_content = token_string[2:-2].strip()
if self.verbatim and block_content == self.verbatim:
self.verbatim = False
if in_tag and not self.verbatim:
if token_string.startswith(VARIABLE_TAG_START):
token = Token(TOKEN_VAR, token_string[2:-2].strip())
elif token_string.startswith(BLOCK_TAG_START):
if block_content[:9] in ('verbatim', 'verbatim '):
self.verbatim = 'end%s' % block_content
token = Token(TOKEN_BLOCK, block_content)
elif token_string.startswith(COMMENT_TAG_START):
content = ''
if token_string.find(TRANSLATOR_COMMENT_MARK):
content = token_string[2:-2].strip()
token = Token(TOKEN_COMMENT, content)
else:
token = Token(TOKEN_TEXT, token_string)
token.lineno = self.lineno
self.lineno += token_string.count('\n')
return token
class Parser(object):
def __init__(self, tokens):
self.tokens = tokens
self.tags = {}
self.filters = {}
for lib in builtins:
self.add_library(lib)
def parse(self, parse_until=None):
if parse_until is None:
parse_until = []
nodelist = self.create_nodelist()
while self.tokens:
token = self.next_token()
# Use the raw values here for TOKEN_* for a tiny performance boost.
if token.token_type == 0: # TOKEN_TEXT
self.extend_nodelist(nodelist, TextNode(token.contents), token)
elif token.token_type == 1: # TOKEN_VAR
if not token.contents:
self.empty_variable(token)
try:
filter_expression = self.compile_filter(token.contents)
except TemplateSyntaxError as e:
if not self.compile_filter_error(token, e):
raise
var_node = self.create_variable_node(filter_expression)
self.extend_nodelist(nodelist, var_node, token)
elif token.token_type == 2: # TOKEN_BLOCK
try:
command = token.contents.split()[0]
except IndexError:
self.empty_block_tag(token)
if command in parse_until:
# put token back on token list so calling
# code knows why it terminated
self.prepend_token(token)
return nodelist
# execute callback function for this tag and append
# resulting node
self.enter_command(command, token)
try:
compile_func = self.tags[command]
except KeyError:
self.invalid_block_tag(token, command, parse_until)
try:
compiled_result = compile_func(self, token)
except TemplateSyntaxError as e:
if not self.compile_function_error(token, e):
raise
self.extend_nodelist(nodelist, compiled_result, token)
self.exit_command()
if parse_until:
self.unclosed_block_tag(parse_until)
return nodelist
def skip_past(self, endtag):
while self.tokens:
token = self.next_token()
if token.token_type == TOKEN_BLOCK and token.contents == endtag:
return
self.unclosed_block_tag([endtag])
def create_variable_node(self, filter_expression):
return VariableNode(filter_expression)
def create_nodelist(self):
return NodeList()
def extend_nodelist(self, nodelist, node, token):
if node.must_be_first and nodelist:
try:
if nodelist.contains_nontext:
raise AttributeError
except AttributeError:
raise TemplateSyntaxError("%r must be the first tag "
"in the template." % node)
if isinstance(nodelist, NodeList) and not isinstance(node, TextNode):
nodelist.contains_nontext = True
nodelist.append(node)
def enter_command(self, command, token):
pass
def exit_command(self):
pass
def error(self, token, msg):
return TemplateSyntaxError(msg)
def empty_variable(self, token):
raise self.error(token, "Empty variable tag")
def empty_block_tag(self, token):
raise self.error(token, "Empty block tag")
def invalid_block_tag(self, token, command, parse_until=None):
if parse_until:
raise self.error(token, "Invalid block tag: '%s', expected %s" %
(command, get_text_list(["'%s'" % p for p in parse_until])))
raise self.error(token, "Invalid block tag: '%s'" % command)
def unclosed_block_tag(self, parse_until):
raise self.error(None, "Unclosed tags: %s " % ', '.join(parse_until))
def compile_filter_error(self, token, e):
pass
def compile_function_error(self, token, e):
pass
def next_token(self):
return self.tokens.pop(0)
def prepend_token(self, token):
self.tokens.insert(0, token)
def delete_first_token(self):
del self.tokens[0]
def add_library(self, lib):
self.tags.update(lib.tags)
self.filters.update(lib.filters)
def compile_filter(self, token):
"""
Convenient wrapper for FilterExpression
"""
return FilterExpression(token, self)
def find_filter(self, filter_name):
if filter_name in self.filters:
return self.filters[filter_name]
else:
raise TemplateSyntaxError("Invalid filter: '%s'" % filter_name)
class TokenParser(object):
"""
Subclass this and implement the top() method to parse a template line.
When instantiating the parser, pass in the line from the Django template
parser.
The parser's "tagname" instance-variable stores the name of the tag that
the filter was called with.
"""
def __init__(self, subject):
self.subject = subject
self.pointer = 0
self.backout = []
self.tagname = self.tag()
def top(self):
"""
Overload this method to do the actual parsing and return the result.
"""
raise NotImplementedError('subclasses of TokenParser must provide a top() method')
def more(self):
"""
Returns True if there is more stuff in the tag.
"""
return self.pointer < len(self.subject)
def back(self):
"""
Undoes the last microparser. Use this for lookahead and backtracking.
"""
if not len(self.backout):
raise TemplateSyntaxError("back called without some previous "
"parsing")
self.pointer = self.backout.pop()
def tag(self):
"""
A microparser that just returns the next tag from the line.
"""
subject = self.subject
i = self.pointer
if i >= len(subject):
raise TemplateSyntaxError("expected another tag, found "
"end of string: %s" % subject)
p = i
while i < len(subject) and subject[i] not in (' ', '\t'):
i += 1
s = subject[p:i]
while i < len(subject) and subject[i] in (' ', '\t'):
i += 1
self.backout.append(self.pointer)
self.pointer = i
return s
def value(self):
"""
A microparser that parses for a value: some string constant or
variable name.
"""
subject = self.subject
i = self.pointer
def next_space_index(subject, i):
"""
Increment pointer until a real space (i.e. a space not within
quotes) is encountered
"""
while i < len(subject) and subject[i] not in (' ', '\t'):
if subject[i] in ('"', "'"):
c = subject[i]
i += 1
while i < len(subject) and subject[i] != c:
i += 1
if i >= len(subject):
raise TemplateSyntaxError("Searching for value. "
"Unexpected end of string in column %d: %s" %
(i, subject))
i += 1
return i
if i >= len(subject):
raise TemplateSyntaxError("Searching for value. Expected another "
"value but found end of string: %s" %
subject)
if subject[i] in ('"', "'"):
p = i
i += 1
while i < len(subject) and subject[i] != subject[p]:
i += 1
if i >= len(subject):
raise TemplateSyntaxError("Searching for value. Unexpected "
"end of string in column %d: %s" %
(i, subject))
i += 1
# Continue parsing until next "real" space,
# so that filters are also included
i = next_space_index(subject, i)
res = subject[p:i]
while i < len(subject) and subject[i] in (' ', '\t'):
i += 1
self.backout.append(self.pointer)
self.pointer = i
return res
else:
p = i
i = next_space_index(subject, i)
s = subject[p:i]
while i < len(subject) and subject[i] in (' ', '\t'):
i += 1
self.backout.append(self.pointer)
self.pointer = i
return s
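# Illustrative sketch (a hypothetical subclass, not part of this module) of how
# TokenParser is typically used: __init__ consumes the tag name, and top() can
# then pull the remaining pieces with the microparsers above. For a tag line
# such as "mytag 'some value'":
#
#     class MyTagParser(TokenParser):
#         def top(self):
#             # self.tagname is already "mytag"; return the next value, quotes included.
#             return self.value()
#
#     # MyTagParser("mytag 'some value'").top() -> "'some value'"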
# This only matches constant *strings* (things in quotes or marked for
# translation). Numbers are treated as variables for implementation reasons
# (so that they retain their type when passed to filters).
constant_string = r"""
(?:%(i18n_open)s%(strdq)s%(i18n_close)s|
%(i18n_open)s%(strsq)s%(i18n_close)s|
%(strdq)s|
%(strsq)s)
""" % {
'strdq': r'"[^"\\]*(?:\\.[^"\\]*)*"', # double-quoted string
'strsq': r"'[^'\\]*(?:\\.[^'\\]*)*'", # single-quoted string
'i18n_open': re.escape("_("),
'i18n_close': re.escape(")"),
}
constant_string = constant_string.replace("\n", "")
filter_raw_string = r"""
^(?P<constant>%(constant)s)|
^(?P<var>[%(var_chars)s]+|%(num)s)|
(?:\s*%(filter_sep)s\s*
(?P<filter_name>\w+)
(?:%(arg_sep)s
(?:
(?P<constant_arg>%(constant)s)|
(?P<var_arg>[%(var_chars)s]+|%(num)s)
)
)?
)""" % {
'constant': constant_string,
'num': r'[-+\.]?\d[\d\.e]*',
'var_chars': "\w\.",
'filter_sep': re.escape(FILTER_SEPARATOR),
'arg_sep': re.escape(FILTER_ARGUMENT_SEPARATOR),
}
filter_re = re.compile(filter_raw_string, re.UNICODE | re.VERBOSE)
class FilterExpression(object):
"""
Parses a variable token and its optional filters (all as a single string),
and returns a list of tuples of the filter name and arguments.
Sample::
>>> token = 'variable|default:"Default value"|date:"Y-m-d"'
>>> p = Parser('')
>>> fe = FilterExpression(token, p)
>>> len(fe.filters)
2
>>> fe.var
<Variable: 'variable'>
"""
def __init__(self, token, parser):
self.token = token
matches = filter_re.finditer(token)
var_obj = None
filters = []
upto = 0
for match in matches:
start = match.start()
if upto != start:
raise TemplateSyntaxError("Could not parse some characters: "
"%s|%s|%s" %
(token[:upto], token[upto:start],
token[start:]))
if var_obj is None:
var, constant = match.group("var", "constant")
if constant:
try:
var_obj = Variable(constant).resolve({})
except VariableDoesNotExist:
var_obj = None
elif var is None:
raise TemplateSyntaxError("Could not find variable at "
"start of %s." % token)
else:
var_obj = Variable(var)
else:
filter_name = match.group("filter_name")
args = []
constant_arg, var_arg = match.group("constant_arg", "var_arg")
if constant_arg:
args.append((False, Variable(constant_arg).resolve({})))
elif var_arg:
args.append((True, Variable(var_arg)))
filter_func = parser.find_filter(filter_name)
self.args_check(filter_name, filter_func, args)
filters.append((filter_func, args))
upto = match.end()
if upto != len(token):
raise TemplateSyntaxError("Could not parse the remainder: '%s' "
"from '%s'" % (token[upto:], token))
self.filters = filters
self.var = var_obj
def resolve(self, context, ignore_failures=False):
if isinstance(self.var, Variable):
try:
obj = self.var.resolve(context)
except VariableDoesNotExist:
if ignore_failures:
obj = None
else:
string_if_invalid = context.template.engine.string_if_invalid
if string_if_invalid:
if '%s' in string_if_invalid:
return string_if_invalid % self.var
else:
return string_if_invalid
else:
obj = string_if_invalid
else:
obj = self.var
for func, args in self.filters:
arg_vals = []
for lookup, arg in args:
if not lookup:
arg_vals.append(mark_safe(arg))
else:
arg_vals.append(arg.resolve(context))
if getattr(func, 'expects_localtime', False):
obj = template_localtime(obj, context.use_tz)
if getattr(func, 'needs_autoescape', False):
new_obj = func(obj, autoescape=context.autoescape, *arg_vals)
else:
new_obj = func(obj, *arg_vals)
if getattr(func, 'is_safe', False) and isinstance(obj, SafeData):
obj = mark_safe(new_obj)
elif isinstance(obj, EscapeData):
obj = mark_for_escaping(new_obj)
else:
obj = new_obj
return obj
def args_check(name, func, provided):
provided = list(provided)
# First argument, filter input, is implied.
plen = len(provided) + 1
# Check to see if a decorator is providing the real function.
func = getattr(func, '_decorated_function', func)
args, varargs, varkw, defaults = getargspec(func)
alen = len(args)
dlen = len(defaults or [])
# Not enough OR Too many
if plen < (alen - dlen) or plen > alen:
raise TemplateSyntaxError("%s requires %d arguments, %d provided" %
(name, alen - dlen, plen))
return True
args_check = staticmethod(args_check)
def __str__(self):
return self.token
def resolve_variable(path, context):
"""
Returns the resolved variable, which may contain attribute syntax, within
the given context.
Deprecated; use the Variable class instead.
"""
warnings.warn("resolve_variable() is deprecated. Use django.template."
"Variable(path).resolve(context) instead",
RemovedInDjango110Warning, stacklevel=2)
return Variable(path).resolve(context)
class Variable(object):
"""
A template variable, resolvable against a given context. The variable may
be a hard-coded string (if it begins and ends with single or double quote
marks)::
>>> c = {'article': {'section':u'News'}}
>>> Variable('article.section').resolve(c)
u'News'
>>> Variable('article').resolve(c)
{'section': u'News'}
>>> class AClass: pass
>>> c = AClass()
>>> c.article = AClass()
>>> c.article.section = u'News'
(The example assumes VARIABLE_ATTRIBUTE_SEPARATOR is '.')
"""
def __init__(self, var):
self.var = var
self.literal = None
self.lookups = None
self.translate = False
self.message_context = None
if not isinstance(var, six.string_types):
raise TypeError(
"Variable must be a string or number, got %s" % type(var))
try:
# First try to treat this variable as a number.
#
# Note that this could cause an OverflowError here that we're not
# catching. Since this should only happen at compile time, that's
# probably OK.
self.literal = float(var)
# So it's a float... is it an int? If the original value contained a
# dot or an "e" then it was a float, not an int.
if '.' not in var and 'e' not in var.lower():
self.literal = int(self.literal)
# "2." is invalid
if var.endswith('.'):
raise ValueError
except ValueError:
# A ValueError means that the variable isn't a number.
if var.startswith('_(') and var.endswith(')'):
# The result of the lookup should be translated at rendering
# time.
self.translate = True
var = var[2:-1]
# If it's wrapped with quotes (single or double), then
# we're also dealing with a literal.
try:
self.literal = mark_safe(unescape_string_literal(var))
except ValueError:
# Otherwise we'll set self.lookups so that resolve() knows we're
# dealing with a bonafide variable
if var.find(VARIABLE_ATTRIBUTE_SEPARATOR + '_') > -1 or var[0] == '_':
raise TemplateSyntaxError("Variables and attributes may "
"not begin with underscores: '%s'" %
var)
self.lookups = tuple(var.split(VARIABLE_ATTRIBUTE_SEPARATOR))
def resolve(self, context):
"""Resolve this variable against a given context."""
if self.lookups is not None:
# We're dealing with a variable that needs to be resolved
value = self._resolve_lookup(context)
else:
# We're dealing with a literal, so it's already been "resolved"
value = self.literal
if self.translate:
if self.message_context:
return pgettext_lazy(self.message_context, value)
else:
return ugettext_lazy(value)
return value
def __repr__(self):
return "<%s: %r>" % (self.__class__.__name__, self.var)
def __str__(self):
return self.var
def _resolve_lookup(self, context):
"""
Performs resolution of a real variable (i.e. not a literal) against the
given context.
As indicated by the method's name, this method is an implementation
detail and shouldn't be called by external code. Use Variable.resolve()
instead.
"""
current = context
try: # catch-all for silent variable failures
for bit in self.lookups:
try: # dictionary lookup
current = current[bit]
# ValueError/IndexError are for numpy.array lookup on
# numpy < 1.9 and 1.9+ respectively
except (TypeError, AttributeError, KeyError, ValueError, IndexError):
try: # attribute lookup
# Don't return class attributes if the class is the context:
if isinstance(current, BaseContext) and getattr(type(current), bit):
raise AttributeError
current = getattr(current, bit)
except (TypeError, AttributeError) as e:
# Reraise an AttributeError raised by a @property
if (isinstance(e, AttributeError) and
not isinstance(current, BaseContext) and bit in dir(current)):
raise
try: # list-index lookup
current = current[int(bit)]
except (IndexError, # list index out of range
ValueError, # invalid literal for int()
KeyError, # current is a dict without `int(bit)` key
TypeError): # unsubscriptable object
raise VariableDoesNotExist("Failed lookup for key "
"[%s] in %r",
(bit, current)) # missing attribute
if callable(current):
if getattr(current, 'do_not_call_in_templates', False):
pass
elif getattr(current, 'alters_data', False):
current = context.template.engine.string_if_invalid
else:
try: # method call (assuming no args required)
current = current()
except TypeError:
try:
getcallargs(current)
except TypeError: # arguments *were* required
current = context.template.engine.string_if_invalid # invalid method call
else:
raise
except Exception as e:
if getattr(e, 'silent_variable_failure', False):
current = context.template.engine.string_if_invalid
else:
raise
return current
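# Informal examples of the constructor's behaviour (assumed values, not doctests
# from this module): Variable('2.5').literal == 2.5, Variable('"hi"').literal is
# the safe string 'hi', while Variable('user.name') has lookups == ('user', 'name')
# and is therefore resolved against the context only at render time.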
class Node(object):
# Set this to True for nodes that must be first in the template (although
# they can be preceded by text nodes).
must_be_first = False
child_nodelists = ('nodelist',)
def render(self, context):
"""
Return the node rendered as a string.
"""
pass
def __iter__(self):
yield self
def get_nodes_by_type(self, nodetype):
"""
Return a list of all nodes (within this node and its nodelist)
of the given type
"""
nodes = []
if isinstance(self, nodetype):
nodes.append(self)
for attr in self.child_nodelists:
nodelist = getattr(self, attr, None)
if nodelist:
nodes.extend(nodelist.get_nodes_by_type(nodetype))
return nodes
class NodeList(list):
# Set to True the first time a non-TextNode is inserted by
# extend_nodelist().
contains_nontext = False
def render(self, context):
bits = []
for node in self:
if isinstance(node, Node):
bit = self.render_node(node, context)
else:
bit = node
bits.append(force_text(bit))
return mark_safe(''.join(bits))
def get_nodes_by_type(self, nodetype):
"Return a list of all nodes of the given type"
nodes = []
for node in self:
nodes.extend(node.get_nodes_by_type(nodetype))
return nodes
def render_node(self, node, context):
return node.render(context)
class TextNode(Node):
def __init__(self, s):
self.s = s
def __repr__(self):
return force_str("<Text Node: '%s'>" % self.s[:25], 'ascii',
errors='replace')
def render(self, context):
return self.s
def render_value_in_context(value, context):
"""
Converts any value to a string to become part of a rendered template. This
means escaping, if required, and conversion to a unicode object. If value
is a string, it is expected to have already been translated.
"""
value = template_localtime(value, use_tz=context.use_tz)
value = localize(value, use_l10n=context.use_l10n)
value = force_text(value)
if ((context.autoescape and not isinstance(value, SafeData)) or
isinstance(value, EscapeData)):
return conditional_escape(value)
else:
return value
class VariableNode(Node):
def __init__(self, filter_expression):
self.filter_expression = filter_expression
def __repr__(self):
return "<Variable Node: %s>" % self.filter_expression
def render(self, context):
try:
output = self.filter_expression.resolve(context)
except UnicodeDecodeError:
# Unicode conversion can fail sometimes for reasons out of our
# control (e.g. exception rendering). In that case, we fail
# quietly.
return ''
return render_value_in_context(output, context)
# Regex for token keyword arguments
kwarg_re = re.compile(r"(?:(\w+)=)?(.+)")
def token_kwargs(bits, parser, support_legacy=False):
"""
A utility method for parsing token keyword arguments.
:param bits: A list containing remainder of the token (split by spaces)
that is to be checked for arguments. Valid arguments will be removed
from this list.
:param support_legacy: If set to ``True``, the legacy format
``1 as foo`` will be accepted. Otherwise, only the standard ``foo=1``
format is allowed.
:returns: A dictionary of the arguments retrieved from the ``bits`` token
list.
There is no requirement for all remaining token ``bits`` to be keyword
arguments, so the dictionary will be returned as soon as an invalid
argument format is reached.
"""
if not bits:
return {}
match = kwarg_re.match(bits[0])
kwarg_format = match and match.group(1)
if not kwarg_format:
if not support_legacy:
return {}
if len(bits) < 3 or bits[1] != 'as':
return {}
kwargs = {}
while bits:
if kwarg_format:
match = kwarg_re.match(bits[0])
if not match or not match.group(1):
return kwargs
key, value = match.groups()
del bits[:1]
else:
if len(bits) < 3 or bits[1] != 'as':
return kwargs
key, value = bits[2], bits[0]
del bits[:3]
kwargs[key] = parser.compile_filter(value)
if bits and not kwarg_format:
if bits[0] != 'and':
return kwargs
del bits[:1]
return kwargs
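# Illustrative example (hypothetical tag usage, not from this module): for
# {% include "card.html" with greeting="hi" count=1 %}, the remainder bits
# ['greeting="hi"', 'count=1'] passed to token_kwargs(bits, parser) yield a dict
# mapping 'greeting' and 'count' to FilterExpression objects compiled from
# '"hi"' and '1', with both entries consumed from ``bits``.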
def parse_bits(parser, bits, params, varargs, varkw, defaults,
takes_context, name):
"""
Parses bits for template tag helpers (simple_tag, inclusion_tag and
assignment_tag), in particular by detecting syntax errors and by
extracting positional and keyword arguments.
"""
if takes_context:
if params[0] == 'context':
params = params[1:]
else:
raise TemplateSyntaxError(
"'%s' is decorated with takes_context=True so it must "
"have a first argument of 'context'" % name)
args = []
kwargs = {}
unhandled_params = list(params)
for bit in bits:
# First we try to extract a potential kwarg from the bit
kwarg = token_kwargs([bit], parser)
if kwarg:
# The kwarg was successfully extracted
param, value = list(six.iteritems(kwarg))[0]
if param not in params and varkw is None:
# An unexpected keyword argument was supplied
raise TemplateSyntaxError(
"'%s' received unexpected keyword argument '%s'" %
(name, param))
elif param in kwargs:
# The keyword argument has already been supplied once
raise TemplateSyntaxError(
"'%s' received multiple values for keyword argument '%s'" %
(name, param))
else:
# All good, record the keyword argument
kwargs[str(param)] = value
if param in unhandled_params:
# If using the keyword syntax for a positional arg, then
# consume it.
unhandled_params.remove(param)
else:
if kwargs:
raise TemplateSyntaxError(
"'%s' received some positional argument(s) after some "
"keyword argument(s)" % name)
else:
# Record the positional argument
args.append(parser.compile_filter(bit))
try:
# Consume from the list of expected positional arguments
unhandled_params.pop(0)
except IndexError:
if varargs is None:
raise TemplateSyntaxError(
"'%s' received too many positional arguments" %
name)
if defaults is not None:
# Consider the last n params handled, where n is the
# number of defaults.
unhandled_params = unhandled_params[:-len(defaults)]
if unhandled_params:
# Some positional arguments were not supplied
raise TemplateSyntaxError(
"'%s' did not receive value(s) for the argument(s): %s" %
(name, ", ".join("'%s'" % p for p in unhandled_params)))
return args, kwargs
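# Illustrative example (hypothetical tag function, not from this module): for
#     def greet(name, punctuation='!')
# registered as a simple_tag and used as {% greet "World" punctuation="?" %},
# bits == ['"World"', 'punctuation="?"'] and parse_bits() returns one positional
# FilterExpression compiled from '"World"' plus a kwargs dict mapping
# 'punctuation' to a FilterExpression compiled from '"?"'.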
def generic_tag_compiler(parser, token, params, varargs, varkw, defaults,
name, takes_context, node_class):
"""
Returns an instance of a template.Node subclass.
"""
bits = token.split_contents()[1:]
args, kwargs = parse_bits(parser, bits, params, varargs, varkw,
defaults, takes_context, name)
return node_class(takes_context, args, kwargs)
class TagHelperNode(Node):
"""
Base class for tag helper nodes such as SimpleNode, InclusionNode and
AssignmentNode. Manages the positional and keyword arguments to be passed
to the decorated function.
"""
def __init__(self, takes_context, args, kwargs):
self.takes_context = takes_context
self.args = args
self.kwargs = kwargs
def get_resolved_arguments(self, context):
resolved_args = [var.resolve(context) for var in self.args]
if self.takes_context:
resolved_args = [context] + resolved_args
resolved_kwargs = {k: v.resolve(context) for k, v in self.kwargs.items()}
return resolved_args, resolved_kwargs
class Library(object):
def __init__(self):
self.filters = {}
self.tags = {}
def tag(self, name=None, compile_function=None):
if name is None and compile_function is None:
# @register.tag()
return self.tag_function
elif name is not None and compile_function is None:
if callable(name):
# @register.tag
return self.tag_function(name)
else:
# @register.tag('somename') or @register.tag(name='somename')
def dec(func):
return self.tag(name, func)
return dec
elif name is not None and compile_function is not None:
# register.tag('somename', somefunc)
self.tags[name] = compile_function
return compile_function
else:
raise InvalidTemplateLibrary("Unsupported arguments to "
"Library.tag: (%r, %r)", (name, compile_function))
def tag_function(self, func):
self.tags[getattr(func, "_decorated_function", func).__name__] = func
return func
def filter(self, name=None, filter_func=None, **flags):
if name is None and filter_func is None:
# @register.filter()
def dec(func):
return self.filter_function(func, **flags)
return dec
elif name is not None and filter_func is None:
if callable(name):
# @register.filter
return self.filter_function(name, **flags)
else:
# @register.filter('somename') or @register.filter(name='somename')
def dec(func):
return self.filter(name, func, **flags)
return dec
elif name is not None and filter_func is not None:
# register.filter('somename', somefunc)
self.filters[name] = filter_func
for attr in ('expects_localtime', 'is_safe', 'needs_autoescape'):
if attr in flags:
value = flags[attr]
# set the flag on the filter for FilterExpression.resolve
setattr(filter_func, attr, value)
# set the flag on the innermost decorated function
# for decorators that need it e.g. stringfilter
if hasattr(filter_func, "_decorated_function"):
setattr(filter_func._decorated_function, attr, value)
filter_func._filter_name = name
return filter_func
else:
raise InvalidTemplateLibrary("Unsupported arguments to "
"Library.filter: (%r, %r)", (name, filter_func))
def filter_function(self, func, **flags):
name = getattr(func, "_decorated_function", func).__name__
return self.filter(name, func, **flags)
def simple_tag(self, func=None, takes_context=None, name=None):
def dec(func):
params, varargs, varkw, defaults = getargspec(func)
class SimpleNode(TagHelperNode):
def render(self, context):
resolved_args, resolved_kwargs = self.get_resolved_arguments(context)
return func(*resolved_args, **resolved_kwargs)
function_name = (name or
getattr(func, '_decorated_function', func).__name__)
compile_func = partial(generic_tag_compiler,
params=params, varargs=varargs, varkw=varkw,
defaults=defaults, name=function_name,
takes_context=takes_context, node_class=SimpleNode)
compile_func.__doc__ = func.__doc__
self.tag(function_name, compile_func)
return func
if func is None:
# @register.simple_tag(...)
return dec
elif callable(func):
# @register.simple_tag
return dec(func)
else:
raise TemplateSyntaxError("Invalid arguments provided to simple_tag")
def assignment_tag(self, func=None, takes_context=None, name=None):
def dec(func):
params, varargs, varkw, defaults = getargspec(func)
class AssignmentNode(TagHelperNode):
def __init__(self, takes_context, args, kwargs, target_var):
super(AssignmentNode, self).__init__(takes_context, args, kwargs)
self.target_var = target_var
def render(self, context):
resolved_args, resolved_kwargs = self.get_resolved_arguments(context)
context[self.target_var] = func(*resolved_args, **resolved_kwargs)
return ''
function_name = (name or
getattr(func, '_decorated_function', func).__name__)
def compile_func(parser, token):
bits = token.split_contents()[1:]
if len(bits) < 2 or bits[-2] != 'as':
raise TemplateSyntaxError(
"'%s' tag takes at least 2 arguments and the "
"second last argument must be 'as'" % function_name)
target_var = bits[-1]
bits = bits[:-2]
args, kwargs = parse_bits(parser, bits, params,
varargs, varkw, defaults, takes_context, function_name)
return AssignmentNode(takes_context, args, kwargs, target_var)
compile_func.__doc__ = func.__doc__
self.tag(function_name, compile_func)
return func
if func is None:
# @register.assignment_tag(...)
return dec
elif callable(func):
# @register.assignment_tag
return dec(func)
else:
raise TemplateSyntaxError("Invalid arguments provided to assignment_tag")
def inclusion_tag(self, file_name, takes_context=False, name=None):
def dec(func):
params, varargs, varkw, defaults = getargspec(func)
class InclusionNode(TagHelperNode):
def render(self, context):
"""
Renders the specified template and context. Caches the
template object in render_context to avoid reparsing and
loading when used in a for loop.
"""
resolved_args, resolved_kwargs = self.get_resolved_arguments(context)
_dict = func(*resolved_args, **resolved_kwargs)
t = context.render_context.get(self)
if t is None:
if isinstance(file_name, Template):
t = file_name
elif isinstance(getattr(file_name, 'template', None), Template):
t = file_name.template
elif not isinstance(file_name, six.string_types) and is_iterable(file_name):
t = context.template.engine.select_template(file_name)
else:
t = context.template.engine.get_template(file_name)
context.render_context[self] = t
new_context = context.new(_dict)
# Copy across the CSRF token, if present, because
# inclusion tags are often used for forms, and we need
# instructions for using CSRF protection to be as simple
# as possible.
csrf_token = context.get('csrf_token', None)
if csrf_token is not None:
new_context['csrf_token'] = csrf_token
return t.render(new_context)
function_name = (name or
getattr(func, '_decorated_function', func).__name__)
compile_func = partial(generic_tag_compiler,
params=params, varargs=varargs, varkw=varkw,
defaults=defaults, name=function_name,
takes_context=takes_context, node_class=InclusionNode)
compile_func.__doc__ = func.__doc__
self.tag(function_name, compile_func)
return func
return dec
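# Illustrative usage sketch (a hypothetical tag library module, not part of this file):
#
#     register = Library()
#
#     @register.filter(is_safe=True)
#     def shout(value):
#         return value.upper()
#
#     @register.simple_tag(takes_context=True)
#     def greet(context, name):
#         return 'Hello %s' % name
#
# The decorators fill register.tags / register.filters, which Parser.add_library()
# later merges into its own tag and filter tables.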
def is_library_missing(name):
"""Check if library that failed to load cannot be found under any
templatetags directory or does exist but fails to import.
Non-existing condition is checked recursively for each subpackage in cases
like <appdir>/templatetags/subpackage/package/module.py.
"""
# Don't bother to check if '.' is in name since any name will be prefixed
# with some template root.
path, module = name.rsplit('.', 1)
try:
package = import_module(path)
return not module_has_submodule(package, module)
except ImportError:
return is_library_missing(path)
def import_library(taglib_module):
"""
Load a template tag library module.
Verifies that the library contains a 'register' attribute, and
returns that attribute as the representation of the library
"""
try:
mod = import_module(taglib_module)
except ImportError as e:
# If the ImportError is because the taglib submodule does not exist,
# that's not an error that should be raised. If the submodule exists
# and raised an ImportError on the attempt to load it, that we want
# to raise.
if is_library_missing(taglib_module):
return None
else:
raise InvalidTemplateLibrary("ImportError raised loading %s: %s" %
(taglib_module, e))
try:
return mod.register
except AttributeError:
raise InvalidTemplateLibrary("Template library %s does not have "
"a variable named 'register'" %
taglib_module)
@lru_cache.lru_cache()
def get_templatetags_modules():
"""
Return the list of all available template tag modules.
Caches the result for faster access.
"""
templatetags_modules_candidates = ['django.templatetags']
templatetags_modules_candidates.extend(
'%s.templatetags' % app_config.name
for app_config in apps.get_app_configs())
templatetags_modules = []
for templatetag_module in templatetags_modules_candidates:
try:
import_module(templatetag_module)
except ImportError:
continue
else:
templatetags_modules.append(templatetag_module)
return templatetags_modules
def get_library(library_name):
"""
Load the template library module with the given name.
If the library is not already loaded, loop over all templatetags modules
to locate it.
{% load somelib %} and {% load someotherlib %} each trigger their own lookup.
Subsequent loads, e.g. {% load somelib %} in the same process, will grab
the cached module from the ``libraries`` cache.
"""
lib = libraries.get(library_name, None)
if not lib:
templatetags_modules = get_templatetags_modules()
tried_modules = []
for module in templatetags_modules:
taglib_module = '%s.%s' % (module, library_name)
tried_modules.append(taglib_module)
lib = import_library(taglib_module)
if lib:
libraries[library_name] = lib
break
if not lib:
raise InvalidTemplateLibrary("Template library %s not found, "
"tried %s" %
(library_name,
','.join(tried_modules)))
return lib
def add_to_builtins(module):
builtins.append(import_library(module))
add_to_builtins('django.template.defaulttags')
add_to_builtins('django.template.defaultfilters')
add_to_builtins('django.template.loader_tags')
|
mit
|
aequitas/home-assistant
|
homeassistant/components/netatmo/climate.py
|
4
|
15586
|
"""Support for Netatmo Smart thermostats."""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.climate import ClimateDevice, PLATFORM_SCHEMA
from homeassistant.components.climate.const import (
STATE_HEAT, SUPPORT_ON_OFF, SUPPORT_TARGET_TEMPERATURE,
SUPPORT_OPERATION_MODE, SUPPORT_AWAY_MODE, STATE_MANUAL, STATE_AUTO,
STATE_ECO, STATE_COOL)
from homeassistant.const import (
STATE_OFF, TEMP_CELSIUS, ATTR_TEMPERATURE, CONF_NAME)
from homeassistant.util import Throttle
from .const import DATA_NETATMO_AUTH
_LOGGER = logging.getLogger(__name__)
CONF_HOMES = 'homes'
CONF_ROOMS = 'rooms'
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=10)
HOME_CONFIG_SCHEMA = vol.Schema({
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_ROOMS, default=[]): vol.All(cv.ensure_list, [cv.string])
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_HOMES): vol.All(cv.ensure_list, [HOME_CONFIG_SCHEMA])
})
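# Illustrative configuration sketch (hypothetical home and room names): a platform
# entry such as
#     climate:
#       - platform: netatmo
#         homes:
#           - name: MyHome
#             rooms:
#               - Living room
# validates against PLATFORM_SCHEMA above and reaches setup_platform() with
# config[CONF_HOMES] == [{'name': 'MyHome', 'rooms': ['Living room']}].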
STATE_NETATMO_SCHEDULE = 'schedule'
STATE_NETATMO_HG = 'hg'
STATE_NETATMO_MAX = 'max'
STATE_NETATMO_AWAY = 'away'
STATE_NETATMO_OFF = STATE_OFF
STATE_NETATMO_MANUAL = STATE_MANUAL
DICT_NETATMO_TO_HA = {
STATE_NETATMO_SCHEDULE: STATE_AUTO,
STATE_NETATMO_HG: STATE_COOL,
STATE_NETATMO_MAX: STATE_HEAT,
STATE_NETATMO_AWAY: STATE_ECO,
STATE_NETATMO_OFF: STATE_OFF,
STATE_NETATMO_MANUAL: STATE_MANUAL
}
DICT_HA_TO_NETATMO = {
STATE_AUTO: STATE_NETATMO_SCHEDULE,
STATE_COOL: STATE_NETATMO_HG,
STATE_HEAT: STATE_NETATMO_MAX,
STATE_ECO: STATE_NETATMO_AWAY,
STATE_OFF: STATE_NETATMO_OFF,
STATE_MANUAL: STATE_NETATMO_MANUAL
}
SUPPORT_FLAGS = (SUPPORT_TARGET_TEMPERATURE | SUPPORT_OPERATION_MODE |
SUPPORT_AWAY_MODE)
NA_THERM = 'NATherm1'
NA_VALVE = 'NRV'
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the NetAtmo Thermostat."""
import pyatmo
homes_conf = config.get(CONF_HOMES)
auth = hass.data[DATA_NETATMO_AUTH]
try:
home_data = HomeData(auth)
except pyatmo.NoDevice:
return
homes = []
rooms = {}
if homes_conf is not None:
for home_conf in homes_conf:
home = home_conf[CONF_NAME]
if home_conf[CONF_ROOMS] != []:
rooms[home] = home_conf[CONF_ROOMS]
homes.append(home)
else:
homes = home_data.get_home_names()
devices = []
for home in homes:
_LOGGER.debug("Setting up %s ...", home)
try:
room_data = ThermostatData(auth, home)
except pyatmo.NoDevice:
continue
for room_id in room_data.get_room_ids():
room_name = room_data.homedata.rooms[home][room_id]['name']
_LOGGER.debug("Setting up %s (%s) ...", room_name, room_id)
if home in rooms and room_name not in rooms[home]:
_LOGGER.debug("Excluding %s ...", room_name)
continue
_LOGGER.debug("Adding devices for room %s (%s) ...",
room_name, room_id)
devices.append(NetatmoThermostat(room_data, room_id))
add_entities(devices, True)
class NetatmoThermostat(ClimateDevice):
"""Representation a Netatmo thermostat."""
def __init__(self, data, room_id):
"""Initialize the sensor."""
self._data = data
self._state = None
self._room_id = room_id
room_name = self._data.homedata.rooms[self._data.home][room_id]['name']
self._name = 'netatmo_{}'.format(room_name)
self._target_temperature = None
self._away = None
self._module_type = self._data.room_status[room_id]['module_type']
if self._module_type == NA_VALVE:
self._operation_list = [DICT_NETATMO_TO_HA[STATE_NETATMO_SCHEDULE],
DICT_NETATMO_TO_HA[STATE_NETATMO_MANUAL],
DICT_NETATMO_TO_HA[STATE_NETATMO_AWAY],
DICT_NETATMO_TO_HA[STATE_NETATMO_HG]]
self._support_flags = SUPPORT_FLAGS
elif self._module_type == NA_THERM:
self._operation_list = [DICT_NETATMO_TO_HA[STATE_NETATMO_SCHEDULE],
DICT_NETATMO_TO_HA[STATE_NETATMO_MANUAL],
DICT_NETATMO_TO_HA[STATE_NETATMO_AWAY],
DICT_NETATMO_TO_HA[STATE_NETATMO_HG],
DICT_NETATMO_TO_HA[STATE_NETATMO_MAX],
DICT_NETATMO_TO_HA[STATE_NETATMO_OFF]]
self._support_flags = SUPPORT_FLAGS | SUPPORT_ON_OFF
self._operation_mode = None
self.update_without_throttle = False
@property
def supported_features(self):
"""Return the list of supported features."""
return self._support_flags
@property
def name(self):
"""Return the name of the thermostat."""
return self._name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return self._data.room_status[self._room_id]['current_temperature']
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._data.room_status[self._room_id]['target_temperature']
@property
def current_operation(self):
"""Return the current state of the thermostat."""
return self._operation_mode
@property
def operation_list(self):
"""Return the operation modes list."""
return self._operation_list
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
module_type = self._data.room_status[self._room_id]['module_type']
if module_type not in (NA_THERM, NA_VALVE):
return {}
state_attributes = {
"home_id": self._data.homedata.gethomeId(self._data.home),
"room_id": self._room_id,
"setpoint_default_duration": self._data.setpoint_duration,
"away_temperature": self._data.away_temperature,
"hg_temperature": self._data.hg_temperature,
"operation_mode": self._operation_mode,
"module_type": module_type,
"module_id": self._data.room_status[self._room_id]['module_id']
}
if module_type == NA_THERM:
state_attributes["boiler_status"] = self._data.boilerstatus
elif module_type == NA_VALVE:
state_attributes["heating_power_request"] = \
self._data.room_status[self._room_id]['heating_power_request']
return state_attributes
@property
def is_away_mode_on(self):
"""Return true if away mode is on."""
return self._away
@property
def is_on(self):
"""Return true if on."""
return self.target_temperature > 0
def turn_away_mode_on(self):
"""Turn away on."""
self.set_operation_mode(DICT_NETATMO_TO_HA[STATE_NETATMO_AWAY])
def turn_away_mode_off(self):
"""Turn away off."""
self.set_operation_mode(DICT_NETATMO_TO_HA[STATE_NETATMO_SCHEDULE])
def turn_off(self):
"""Turn Netatmo off."""
_LOGGER.debug("Switching off ...")
self.set_operation_mode(DICT_NETATMO_TO_HA[STATE_NETATMO_OFF])
self.update_without_throttle = True
self.schedule_update_ha_state()
def turn_on(self):
"""Turn Netatmo on."""
_LOGGER.debug("Switching on ...")
_LOGGER.debug("Setting temperature first to %d ...",
self._data.hg_temperature)
self._data.homestatus.setroomThermpoint(
self._data.homedata.gethomeId(self._data.home),
self._room_id, STATE_NETATMO_MANUAL, self._data.hg_temperature)
_LOGGER.debug("Setting operation mode to schedule ...")
self._data.homestatus.setThermmode(
self._data.homedata.gethomeId(self._data.home),
STATE_NETATMO_SCHEDULE)
self.update_without_throttle = True
self.schedule_update_ha_state()
def set_operation_mode(self, operation_mode):
"""Set HVAC mode (auto, auxHeatOnly, cool, heat, off)."""
if not self.is_on:
self.turn_on()
if operation_mode in [DICT_NETATMO_TO_HA[STATE_NETATMO_MAX],
DICT_NETATMO_TO_HA[STATE_NETATMO_OFF]]:
self._data.homestatus.setroomThermpoint(
self._data.homedata.gethomeId(self._data.home),
self._room_id, DICT_HA_TO_NETATMO[operation_mode])
elif operation_mode in [DICT_NETATMO_TO_HA[STATE_NETATMO_HG],
DICT_NETATMO_TO_HA[STATE_NETATMO_SCHEDULE],
DICT_NETATMO_TO_HA[STATE_NETATMO_AWAY]]:
self._data.homestatus.setThermmode(
self._data.homedata.gethomeId(self._data.home),
DICT_HA_TO_NETATMO[operation_mode])
self.update_without_throttle = True
self.schedule_update_ha_state()
def set_temperature(self, **kwargs):
"""Set new target temperature for 2 hours."""
temp = kwargs.get(ATTR_TEMPERATURE)
if temp is None:
return
mode = STATE_NETATMO_MANUAL
self._data.homestatus.setroomThermpoint(
self._data.homedata.gethomeId(self._data.home),
self._room_id, DICT_HA_TO_NETATMO[mode], temp)
self.update_without_throttle = True
self.schedule_update_ha_state()
def update(self):
"""Get the latest data from NetAtmo API and updates the states."""
try:
if self.update_without_throttle:
self._data.update(no_throttle=True)
self.update_without_throttle = False
else:
self._data.update()
except AttributeError:
_LOGGER.error("NetatmoThermostat::update() "
"got exception.")
return
self._target_temperature = \
self._data.room_status[self._room_id]['target_temperature']
self._operation_mode = DICT_NETATMO_TO_HA[
self._data.room_status[self._room_id]['setpoint_mode']]
self._away = self._operation_mode == DICT_NETATMO_TO_HA[
STATE_NETATMO_AWAY]
class HomeData:
"""Representation Netatmo homes."""
def __init__(self, auth, home=None):
"""Initialize the HomeData object."""
self.auth = auth
self.homedata = None
self.home_names = []
self.room_names = []
self.schedules = []
self.home = home
self.home_id = None
def get_home_names(self):
"""Get all the home names returned by NetAtmo API."""
self.setup()
if self.homedata is None:
return []
for home in self.homedata.homes:
if 'therm_schedules' in self.homedata.homes[home] and 'modules' \
in self.homedata.homes[home]:
self.home_names.append(self.homedata.homes[home]['name'])
return self.home_names
def setup(self):
"""Retrieve HomeData by NetAtmo API."""
import pyatmo
try:
self.homedata = pyatmo.HomeData(self.auth)
self.home_id = self.homedata.gethomeId(self.home)
except TypeError:
_LOGGER.error("Error when getting home data.")
except AttributeError:
_LOGGER.error("No default_home in HomeData.")
except pyatmo.NoDevice:
_LOGGER.debug("No thermostat devices available.")
class ThermostatData:
"""Get the latest data from Netatmo."""
def __init__(self, auth, home=None):
"""Initialize the data object."""
self.auth = auth
self.homedata = None
self.homestatus = None
self.room_ids = []
self.room_status = {}
self.schedules = []
self.home = home
self.away_temperature = None
self.hg_temperature = None
self.boilerstatus = None
self.setpoint_duration = None
self.home_id = None
def get_room_ids(self):
"""Return all module available on the API as a list."""
if not self.setup():
return []
for key in self.homestatus.rooms:
self.room_ids.append(key)
return self.room_ids
def setup(self):
"""Retrieve HomeData and HomeStatus by NetAtmo API."""
import pyatmo
try:
self.homedata = pyatmo.HomeData(self.auth)
self.homestatus = pyatmo.HomeStatus(self.auth, home=self.home)
self.home_id = self.homedata.gethomeId(self.home)
self.update()
except TypeError:
_LOGGER.error("ThermostatData::setup() got error.")
return False
return True
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Call the NetAtmo API to update the data."""
import pyatmo
try:
self.homestatus = pyatmo.HomeStatus(self.auth, home=self.home)
except TypeError:
_LOGGER.error("Error when getting homestatus.")
return
_LOGGER.debug("Following is the debugging output for homestatus:")
_LOGGER.debug(self.homestatus.rawData)
for key in self.homestatus.rooms:
roomstatus = {}
homestatus_room = self.homestatus.rooms[key]
homedata_room = self.homedata.rooms[self.home][key]
roomstatus['roomID'] = homestatus_room['id']
roomstatus['roomname'] = homedata_room['name']
roomstatus['target_temperature'] = \
homestatus_room['therm_setpoint_temperature']
roomstatus['setpoint_mode'] = \
homestatus_room['therm_setpoint_mode']
roomstatus['current_temperature'] = \
homestatus_room['therm_measured_temperature']
roomstatus['module_type'] = \
self.homestatus.thermostatType(self.home, key)
roomstatus['module_id'] = None
roomstatus['heating_status'] = None
roomstatus['heating_power_request'] = None
for module_id in homedata_room['module_ids']:
if self.homedata.modules[self.home][module_id]['type'] == \
NA_THERM or roomstatus['module_id'] is None:
roomstatus['module_id'] = module_id
if roomstatus['module_type'] == NA_THERM:
self.boilerstatus = self.homestatus.boilerStatus(
rid=roomstatus['module_id'])
roomstatus['heating_status'] = self.boilerstatus
elif roomstatus['module_type'] == NA_VALVE:
roomstatus['heating_power_request'] = \
homestatus_room['heating_power_request']
roomstatus['heating_status'] = \
roomstatus['heating_power_request'] > 0
if self.boilerstatus is not None:
roomstatus['heating_status'] = \
self.boilerstatus and roomstatus['heating_status']
self.room_status[key] = roomstatus
self.away_temperature = self.homestatus.getAwaytemp(self.home)
self.hg_temperature = self.homestatus.getHgtemp(self.home)
self.setpoint_duration = self.homedata.setpoint_duration[self.home]
|
apache-2.0
|
shyamalschandra/picochess
|
libs/ecdsa/der.py
|
26
|
7023
|
from __future__ import division
import binascii
import base64
from .six import int2byte, b, PY3, integer_types, text_type
class UnexpectedDER(Exception):
pass
def encode_constructed(tag, value):
return int2byte(0xa0+tag) + encode_length(len(value)) + value
def encode_integer(r):
assert r >= 0 # can't support negative numbers yet
h = ("%x" % r).encode()
if len(h) % 2:
h = b("0") + h
s = binascii.unhexlify(h)
num = s[0] if isinstance(s[0], integer_types) else ord(s[0])
if num <= 0x7f:
return b("\x02") + int2byte(len(s)) + s
else:
# DER integers are two's complement, so if the first byte is
# 0x80-0xff then we need an extra 0x00 byte to prevent it from
# looking negative.
return b("\x02") + int2byte(len(s)+1) + b("\x00") + s
def encode_bitstring(s):
return b("\x03") + encode_length(len(s)) + s
def encode_octet_string(s):
return b("\x04") + encode_length(len(s)) + s
def encode_oid(first, second, *pieces):
assert first <= 2
assert second <= 39
encoded_pieces = [int2byte(40*first+second)] + [encode_number(p)
for p in pieces]
body = b('').join(encoded_pieces)
return b('\x06') + encode_length(len(body)) + body
def encode_sequence(*encoded_pieces):
total_len = sum([len(p) for p in encoded_pieces])
return b('\x30') + encode_length(total_len) + b('').join(encoded_pieces)
def encode_number(n):
b128_digits = []
while n:
b128_digits.insert(0, (n & 0x7f) | 0x80)
n = n >> 7
if not b128_digits:
b128_digits.append(0)
b128_digits[-1] &= 0x7f
return b('').join([int2byte(d) for d in b128_digits])
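# Informal example: encode_number(840) == b'\x86\x48' (base-128, high bit set on
# every byte except the last), which is how the 840 in an OID such as
# 1.2.840.10045.4.1 is encoded.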
def remove_constructed(string):
s0 = string[0] if isinstance(string[0], integer_types) else ord(string[0])
if (s0 & 0xe0) != 0xa0:
raise UnexpectedDER("wanted constructed tag (0xa0-0xbf), got 0x%02x"
% s0)
tag = s0 & 0x1f
length, llen = read_length(string[1:])
body = string[1+llen:1+llen+length]
rest = string[1+llen+length:]
return tag, body, rest
def remove_sequence(string):
if not string.startswith(b("\x30")):
n = string[0] if isinstance(string[0], integer_types) else ord(string[0])
raise UnexpectedDER("wanted sequence (0x30), got 0x%02x" % n)
length, lengthlength = read_length(string[1:])
endseq = 1+lengthlength+length
return string[1+lengthlength:endseq], string[endseq:]
def remove_octet_string(string):
if not string.startswith(b("\x04")):
n = string[0] if isinstance(string[0], integer_types) else ord(string[0])
raise UnexpectedDER("wanted octetstring (0x04), got 0x%02x" % n)
length, llen = read_length(string[1:])
body = string[1+llen:1+llen+length]
rest = string[1+llen+length:]
return body, rest
def remove_object(string):
if not string.startswith(b("\x06")):
n = string[0] if isinstance(string[0], integer_types) else ord(string[0])
raise UnexpectedDER("wanted object (0x06), got 0x%02x" % n)
length, lengthlength = read_length(string[1:])
body = string[1+lengthlength:1+lengthlength+length]
rest = string[1+lengthlength+length:]
numbers = []
while body:
n, ll = read_number(body)
numbers.append(n)
body = body[ll:]
n0 = numbers.pop(0)
first = n0//40
second = n0-(40*first)
numbers.insert(0, first)
numbers.insert(1, second)
return tuple(numbers), rest
def remove_integer(string):
if not string.startswith(b("\x02")):
n = string[0] if isinstance(string[0], integer_types) else ord(string[0])
raise UnexpectedDER("wanted integer (0x02), got 0x%02x" % n)
length, llen = read_length(string[1:])
numberbytes = string[1+llen:1+llen+length]
rest = string[1+llen+length:]
nbytes = numberbytes[0] if isinstance(numberbytes[0], integer_types) else ord(numberbytes[0])
assert nbytes < 0x80 # can't support negative numbers yet
return int(binascii.hexlify(numberbytes), 16), rest
def read_number(string):
number = 0
llen = 0
# base-128 big endian, with b7 set in all but the last byte
while True:
if llen > len(string):
raise UnexpectedDER("ran out of length bytes")
number = number << 7
d = string[llen] if isinstance(string[llen], integer_types) else ord(string[llen])
number += (d & 0x7f)
llen += 1
if not d & 0x80:
break
return number, llen
def encode_length(l):
assert l >= 0
if l < 0x80:
return int2byte(l)
s = ("%x" % l).encode()
if len(s)%2:
s = b("0")+s
s = binascii.unhexlify(s)
llen = len(s)
return int2byte(0x80|llen) + s
def read_length(string):
num = string[0] if isinstance(string[0], integer_types) else ord(string[0])
if not (num & 0x80):
# short form
return (num & 0x7f), 1
# else long-form: b0&0x7f is number of additional base256 length bytes,
# big-endian
llen = num & 0x7f
if llen > len(string)-1:
raise UnexpectedDER("ran out of length bytes")
return int(binascii.hexlify(string[1:1+llen]), 16), 1+llen
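# Informal examples: read_length(b'\x05' + payload) == (5, 1) (short form), while
# read_length(b'\x81\x05' + payload) == (5, 2) because 0x81 announces one extra
# big-endian length byte.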
def remove_bitstring(string):
num = string[0] if isinstance(string[0], integer_types) else ord(string[0])
if not string.startswith(b("\x03")):
raise UnexpectedDER("wanted bitstring (0x03), got 0x%02x" % num)
length, llen = read_length(string[1:])
body = string[1+llen:1+llen+length]
rest = string[1+llen+length:]
return body, rest
# SEQUENCE([1, STRING(secexp), cont[0], OBJECT(curvename), cont[1], BITSTRING])
# signatures: (from RFC3279)
# ansi-X9-62 OBJECT IDENTIFIER ::= {
# iso(1) member-body(2) us(840) 10045 }
#
# id-ecSigType OBJECT IDENTIFIER ::= {
# ansi-X9-62 signatures(4) }
# ecdsa-with-SHA1 OBJECT IDENTIFIER ::= {
# id-ecSigType 1 }
## so 1,2,840,10045,4,1
## so 0x42, .. ..
# Ecdsa-Sig-Value ::= SEQUENCE {
# r INTEGER,
# s INTEGER }
# id-public-key-type OBJECT IDENTIFIER ::= { ansi-X9.62 2 }
#
# id-ecPublicKey OBJECT IDENTIFIER ::= { id-publicKeyType 1 }
# I think the secp224r1 identifier is (t=06,l=05,v=2b81040021)
# secp224r1 OBJECT IDENTIFIER ::= {
# iso(1) identified-organization(3) certicom(132) curve(0) 33 }
# and the secp384r1 is (t=06,l=05,v=2b81040022)
# secp384r1 OBJECT IDENTIFIER ::= {
# iso(1) identified-organization(3) certicom(132) curve(0) 34 }
def unpem(pem):
if isinstance(pem, text_type):
pem = pem.encode()
d = b("").join([l.strip() for l in pem.split(b("\n"))
if l and not l.startswith(b("-----"))])
return base64.b64decode(d)
def topem(der, name):
b64 = base64.b64encode(der)
lines = [("-----BEGIN %s-----\n" % name).encode()]
lines.extend([b64[start:start+64]+b("\n")
for start in range(0, len(b64), 64)])
lines.append(("-----END %s-----\n" % name).encode())
return b("").join(lines)
|
gpl-3.0
|
naev/naev
|
utils/on-valid/readers/ship.py
|
20
|
2802
|
# -*- coding: utf-8 -*-
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=80:
import os,sys
from glob import glob
from ._Readers import readers
class ship(readers):
def __init__(self, **config):
shipsXml = glob(os.path.join(config['datpath'], 'ships/*.xml'))
readers.__init__(self, shipsXml, config['verbose'])
self._componentName = 'ship'
self._tech = config['tech']
self._fleet = config['fleetobj']
self.used = list()
self.unknown = list()
self.nameList = list()
self.missingTech = list()
self.missingLua = list()
self.missionInTech = list()
print('Compiling ship list ...',end=' ')
try:
for ship in self.xmlData:
ship = ship.getroot()
name = ship.attrib['name']
self.nameList.append(name)
if ship.find('mission') is None:
if not self._tech.findItem(name):
self.missingTech.append(name)
else:
self.used.append(name)
else:
self.missingLua.append(name)
if self._tech.findItem(name):
self.missionInTech.append(name)
except Exception as e:
print('FAILED')
raise e
else:
print("DONE")
# Remove ships that are in fleets.
for ship in list(self.missingLua):
if self._fleet.findPilots(ship=ship):
self.missingLua.remove(ship)
if ship not in self.used:
self.used.append(ship)
self.missingLua.sort()
def find(self, name):
if name in self.nameList:
if name in self.missingLua:
self.missingLua.remove(name)
if name not in self.used:
self.used.append(name)
return True
else:
return False
def showMissingTech(self):
if len(self.missingTech) > 0 or len(self.missingLua) > 0:
print('\nship.xml unused items:')
# Player-buyable ships.
if len(self.missingTech) > 0:
for item in self.missingTech:
print("Warning: item ''{0}`` is not found in tech.xml".format(item))
# Mission-specific ships.
if len(self.missingLua) > 0:
for item in self.missingLua:
print("Warning: mission item ''{0}`` is not found in "\
"fleet.xml nor lua files".format(item))
# Mission-specific ships should never be in tech.xml
if len(self.missionInTech) > 0:
for item in self.missionInTech:
print("Warning: mission item ''{0}`` was found in tech.xml".format(item))
|
gpl-3.0
|
HewlettPackard/oneview-ansible
|
test/test_oneview_rack.py
|
1
|
6732
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2020) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import pytest
import mock
from copy import deepcopy
from hpe_test_utils import OneViewBaseTest
from oneview_module_loader import RackModule
FAKE_MSG_ERROR = 'Fake message error'
DEFAULT_RACK_TEMPLATE = dict(
name='New Rack 2',
autoLoginRedistribution=True,
fabricType='FabricAttach'
)
UPDATED_RACK_TEMPLATE = dict(
name='New Rack 2',
newName='Rename Rack',
autoLoginRedistribution=True,
fabricType='FabricAttach',
rackMounts=[{'mountUri': '/rest/server-hardware/31393736-3831-4753-567h-30335837524E', 'topUSlot': 20},
{'mountUri': '/rest/server-hardware/31393736-3831-4753-567h-30335837526F', 'topUSlot': 20}]
)
UPDATED_RACK_TEMPLATE_WITH_DIFFERENT_MOUNTURIS = dict(
name='New Rack 2',
newName='Rename Rack',
autoLoginRedistribution=True,
fabricType='FabricAttach',
rackMounts=[{'mountUri': '/rest/server-hardware/31393736-3831-4753-568h-30335837526F', 'topUSlot': 22}]
)
PARAMS_FOR_PRESENT = dict(
config='config.json',
state='present',
data=dict(name=DEFAULT_RACK_TEMPLATE['name'])
)
PARAMS_WITH_CHANGES = dict(
config='config.json',
state='present',
data=UPDATED_RACK_TEMPLATE
)
PARAMS_WITH_MOUNTURI = dict(
config='config.json',
state='present',
data=UPDATED_RACK_TEMPLATE_WITH_DIFFERENT_MOUNTURIS
)
PARAMS_FOR_ABSENT = dict(
config='config.json',
state='absent',
data=dict(name=DEFAULT_RACK_TEMPLATE['name'])
)
RACK_TEMPLATE_WITH_NEWNAME = dict(
name='Rename Rack',
autoLoginRedistribution=True,
fabricType='FabricAttach',
rackMounts=[{'mountUri': '/rest/server-hardware/31393736-3831-4753-568h-30335837526F', 'topUSlot': 22}]
)
@pytest.mark.resource(TestRackModule='racks')
class TestRackModule(OneViewBaseTest):
"""
OneViewBaseTestCase provides the mocks used in this test case.
"""
def test_should_create_new_rack(self):
self.resource.get_by.return_value = []
self.resource.add.return_value = DEFAULT_RACK_TEMPLATE
self.mock_ansible_module.params = PARAMS_FOR_PRESENT
RackModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=RackModule.MSG_ADDED,
ansible_facts=dict(rack=DEFAULT_RACK_TEMPLATE)
)
def test_should_create_new_rack_if_newName_not_exists(self):
self.resource.get_by.return_value = []
self.resource.add.return_value = RACK_TEMPLATE_WITH_NEWNAME
self.mock_ansible_module.params = PARAMS_WITH_CHANGES
RackModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=RackModule.MSG_ADDED,
ansible_facts=dict(rack=RACK_TEMPLATE_WITH_NEWNAME)
)
def test_should_not_update_when_data_is_equals(self):
self.resource.get_by.return_value = [DEFAULT_RACK_TEMPLATE]
self.mock_ansible_module.params = PARAMS_FOR_PRESENT
RackModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
msg=RackModule.MSG_ALREADY_PRESENT,
ansible_facts=dict(rack=DEFAULT_RACK_TEMPLATE)
)
def test_should_update(self):
self.resource.get_by.return_value = [DEFAULT_RACK_TEMPLATE]
self.resource.update.return_value = DEFAULT_RACK_TEMPLATE
self.mock_ansible_module.params = PARAMS_WITH_CHANGES
RackModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=RackModule.MSG_UPDATED,
ansible_facts=dict(rack=DEFAULT_RACK_TEMPLATE)
)
def test_update_when_data_has_modified_attributes_with_different_mountUris(self):
data_merged = DEFAULT_RACK_TEMPLATE.copy()
DEFAULT_RACK_TEMPLATE['rackMounts'] = [{'mountUri': '/rest/server-hardware/31393736-3831-4753-569h-30335837524E', 'topUSlot': 20}]
data_merged['name'] = 'Rename Rack'
self.resource.update.return_value = data_merged
self.resource.data = DEFAULT_RACK_TEMPLATE
self.resource.get_by.return_value = [UPDATED_RACK_TEMPLATE_WITH_DIFFERENT_MOUNTURIS]
self.mock_ansible_module.params = PARAMS_WITH_CHANGES
RackModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=RackModule.MSG_UPDATED,
ansible_facts=dict(rack=data_merged)
)
def test_update_when_data_has_modified_attributes_with_same_mountUris(self):
DEFAULT_RACK_TEMPLATE['rackMounts'] = [{'mountUri': '/rest/server-hardware/31393736-3831-4753-568h-30335837526F', 'topUSlot': 22}]
data_merged = deepcopy(DEFAULT_RACK_TEMPLATE)
data_merged['name'] = 'Rename Rack'
self.resource.update.return_value = data_merged
self.resource.data = DEFAULT_RACK_TEMPLATE
self.mock_ansible_module.params = PARAMS_WITH_MOUNTURI
self.resource.get_by.return_value = [UPDATED_RACK_TEMPLATE_WITH_DIFFERENT_MOUNTURIS]
RackModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
msg=RackModule.MSG_ALREADY_PRESENT,
ansible_facts=dict(rack=data_merged)
)
def test_should_remove_rack(self):
self.resource.get_by.return_value = [DEFAULT_RACK_TEMPLATE]
self.mock_ansible_module.params = PARAMS_FOR_ABSENT
RackModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=RackModule.MSG_DELETED
)
def test_should_do_nothing_when_rack_not_exist(self):
self.resource.get_by.return_value = []
self.mock_ansible_module.params = PARAMS_FOR_ABSENT
RackModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
msg=RackModule.MSG_ALREADY_ABSENT,
ansible_facts=dict(rack=None)
)
if __name__ == '__main__':
pytest.main([__file__])
|
apache-2.0
|
a115027a/Openkore
|
src/scons-local-2.0.1/SCons/Platform/posix.py
|
61
|
8697
|
"""SCons.Platform.posix
Platform-specific initialization for POSIX (Linux, UNIX, etc.) systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/posix.py 5134 2010/08/16 23:02:40 bdeegan"
import errno
import os
import os.path
import subprocess
import sys
import select
import SCons.Util
from SCons.Platform import TempFileMunge
exitvalmap = {
2 : 127,
13 : 126,
}
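# Added note: the two entries above map OS errno values to conventional shell
# exit codes -- ENOENT (2) becomes 127 ("command not found") and EACCES (13)
# becomes 126 ("permission denied"), matching what /bin/sh itself reports.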
def escape(arg):
"escape shell special characters"
slash = '\\'
special = '"$()'
arg = arg.replace(slash, slash+slash)
for c in special:
arg = arg.replace(c, slash+c)
return '"' + arg + '"'
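# Added illustration (not part of the original module): escape() doubles any
# backslashes, backslash-escapes ", $, ( and ), then wraps the result in
# double quotes so it survives a pass through 'sh -c'.  For example,
# escape('say "$HOME"') produces the string "say \"\$HOME\"" including the
# surrounding double quotes.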
def exec_system(l, env):
stat = os.system(' '.join(l))
if stat & 0xff:
return stat | 0x80
return stat >> 8
def exec_spawnvpe(l, env):
stat = os.spawnvpe(os.P_WAIT, l[0], l, env)
# os.spawnvpe() returns the actual exit code, not the encoding
# returned by os.waitpid() or os.system().
return stat
def exec_fork(l, env):
pid = os.fork()
if not pid:
# Child process.
exitval = 127
try:
os.execvpe(l[0], l, env)
except OSError, e:
exitval = exitvalmap.get(e[0], e[0])
sys.stderr.write("scons: %s: %s\n" % (l[0], e[1]))
os._exit(exitval)
else:
# Parent process.
pid, stat = os.waitpid(pid, 0)
if stat & 0xff:
return stat | 0x80
return stat >> 8
def _get_env_command(sh, escape, cmd, args, env):
s = ' '.join(args)
if env:
l = ['env', '-'] + \
[escape(t[0])+'='+escape(t[1]) for t in env.items()] + \
[sh, '-c', escape(s)]
s = ' '.join(l)
return s
def env_spawn(sh, escape, cmd, args, env):
return exec_system([_get_env_command( sh, escape, cmd, args, env)], env)
def spawnvpe_spawn(sh, escape, cmd, args, env):
return exec_spawnvpe([sh, '-c', ' '.join(args)], env)
def fork_spawn(sh, escape, cmd, args, env):
return exec_fork([sh, '-c', ' '.join(args)], env)
def process_cmd_output(cmd_stdout, cmd_stderr, stdout, stderr):
stdout_eof = stderr_eof = 0
while not (stdout_eof and stderr_eof):
try:
(i,o,e) = select.select([cmd_stdout, cmd_stderr], [], [])
if cmd_stdout in i:
str = cmd_stdout.read()
if len(str) == 0:
stdout_eof = 1
elif stdout is not None:
stdout.write(str)
if cmd_stderr in i:
str = cmd_stderr.read()
if len(str) == 0:
#sys.__stderr__.write( "stderr_eof=1\n" )
stderr_eof = 1
else:
#sys.__stderr__.write( "str(stderr) = %s\n" % str )
stderr.write(str)
except select.error, (_errno, _strerror):
if _errno != errno.EINTR:
raise
def exec_popen3(l, env, stdout, stderr):
proc = subprocess.Popen(' '.join(l),
stdout=stdout,
stderr=stderr,
shell=True)
stat = proc.wait()
if stat & 0xff:
return stat | 0x80
return stat >> 8
def exec_piped_fork(l, env, stdout, stderr):
# spawn using fork / exec and providing a pipe for the command's
# stdout / stderr stream
if stdout != stderr:
(rFdOut, wFdOut) = os.pipe()
(rFdErr, wFdErr) = os.pipe()
else:
(rFdOut, wFdOut) = os.pipe()
rFdErr = rFdOut
wFdErr = wFdOut
# do the fork
pid = os.fork()
if not pid:
# Child process
os.close( rFdOut )
if rFdOut != rFdErr:
os.close( rFdErr )
os.dup2( wFdOut, 1 ) # is there some symbolic way to do that ?
os.dup2( wFdErr, 2 )
os.close( wFdOut )
if stdout != stderr:
os.close( wFdErr )
exitval = 127
try:
os.execvpe(l[0], l, env)
except OSError, e:
exitval = exitvalmap.get(e[0], e[0])
stderr.write("scons: %s: %s\n" % (l[0], e[1]))
os._exit(exitval)
else:
# Parent process
pid, stat = os.waitpid(pid, 0)
os.close( wFdOut )
if stdout != stderr:
os.close( wFdErr )
childOut = os.fdopen( rFdOut )
if stdout != stderr:
childErr = os.fdopen( rFdErr )
else:
childErr = childOut
process_cmd_output(childOut, childErr, stdout, stderr)
os.close( rFdOut )
if stdout != stderr:
os.close( rFdErr )
if stat & 0xff:
return stat | 0x80
return stat >> 8
def piped_env_spawn(sh, escape, cmd, args, env, stdout, stderr):
# spawn using Popen3 combined with the env command
# the command name and the command's stdout is written to stdout
# the command's stderr is written to stderr
return exec_popen3([_get_env_command(sh, escape, cmd, args, env)],
env, stdout, stderr)
def piped_fork_spawn(sh, escape, cmd, args, env, stdout, stderr):
# spawn using fork / exec and providing a pipe for the command's
# stdout / stderr stream
return exec_piped_fork([sh, '-c', ' '.join(args)],
env, stdout, stderr)
def generate(env):
# If os.spawnvpe() exists, we use it to spawn commands. Otherwise
# if the env utility exists, we use os.system() to spawn commands,
# finally we fall back on os.fork()/os.exec().
#
    # os.spawnvpe() is preferred because it is the most efficient.  But
    # for Python versions without it, os.system() is preferred because it
    # is claimed that it works better with threads (i.e. -j) and is more
    # efficient than forking Python.
#
# NB: Other people on the scons-users mailing list have claimed that
# os.fork()/os.exec() works better than os.system(). There may just
# not be a default that works best for all users.
if 'spawnvpe' in os.__dict__:
spawn = spawnvpe_spawn
elif env.Detect('env'):
spawn = env_spawn
else:
spawn = fork_spawn
if env.Detect('env'):
pspawn = piped_env_spawn
else:
pspawn = piped_fork_spawn
if 'ENV' not in env:
env['ENV'] = {}
env['ENV']['PATH'] = '/usr/local/bin:/opt/bin:/bin:/usr/bin'
env['OBJPREFIX'] = ''
env['OBJSUFFIX'] = '.o'
env['SHOBJPREFIX'] = '$OBJPREFIX'
env['SHOBJSUFFIX'] = '$OBJSUFFIX'
env['PROGPREFIX'] = ''
env['PROGSUFFIX'] = ''
env['LIBPREFIX'] = 'lib'
env['LIBSUFFIX'] = '.a'
env['SHLIBPREFIX'] = '$LIBPREFIX'
env['SHLIBSUFFIX'] = '.so'
env['LIBPREFIXES'] = [ '$LIBPREFIX' ]
env['LIBSUFFIXES'] = [ '$LIBSUFFIX', '$SHLIBSUFFIX' ]
env['PSPAWN'] = pspawn
env['SPAWN'] = spawn
env['SHELL'] = 'sh'
env['ESCAPE'] = escape
env['TEMPFILE'] = TempFileMunge
env['TEMPFILEPREFIX'] = '@'
    # Based on Linux: ARG_MAX=131072, minus 3000 for environment expansion
    # Note: specific platforms might raise or lower this value
env['MAXLINELENGTH'] = 128072
# This platform supports RPATH specifications.
env['__RPATH'] = '$_RPATH'
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
gpl-2.0
|
pk-sam/crosswalk-test-suite
|
tools/allpairs-plus/metacomm/combinatorics/all_pairs2.py
|
33
|
5816
|
import pairs_storage
from combinatorics import xuniqueCombinations
class item:
def __init__(self, id, value):
self.id = id
self.value = value
self.weights = []
def __str__(self):
return str(self.__dict__)
def get_max_comb_number( arr, n ):
items = [len(x) for x in arr]
#print items
f = lambda x,y:x*y
total = sum([ reduce(f, z) for z in xuniqueCombinations( items, n) ])
return total
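# Worked example (added for clarity): for two option arrays of sizes 2 and 3
# with n=2, xuniqueCombinations([2, 3], 2) yields only [2, 3], so the maximum
# number of unique pairs is 2 * 3 = 6.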
class all_pairs2:
def __iter__( self ):
return self
def __init__( self, options, filter_func = lambda x: True, previously_tested = [[]], n = 2 ):
"""
TODO: check that input arrays are:
- (optional) has no duplicated values inside single array / or compress such values
"""
if len( options ) < 2:
raise Exception("must provide more than one option")
for arr in options:
if not len(arr):
raise Exception("option arrays must have at least one item")
self.__filter_func = filter_func
self.__n = n
self.__pairs = pairs_storage.pairs_storage(n)
self.__max_unique_pairs_expected = get_max_comb_number( options, n )
self.__working_arr = []
for i in range( len( options )):
self.__working_arr.append( [ item("a%iv%i" % (i,j), value) \
for j, value in enumerate(options[i] ) ] )
for arr in previously_tested:
if len(arr) == 0:
continue
elif len(arr) != len(self.__working_arr):
raise Exception("previously tested combination is not complete")
if not self.__filter_func(arr):
raise Exception("invalid tested combination is provided")
tested = []
for i, val in enumerate(arr):
idxs = [item(node.id, 0) for node in self.__working_arr[i] if node.value == val]
if len(idxs) != 1:
raise Exception("value from previously tested combination is not found in the options or found more than once")
tested.append(idxs[0])
self.__pairs.add_sequence(tested)
def next( self ):
assert( len(self.__pairs) <= self.__max_unique_pairs_expected )
p = self.__pairs
if len(self.__pairs) == self.__max_unique_pairs_expected:
# no reasons to search further - all pairs are found
raise StopIteration
previous_unique_pairs_count= len(self.__pairs)
chosen_values_arr = [None] * len(self.__working_arr)
indexes = [None] * len(self.__working_arr)
direction = 1
i = 0
while -1 < i < len(self.__working_arr):
if direction == 1: # move forward
self.resort_working_array( chosen_values_arr[:i], i )
indexes[i] = 0
elif direction == 0 or direction == -1: # scan current array or go back
indexes[i] += 1
if indexes[i] >= len( self.__working_arr[i] ):
direction = -1
if i == 0:
raise StopIteration
i += direction
continue
direction = 0
else:
raise Exception("next(): unknown 'direction' code.")
chosen_values_arr[i] = self.__working_arr[i][ indexes[i] ]
if self.__filter_func( self.get_values_array( chosen_values_arr[:i+1] ) ):
assert(direction > -1)
direction = 1
else:
direction = 0
i += direction
if len( self.__working_arr ) != len(chosen_values_arr):
raise StopIteration
self.__pairs.add_sequence( chosen_values_arr )
if len(self.__pairs) == previous_unique_pairs_count:
# could not find new unique pairs - stop
raise StopIteration
# replace returned array elements with real values and return it
return self.get_values_array( chosen_values_arr )
def get_values_array( self, arr ):
return [ item.value for item in arr ]
def resort_working_array( self, chosen_values_arr, num ):
for item in self.__working_arr[num]:
data_node = self.__pairs.get_node_info( item )
new_combs = []
for i in range(0, self.__n):
# numbers of new combinations to be created if this item is appended to array
new_combs.append( set([pairs_storage.key(z) for z in xuniqueCombinations( chosen_values_arr+[item], i+1)]) - self.__pairs.get_combs()[i] )
# weighting the node
item.weights = [ -len(new_combs[-1]) ] # node that creates most of new pairs is the best
            item.weights += [ len(data_node.out) ] # less used outbound connections are most likely to produce more new pairs as the search continues
item.weights += [ len(x) for x in reversed(new_combs[:-1])]
item.weights += [ -data_node.counter ] # less used node is better
item.weights += [ -len(data_node.in_) ] # otherwise we will prefer node with most of free inbound connections; somehow it works out better ;)
self.__working_arr[num].sort( lambda a,b: cmp(a.weights, b.weights) )
# statistics, internal stuff
def get_pairs_found( self ):
return self.__pairs
__export__ = [ all_pairs2, get_max_comb_number ]
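# Minimal usage sketch (added; the parameter values are illustrative and this
# assumes the module's helpers are importable as above):
#
#   parameters = [["Brand X", "Brand Y"],
#                 ["98", "NT", "2000", "XP"],
#                 ["Internal", "Modem"]]
#   for i, combination in enumerate(all_pairs2(parameters)):
#       print "%i: %s" % (i, combination)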
|
bsd-3-clause
|
cortedeltimo/SickRage
|
lib/twilio/rest/resources/sms_messages.py
|
51
|
6376
|
from .util import normalize_dates, parse_date
from . import InstanceResource, ListResource
class ShortCode(InstanceResource):
def update(self, **kwargs):
return self.parent.update(self.name, **kwargs)
class ShortCodes(ListResource):
name = "ShortCodes"
key = "short_codes"
instance = ShortCode
def list(self, **kwargs):
"""
Returns a page of :class:`ShortCode` resources as a list. For
paging information see :class:`ListResource`.
:param short_code: Only show the ShortCode resources that match this
pattern. You can specify partial numbers and use '*'
as a wildcard for any digit.
:param friendly_name: Only show the ShortCode resources with friendly
names that exactly match this name.
"""
return self.get_instances(kwargs)
def update(self, sid, url=None, method=None, fallback_url=None,
fallback_method=None, **kwargs):
"""
Update a specific :class:`ShortCode`, by specifying the sid.
:param friendly_name: Description of the short code, with maximum
length 64 characters.
:param api_version: SMSs to this short code will start a new TwiML
session with this API version.
:param url: The URL that Twilio should request when somebody sends an
SMS to the short code.
:param method: The HTTP method that should be used to request the url.
:param fallback_url: A URL that Twilio will request if an error occurs
requesting or executing the TwiML at the url.
:param fallback_method: The HTTP method that should be used to request
the fallback_url.
"""
kwargs["sms_url"] = kwargs.get("sms_url", url)
kwargs["sms_method"] = kwargs.get("sms_method", method)
kwargs["sms_fallback_url"] = \
kwargs.get("sms_fallback_url", fallback_url)
kwargs["sms_fallback_method"] = \
kwargs.get("sms_fallback_method", fallback_method)
return self.update_instance(sid, kwargs)
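# Hypothetical usage sketch (added; the client object and sid value are
# illustrative, not defined in this file):
#
#   client.sms.short_codes.update("SCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
#                                 url="https://example.com/sms-handler",
#                                 method="POST")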
class Sms(object):
"""
Holds all the specific SMS list resources
"""
name = "SMS"
key = "sms"
def __init__(self, base_uri, auth, timeout):
self.uri = "%s/SMS" % base_uri
self.messages = SmsMessages(self.uri, auth, timeout)
self.short_codes = ShortCodes(self.uri, auth, timeout)
class SmsMessage(InstanceResource):
""" An instance of an SMS Message
.. attribute:: sid
A 34 character string that uniquely identifies this resource.
.. attribute:: date_created
The date that this resource was created, given in RFC 2822 format.
.. attribute:: date_updated
The date that this resource was last updated, given in RFC 2822 format.
.. attribute:: date_sent
The date that the SMS was sent, given in RFC 2822 format.
.. attribute:: account_sid
The unique id of the Account that sent this SMS message.
.. attribute:: from
The phone number that initiated the message in E.164 format.
For incoming messages, this will be the remote phone.
For outgoing messages, this will be one of your Twilio phone numbers.
.. attribute:: to
The phone number that received the message in E.164 format.
For incoming messages, this will be one of your Twilio phone numbers.
For outgoing messages, this will be the remote phone.
.. attribute:: body
The text body of the SMS message.
.. attribute:: status
The status of this SMS message. Either queued, sending, sent, or failed.
.. attribute:: direction
The direction of this SMS message. ``incoming`` for incoming
messages, ``outbound-api`` for messages initiated via the REST
API, ``outbound-call`` for messages initiated during a call or
``outbound-reply`` for messages initiated in response to an incoming
SMS.
.. attribute:: price
The amount billed for the message.
.. attribute:: api_version
The version of the Twilio API used to process the SMS message.
.. attribute:: uri
The URI for this resource, relative to https://api.twilio.com
"""
pass
class SmsMessages(ListResource):
name = "Messages"
key = "sms_messages"
instance = SmsMessage
def create(self, from_=None, **kwargs):
"""
        Create and send an SMS message.
:param str to: The destination phone number.
:param str `from_`: The phone number sending this message
(must be a verified Twilio number)
:param str body: The message you want to send,
limited to 160 characters.
:param status_callback: A URL that Twilio will POST to when
your message is processed.
:param str application_sid: The 34 character sid of the application
Twilio should use to handle this phone call.
Usage:
        .. code-block:: python
message = client.sms.messages.create(to="+12316851234",
from_="+15555555555",
body="Hello there!")
"""
kwargs["from"] = from_
return self.create_instance(kwargs)
@normalize_dates
def list(self, from_=None, before=None, after=None, date_sent=None, **kw):
"""
Returns a page of :class:`~twilio.rest.resources.SmsMessage` resources
as a list. For paging information see :class:`ListResource`.
        :param to: Only show SMS messages to this phone number.
        :param from_: Only show SMS messages from this phone number.
        :param date after: Only list SMS messages sent after this date.
        :param date before: Only list SMS messages sent before this date.
        :param date date_sent: Only list SMS messages sent on this date.
"""
kw["From"] = from_
kw["DateSent<"] = before
kw["DateSent>"] = after
kw["DateSent"] = parse_date(date_sent)
return self.get_instances(kw)
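# Hedged usage sketch (added; client construction is not shown in this file
# and the phone number and date are illustrative):
#
#   import datetime
#   recent = client.sms.messages.list(from_="+15555555555",
#                                     after=datetime.date(2013, 1, 1))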
|
gpl-3.0
|
Isabek/python-koans
|
python2/koans/about_monkey_patching.py
|
1
|
1428
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Related to AboutOpenClasses in the Ruby Koans
#
from runner.koan import *
class AboutMonkeyPatching(Koan):
class Dog(object):
def bark(self):
return "WOOF"
def test_as_defined_dogs_do_bark(self):
fido = self.Dog()
self.assertEqual('WOOF', fido.bark())
# ------------------------------------------------------------------
# Add a new method to an existing class.
def test_after_patching_dogs_can_both_wag_and_bark(self):
def wag(self):
return "HAPPY"
self.Dog.wag = wag
fido = self.Dog()
self.assertEqual("HAPPY", fido.wag())
self.assertEqual("WOOF", fido.bark())
# ------------------------------------------------------------------
def test_most_built_in_classes_cannot_be_monkey_patched(self):
try:
int.is_even = lambda self: (self % 2) == 0
except StandardError as ex:
self.assertMatch("can't set attributes of built-in/extension type 'int'", ex[0])
# ------------------------------------------------------------------
class MyInt(int):
pass
def test_subclasses_of_built_in_classes_can_be_be_monkey_patched(self):
self.MyInt.is_even = lambda self: (self % 2) == 0
self.assertEqual(False, self.MyInt(1).is_even())
self.assertEqual(True, self.MyInt(2).is_even())
|
mit
|
aljscott/phantomjs
|
src/qt/qtwebkit/Tools/QueueStatusServer/loggers/recordbotevent.py
|
122
|
1925
|
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from config.logging import queue_log_duration
from model.queuelog import QueueLog
class RecordBotEvent(object):
@classmethod
def record_activity(cls, queue_name, bot_id):
queue_log = QueueLog.get_current(queue_name, queue_log_duration)
if queue_log and bot_id not in queue_log.bot_ids_seen:
queue_log.bot_ids_seen.append(bot_id)
queue_log.put()
|
bsd-3-clause
|
leotrubach/sourceforge-allura
|
Allura/allura/command/smtp_server.py
|
3
|
1176
|
import smtpd
import asyncore
import tg
from paste.script import command
import allura.tasks
from allura.command import base
from paste.deploy.converters import asint
class SMTPServerCommand(base.Command):
min_args=1
max_args=1
usage = '<ini file>'
summary = 'Handle incoming emails, routing them to RabbitMQ'
parser = command.Command.standard_parser(verbose=True)
parser.add_option('-c', '--context', dest='context',
help=('The context of the message (path to the project'
                            ' and/or tool)'))
def command(self):
self.basic_setup()
MailServer((tg.config.get('forgemail.host', '0.0.0.0'),
asint(tg.config.get('forgemail.port', 8825))),
None)
asyncore.loop()
class MailServer(smtpd.SMTPServer):
def process_message(self, peer, mailfrom, rcpttos, data):
base.log.info('Msg Received from %s for %s', mailfrom, rcpttos)
base.log.info(' (%d bytes)', len(data))
allura.tasks.mail_tasks.route_email(
peer=peer, mailfrom=mailfrom, rcpttos=rcpttos, data=data)
base.log.info('Msg passed along')
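# Hedged usage note (added): as a paste.script command this is expected to be
# invoked through paster with the application's ini file, along the lines of
#
#   paster smtp_server /path/to/development.ini
#
# The exact command name registered in setup.py is an assumption here.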
|
apache-2.0
|
developerator/Maturaarbeit
|
GenerativeAdversarialNets/CelebA32 with DCGAN/CelebA32_dcgan.py
|
1
|
6898
|
'''
By Tim Ehrensberger
The training loop and helper functions are based on https://github.com/Zackory/Keras-MNIST-GAN/blob/master/mnist_gan.py by Zackory Erickson
The network structure is inspired by https://github.com/aleju/face-generator by Alexander Jung
'''
import os
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from keras.layers import Input, BatchNormalization, Activation, MaxPooling2D
from keras.models import Model, Sequential
from keras.layers.core import Reshape, Dense, Dropout, Flatten
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import Convolution2D, UpSampling2D
from keras.datasets import cifar10
from keras.optimizers import Adam
from keras.regularizers import l1_l2
#------
# DATA
#------
from keras import backend as K
K.set_image_dim_ordering('th')
import h5py
# Get hdf5 file
hdf5_file = os.path.join("PATH TO DATASET", "CelebA_32_data.h5")
with h5py.File(hdf5_file, "r") as hf:
X_train = hf["data"] [()] #[()] makes it read the whole thing
X_train = X_train.astype(np.float32) / 255
#----------------
# HYPERPARAMETERS
#----------------
randomDim = 100
adam = Adam(lr=0.0002, beta_1=0.5)
reg = lambda: l1_l2(l1=1e-7, l2=1e-7)
#dropout = 0.4
#-----------
# Generator
#-----------
h = 5
generator = Sequential()
#In: 100
generator.add(Dense(256 * 4 * 4, input_dim=100, kernel_regularizer=reg()))
generator.add(BatchNormalization())
generator.add(Reshape((256, 4, 4)))
#generator.add(Dropout(dropout))
#Out: 256 x 4 x 4
#In: 256 x 4 x 4
generator.add(UpSampling2D(size=(2, 2)))
generator.add(Convolution2D(128, (h, h), padding='same', kernel_regularizer=reg())) #1
generator.add(BatchNormalization(axis=1))
generator.add(LeakyReLU(0.2))
#generator.add(Dropout(dropout))
#Out: 128 x 8 x 8
#In: 128 x 8 x 8
generator.add(UpSampling2D(size=(2, 2)))
generator.add(Convolution2D(128, (h, h), padding='same', kernel_regularizer=reg())) #2
generator.add(BatchNormalization(axis=1))
generator.add(LeakyReLU(0.2))
#generator.add(Dropout(dropout))
#Out: 128 x 16 x 16
#In: 128 x 16 x 16
generator.add(UpSampling2D(size=(2, 2)))
generator.add(Convolution2D(64, (h, h), padding='same', kernel_regularizer=reg())) #3
generator.add(BatchNormalization(axis=1))
generator.add(LeakyReLU(0.2))
#generator.add(Dropout(dropout))
#Out: 64 x 32 x 32
#In: 64 x 32 x 32
generator.add(Convolution2D(3, (h, h), padding='same', kernel_regularizer=reg())) #4
generator.add(Activation('sigmoid'))
#Out: 3 x 32 x 32
generator.compile(loss='binary_crossentropy', optimizer=adam)
#--------------
# Discriminator
#--------------
discriminator = Sequential()
#In: 3 x 32 x 32
discriminator.add(Convolution2D(64, (h, h), padding='same', input_shape=(3, 32, 32), kernel_regularizer=reg()))
discriminator.add(MaxPooling2D(pool_size=(2, 2)))
discriminator.add(LeakyReLU(0.2))
#Out: 64 x 16 x 16
#In: 64 x 16 x 16
discriminator.add(Convolution2D(128, (h, h), padding='same', kernel_regularizer=reg()))
discriminator.add(MaxPooling2D(pool_size=(2, 2)))
discriminator.add(LeakyReLU(0.2))
#Out: 128 x 8 x 8
#In: 128 x 8 x 8
discriminator.add(Convolution2D(256, (h, h), padding='same', kernel_regularizer=reg()))
discriminator.add(MaxPooling2D(pool_size=(2, 2)))#Average?
discriminator.add(LeakyReLU(0.2))
#Out: 256 x 4 x 4
#In: 256 x 4 x 4
discriminator.add(Flatten())
discriminator.add(Dense(512))
discriminator.add(LeakyReLU(0.2))
#discriminator.add(Dropout(dropout))
discriminator.add(Dense(1))
discriminator.add(Activation('sigmoid'))
#Out: 1 (Probability)
discriminator.compile(loss='binary_crossentropy', optimizer=adam)
#-----
# GAN
#-----
discriminator.trainable = False
ganInput = Input(shape=(randomDim,))
x = generator(ganInput)
ganOutput = discriminator(x)
gan = Model(inputs=ganInput, outputs=ganOutput)
gan.compile(loss='binary_crossentropy', optimizer=adam)
#-----------
# FUNCTIONS
#-----------
dLosses = []
gLosses = []
def plotLoss(epoch):
assertExists('images')
plt.figure(figsize=(10, 8))
plt.plot(dLosses, label='Discriminative loss')
plt.plot(gLosses, label='Generative loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.savefig('images/dcgan_loss_epoch_%d.png' % epoch)
# Create a wall of generated images
def plotGeneratedImages(epoch, examples=100, dim=(10, 10), figsize=(10, 10)):
    noise = np.random.normal(0, 1, size=[examples, randomDim])
    generatedImages = generator.predict(noise)
generatedImages = generatedImages.transpose(0, 2, 3, 1) #transpose is crucial
assertExists('images')
plt.figure(figsize=figsize)
for i in range(generatedImages.shape[0]):
plt.subplot(dim[0], dim[1], i+1)
plt.imshow(generatedImages[i, :, :, :], interpolation='nearest')
plt.axis('off')
plt.tight_layout()
plt.savefig('images/dcgan_generated_image_epoch_%d.png' % epoch)
# Save the generator and discriminator networks (and weights) for later use
def savemodels(epoch):
assertExists('models')
generator.save('models/dcgan_generator_epoch_%d.h5' % epoch)
discriminator.save('models/dcgan_discriminator_epoch_%d.h5' % epoch)
def train(epochs=1, batchSize=128):
batchCount = X_train.shape[0] // batchSize
print('Epochs:', epochs)
print('Batch size:', batchSize)
print('Batches per epoch:', batchCount)
for e in range(1, epochs+1):
print('-'*15, 'Epoch %d' % e, '-'*15)
for _ in tqdm(range(batchCount)):
# Get a random set of input noise and images
noise = np.random.normal(0, 1, size=[batchSize, randomDim])
imageBatch = X_train[np.random.randint(0, X_train.shape[0], size=batchSize)]
# Generate fake images
generatedImages = generator.predict(noise)
X = np.concatenate([imageBatch, generatedImages])
# Labels for generated and real data
yDis = np.zeros(2*batchSize)
# One-sided label smoothing = not exactly 1
yDis[:batchSize] = 0.9
# Train discriminator
discriminator.trainable = True
dloss = discriminator.train_on_batch(X, yDis) # here only D is trained
# Train generator
noise = np.random.normal(0, 1, size=[batchSize, randomDim])
yGen = np.ones(batchSize)
discriminator.trainable = False
gloss = gan.train_on_batch(noise, yGen) # here only G is trained because D is not trainable
# Store loss of most recent batch from this epoch
dLosses.append(dloss)
gLosses.append(gloss)
#plot after every epoch
plotGeneratedImages(e)
savemodels(e)
# Plot losses from every epoch
plotLoss(e)
def assertExists(path):
if not os.path.exists(path):
os.makedirs(path)
if __name__ == '__main__':
train(100, 128)
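# Hedged follow-up sketch (added): to sample images from a generator saved by
# savemodels() above, something like the following should work (the epoch
# number and sample count are illustrative):
#
#   from keras.models import load_model
#   g = load_model('models/dcgan_generator_epoch_100.h5')
#   samples = g.predict(np.random.normal(0, 1, size=[16, randomDim]))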
|
mit
|
lewismc/topik
|
topik/preprocessing.py
|
1
|
2170
|
"""
This file is concerned with preparing input to modeling.
Put things like lowercasing, tokenizing, and vectorizing here.
Generally, methods to process text operate on single documents at a time. This
is done to facilitate parallelization over collections.
"""
# this is our output from whatever preprocessing we do.  It is input to the modeling step.
from topik.intermediaries.digested_document_collection import DigestedDocumentCollection
from topik.tokenizers import tokenizer_methods
def _get_parameter_string(**kwargs):
"""Used to create identifiers for output"""
id = ''.join('{}={}_'.format(key, val) for key, val in sorted(kwargs.items()))
return id[:-1]
def _to_lower(raw_record):
return raw_record.lower()
def _tokenize(raw_record, method="simple", **tokenizer_kwargs):
"""Tokenize a single document into individual words"""
tokenized_document = tokenizer_methods[method](raw_record, **tokenizer_kwargs)
return tokenized_document
def preprocess(raw_data, tokenizer_method="simple", **kwargs):
"""Convert data to lowercase; tokenize; create bag of words collection.
Output from this function is used as input to modeling steps.
raw_data: iterable corpus object containing the text to be processed.
Each iteration call should return a new document's content.
tokenizer_method: string id of tokenizer to use. For keys, see
topik.tokenizers.tokenizer_methods (which is a dictionary of classes)
    kwargs: arbitrary dictionary of extra parameters.  These are passed both
to the tokenizer and to the vectorizer steps.
"""
parameters_string = _get_parameter_string(method=tokenizer_method, **kwargs)
token_path = "tokens_"+parameters_string
for record_id, raw_record in raw_data:
tokenized_record = _tokenize(_to_lower(raw_record),
method=tokenizer_method,
**kwargs)
# TODO: would be nice to aggregate batches and append in bulk
raw_data.append_to_record(record_id, token_path, tokenized_record)
return DigestedDocumentCollection(raw_data.get_field(field=token_path))
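# Minimal usage sketch (added; assumes a corpus object that implements the
# iteration, append_to_record and get_field interface described in the
# docstring above -- 'my_corpus' is illustrative):
#
#   digested = preprocess(my_corpus, tokenizer_method="simple")
#   # 'digested' is a DigestedDocumentCollection ready for the modeling step.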
|
bsd-3-clause
|
Serag8/Bachelor
|
google_appengine/lib/django-1.4/django/core/mail/backends/filebased.py
|
394
|
2485
|
"""Email backend that writes messages to a file."""
import datetime
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail.backends.console import EmailBackend as ConsoleEmailBackend
class EmailBackend(ConsoleEmailBackend):
def __init__(self, *args, **kwargs):
self._fname = None
if 'file_path' in kwargs:
self.file_path = kwargs.pop('file_path')
else:
self.file_path = getattr(settings, 'EMAIL_FILE_PATH',None)
# Make sure self.file_path is a string.
if not isinstance(self.file_path, basestring):
raise ImproperlyConfigured('Path for saving emails is invalid: %r' % self.file_path)
self.file_path = os.path.abspath(self.file_path)
        # Make sure that self.file_path is a directory if it exists.
if os.path.exists(self.file_path) and not os.path.isdir(self.file_path):
raise ImproperlyConfigured('Path for saving email messages exists, but is not a directory: %s' % self.file_path)
        # Try to create it, if it does not exist.
elif not os.path.exists(self.file_path):
try:
os.makedirs(self.file_path)
except OSError, err:
raise ImproperlyConfigured('Could not create directory for saving email messages: %s (%s)' % (self.file_path, err))
# Make sure that self.file_path is writable.
if not os.access(self.file_path, os.W_OK):
raise ImproperlyConfigured('Could not write to directory: %s' % self.file_path)
# Finally, call super().
# Since we're using the console-based backend as a base,
# force the stream to be None, so we don't default to stdout
kwargs['stream'] = None
super(EmailBackend, self).__init__(*args, **kwargs)
def _get_filename(self):
"""Return a unique file name."""
if self._fname is None:
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
fname = "%s-%s.log" % (timestamp, abs(id(self)))
self._fname = os.path.join(self.file_path, fname)
return self._fname
def open(self):
if self.stream is None:
self.stream = open(self._get_filename(), 'a')
return True
return False
def close(self):
try:
if self.stream is not None:
self.stream.close()
finally:
self.stream = None
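# Hedged configuration sketch (added): to activate this backend, point Django
# at it from settings.py and give it a writable directory (path illustrative):
#
#   EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
#   EMAIL_FILE_PATH = '/tmp/app-messages'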
|
mit
|
aferr/TimingCompartments
|
src/arch/x86/isa/insts/general_purpose/control_transfer/interrupts_and_exceptions.py
|
25
|
7464
|
# Copyright (c) 2007-2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop IRET_REAL {
.serializing
panic "Real mode iret isn't implemented!"
};
def macroop IRET_PROT {
.serializing
.adjust_env oszIn64Override
# Check for a nested task. This isn't supported at the moment.
rflag t1, 14; #NT bit
panic "Task switching with iret is unimplemented!", flags=(nCEZF,)
#t1 = temp_RIP
#t2 = temp_CS
#t3 = temp_RFLAGS
#t4 = handy m5 register
# Pop temp_RIP, temp_CS, and temp_RFLAGS
ld t1, ss, [1, t0, rsp], "0 * env.stackSize", dataSize=ssz
ld t2, ss, [1, t0, rsp], "1 * env.stackSize", dataSize=ssz
ld t3, ss, [1, t0, rsp], "2 * env.stackSize", dataSize=ssz
# Read the handy m5 register for use later
rdm5reg t4
###
### Handle if we're returning to virtual 8086 mode.
###
#IF ((temp_RFLAGS.VM=1) && (CPL=0) && (LEGACY_MODE))
# IRET_FROM_PROTECTED_TO_VIRTUAL
#temp_RFLAGS.VM != 1
rcri t0, t3, 18, flags=(ECF,)
br label("protToVirtFallThrough"), flags=(nCECF,)
#CPL=0
andi t0, t4, 0x30, flags=(EZF,)
br label("protToVirtFallThrough"), flags=(nCEZF,)
#(LEGACY_MODE)
rcri t0, t4, 1, flags=(ECF,)
br label("protToVirtFallThrough"), flags=(nCECF,)
panic "iret to virtual mode not supported"
protToVirtFallThrough:
#temp_CPL = temp_CS.rpl
andi t5, t2, 0x3
###
### Read in the info for the new CS segment.
###
#CS = READ_DESCRIPTOR (temp_CS, iret_chk)
andi t0, t2, 0xFC, flags=(EZF,), dataSize=2
br label("processCSDescriptor"), flags=(CEZF,)
andi t6, t2, 0xF8, dataSize=8
andi t0, t2, 0x4, flags=(EZF,), dataSize=2
br label("globalCSDescriptor"), flags=(CEZF,)
ld t8, tsl, [1, t0, t6], dataSize=8, atCPL0=True
br label("processCSDescriptor")
globalCSDescriptor:
ld t8, tsg, [1, t0, t6], dataSize=8, atCPL0=True
processCSDescriptor:
chks t2, t6, dataSize=8
###
### Get the new stack pointer and stack segment off the old stack if necessary,
### and piggyback on the logic to check the new RIP value.
###
#IF ((64BIT_MODE) || (temp_CPL!=CPL))
#{
#(64BIT_MODE)
andi t0, t4, 0xE, flags=(EZF,)
# Since we just found out we're in 64 bit mode, take advantage and
# do the appropriate RIP checks.
br label("doPopStackStuffAndCheckRIP"), flags=(CEZF,)
# Here, we know we're -not- in 64 bit mode, so we should do the
# appropriate/other RIP checks.
# if temp_RIP > CS.limit throw #GP(0)
rdlimit t6, cs, dataSize=8
sub t0, t1, t6, flags=(ECF,)
fault "new GeneralProtection(0)", flags=(CECF,)
#(temp_CPL!=CPL)
srli t7, t4, 4
xor t7, t7, t5
andi t0, t7, 0x3, flags=(EZF,)
br label("doPopStackStuff"), flags=(nCEZF,)
    # We can modify user visible state here because we know
    # we're done with things that can fault.
addi rsp, rsp, "3 * env.stackSize"
br label("fallThroughPopStackStuff")
doPopStackStuffAndCheckRIP:
# Check if the RIP is canonical.
srai t7, t1, 47, flags=(EZF,), dataSize=ssz
# if t7 isn't 0 or -1, it wasn't canonical.
br label("doPopStackStuff"), flags=(CEZF,)
addi t0, t7, 1, flags=(EZF,), dataSize=ssz
fault "new GeneralProtection(0)", flags=(nCEZF,)
doPopStackStuff:
# POP.v temp_RSP
ld t6, ss, [1, t0, rsp], "3 * env.dataSize", dataSize=ssz
# POP.v temp_SS
ld t9, ss, [1, t0, rsp], "4 * env.dataSize", dataSize=ssz
# SS = READ_DESCRIPTOR (temp_SS, ss_chk)
andi t0, t9, 0xFC, flags=(EZF,), dataSize=2
br label("processSSDescriptor"), flags=(CEZF,)
andi t7, t9, 0xF8, dataSize=8
andi t0, t9, 0x4, flags=(EZF,), dataSize=2
br label("globalSSDescriptor"), flags=(CEZF,)
ld t7, tsl, [1, t0, t7], dataSize=8, atCPL0=True
br label("processSSDescriptor")
globalSSDescriptor:
ld t7, tsg, [1, t0, t7], dataSize=8, atCPL0=True
processSSDescriptor:
chks t9, t7, dataSize=8
# This actually updates state which is wrong. It should wait until we know
# we're not going to fault. Unfortunately, that's hard to do.
wrdl ss, t7, t9
wrsel ss, t9
###
### From this point downwards, we can't fault. We can update user visible state.
###
# RSP.s = temp_RSP
mov rsp, rsp, t6, dataSize=ssz
#}
fallThroughPopStackStuff:
# Update CS
wrdl cs, t8, t2
wrsel cs, t2
#CPL = temp_CPL
#IF (changing CPL)
#{
srli t7, t4, 4
xor t7, t7, t5
andi t0, t7, 0x3, flags=(EZF,)
br label("skipSegmentSquashing"), flags=(CEZF,)
# The attribute register needs to keep track of more info before this will
# work the way it needs to.
# FOR (seg = ES, DS, FS, GS)
# IF ((seg.attr.dpl < cpl && ((seg.attr.type = 'data')
# || (seg.attr.type = 'non-conforming-code')))
# {
# seg = NULL
# }
#}
skipSegmentSquashing:
# Ignore this for now.
#RFLAGS.v = temp_RFLAGS
wrflags t0, t3
# VIF,VIP,IOPL only changed if (old_CPL = 0)
# IF only changed if (old_CPL <= old_RFLAGS.IOPL)
# VM unchanged
# RF cleared
#RIP = temp_RIP
wrip t0, t1, dataSize=ssz
};
def macroop IRET_VIRT {
panic "Virtual mode iret isn't implemented!"
};
'''
#let {{
# class INT(Inst):
# "GenFault ${new UnimpInstFault}"
# class INTO(Inst):
# "GenFault ${new UnimpInstFault}"
#}};
|
bsd-3-clause
|