id (stringlengths 1–7) | text (stringlengths 6–1.03M) | dataset_id (stringclasses 1)
---|---|---|
3353441
|
import casadi as cas
from typing import Union, List
import numpy as np
import pytest
class Opti(cas.Opti):
def __init__(self,
variable_categories_to_freeze=[],
):
super().__init__()
p_opts = {}
s_opts = {}
s_opts["max_iter"] = 3000
s_opts["mu_strategy"] = "adaptive"
self.solver('ipopt', p_opts, s_opts) # Default to IPOPT solver
self.categories_to_freeze = variable_categories_to_freeze
def variable(self,
n_vars: int = 1,
init_guess: Union[float, np.ndarray] = None,
scale: float = None,
log_transform: bool = False,
category: str = "Default",
freeze: bool = False,
) -> cas.MX:
"""
Initializes a new decision variable.
It is recommended that you provide an initial guess (`init_guess`) and scale (`scale`) for each variable,
although these are not strictly required.
Args:
n_vars: Number of variables to initialize (used to initialize a vector of variables). If you are
initializing a scalar variable (the most typical case), leave this equal to 1. When using vector variables,
individual components of this vector of variables can be accessed via indexing.
Example:
>>> opti = asb.Opti()
>>> my_var = opti.variable(n_vars = 5)
>>> opti.subject_to(my_var[3] >= my_var[2]) # This is a valid way of indexing
>>> my_sum = asb.cas.sum1(my_var) # This will sum up all elements of `my_var`
init_guess: Initial guess for the variable being initialized. For scalar variables, this should be a
float. For vector variables (see `n_vars`), you can provide either a float (in which case all elements
of the vector will be initialized to the given value) or an iterable of equal length (in which case
each element will be initialized to the corresponding value in the given iterable).
In the case where the variable is to be log-transformed (see `log_transform`), the initial guess should
not be log-transformed as well; this happens under the hood. The initial guess must, of course, be a
positive number in this case.
If not specified, initial guess defaults to 0 for non-log-transformed variables and 1 for
log-transformed variables.
            scale: Approximate scale of the variable, used to scale the underlying symbolic variable so that the
                optimization problem is well-conditioned. If not specified, this defaults to the initial guess for
                log-transformed variables and to 1 otherwise.
            log_transform: If True, the variable is optimized in log-space; the symbolic variable represents the
                logarithm of the returned quantity. Useful for variables that must remain positive and that may
                span several orders of magnitude.
            category: A string label for this variable. If the category is listed in the
                `variable_categories_to_freeze` argument of the `Opti` constructor, the variable is frozen at its
                initial guess.
            freeze: If True, the variable is fixed at its initial guess instead of being optimized.
Returns:
The variable itself as a symbolic CasADi variable (MX type).
"""
# Validate the inputs
if log_transform and init_guess is not None:
if np.any(init_guess <= 0):
raise ValueError(
"If you are initializing a log-transformed variable, the initial guess(es) must be positive.")
# Set defaults
if init_guess is None:
init_guess = 1 if log_transform else 0
if scale is None:
scale = init_guess if log_transform else 1
# Validate the inputs
if np.any(scale <= 0):
raise ValueError("The 'scale' argument must be a positive number.")
# If the variable is in a category to be frozen, fix the variable at the initial guess.
if category in self.categories_to_freeze:
freeze = True
# If the variable is to be frozen, return the initial guess. Otherwise, define the variable using CasADi symbolics.
if freeze:
var = init_guess * np.ones(n_vars)
else:
if not log_transform:
var = scale * super().variable(n_vars)
self.set_initial(var, init_guess)
else:
log_scale = scale / init_guess
log_var = log_scale * super().variable(n_vars)
var = cas.exp(log_var)
self.set_initial(log_var, cas.log(init_guess))
return var
def subject_to(self,
constraint: Union[cas.MX, bool, List],
) -> None:
# Determine whether you're dealing with a single (possibly vectorized) constraint or a list of constraints.
# If the latter, recursively apply them.
if isinstance(constraint, List):
for each_constraint in constraint:
self.subject_to(each_constraint)
return
# If it's a proper constraint (MX type), pass it into the problem formulation and be done with it.
if isinstance(constraint, cas.MX):
super().subject_to(constraint)
return
# If the constraint(s) always evaluates True (e.g. if you enter "5 > 3"), skip it.
# This allows you to toggle frozen variables without causing problems with setting up constraints.
elif np.all(constraint):
pass
# If any of the constraint(s) are always False (e.g. if you enter "5 < 3"), raise an error.
# This indicates that the problem is infeasible as-written, likely because the user has frozen too
# many decision variables using the Opti.variable(freeze=True) syntax.
elif np.any(np.logical_not(constraint)):
raise RuntimeError(f"""The problem is infeasible due to a constraint that always evaluates False. You
supplied the following constraint: {constraint}. This can happen if you've frozen too
many decision variables, leading to an overconstrained problem.""")
else: # In theory, this should never be called, so long as the constraints can be boolean-evaluated.
raise TypeError(f"""Opti.subject_to could not determine the truthiness of your constraint, and it
doesn't appear to be a symbolic type or a boolean type. You supplied the following constraint:
{constraint}""")
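# A minimal usage sketch (illustrative, not part of the class above; assumes CasADi's
# IPOPT plugin is available):
#
#     opti = Opti(variable_categories_to_freeze=["geometry"])
#     x = opti.variable(init_guess=1, category="geometry")  # frozen: returned as a number, not an MX
#     y = opti.variable(init_guess=2, log_transform=True)   # optimized in log-space, stays positive
#     opti.subject_to(x >= 0)        # always-True booleans from frozen variables are skipped
#     opti.minimize((y - 3) ** 2)
#     sol = opti.solve()
#     print(sol.value(y))            # -> approximately 3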
if __name__ == '__main__':
pytest.main()
|
StarcoderdataPython
|
6211
|
# Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable
from . import struct
def get_afun_if_module(mod_or_fun) -> Callable:
"""Returns the apply function if it's a module. Does nothing otherwise."""
if hasattr(mod_or_fun, "apply"):
return mod_or_fun.apply
else:
return mod_or_fun
@struct.dataclass
class WrappedApplyFun:
"""Wraps a callable to be a module-like object with the method `apply`."""
apply: Callable
"""The wrapped callable."""
def __repr__(self):
return f"{type(self).__name__}(apply={self.apply}, hash={hash(self)})"
def wrap_afun(mod_or_fun):
"""Wraps a callable to be a module-like object with the method `apply`.
Does nothing if it already has an apply method.
"""
if hasattr(mod_or_fun, "apply"):
return mod_or_fun
else:
return WrappedApplyFun(mod_or_fun)
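# A minimal usage sketch (illustrative, not part of the module above):
#
#     log_psi = lambda params, x: (params * x).sum()
#     module_like = wrap_afun(log_psi)               # -> WrappedApplyFun(apply=log_psi, ...)
#     assert get_afun_if_module(module_like) is log_psi
#     assert wrap_afun(module_like) is module_like   # already exposes `.apply`, returned unchanged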
|
StarcoderdataPython
|
3278271
|
"""Remove None from nodes' logs lists."""
import sys
import logging
from website.app import init_app
from website.models import Node
from scripts import utils as script_utils
from modularodm import Q
logger = logging.getLogger(__name__)
def do_migration(records, dry=False):
count = 0
for node in records:
# Can't use in operator to check if None in node.logs
# Due to modm bug: https://github.com/CenterForOpenScience/modular-odm/issues/110
# So instead, we build an intermediate list
if None in [each for each in node.logs]:
logger.info(
'Removing None logs in node {}'.format(node._id)
)
node.logs = [each for each in node.logs if each is not None]
            if not dry:
                node.save()
count += 1
logger.info('Removed None logs from {} nodes'.format(count))
def get_targets():
return Node.find(Q('is_deleted', 'ne', True))
def main():
init_app(routes=False) # Sets the storage backends on all models
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
do_migration(get_targets(), dry)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1730795
|
<reponame>tuanavu/python-gitbook
print("I'm spam")
def hello(name):
print('Hello %s' % name)
|
StarcoderdataPython
|
109727
|
from saleor.celeryconf import app as celery_app
__all__ = ['celery_app']
__version__ = 'dev'
|
StarcoderdataPython
|
3312020
|
<reponame>state-of-the-art/BlendNet
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
'''BlendNet TaskExecutorBase
Description: Common for all task executors (agent/manager)
'''
import os
import signal
import time # We need to sleep the watching thread
import threading # Sync between threads needed
import json # Used in the tasks save/load
import hashlib # Calculate sha1 to find a task snapshot name
from abc import ABC
from . import providers
from .Config import Config
from .TaskBase import TaskBase
from .FileCache import FileCache
class TaskExecutorConfig(Config):
_defs = {
'session_id': {
            'description': '''Session identifier''',
'type': str,
'default': 'test',
},
'dist_url': {
'description': '''Blender distributive URL''',
'type': str,
'default': 'https://example.com/blender-test.tar.xz',
},
'dist_checksum': {
'description': '''Blender distributive checksum''',
'type': str,
'default': '',
},
'storage_url': {
'description': '''Storage URL used to store things''',
'type': str,
'default': lambda cfg: providers.getStorageUrl(cfg.session_id),
},
'listen_host': {
'description': '''Server listen host - ip address or name''',
'type': str,
'default': '',
},
'listen_port': {
'description': '''Server listen port''',
'type': int,
'min': 1,
'max': 65535,
'default': 8443,
},
'auth_user': {
'description': '''Server auth user name''',
'type': str,
'default': '',
},
'auth_password': {
'description': '''Server auth password''',
'type': str,
'default': '',
},
}
class TaskExecutorBase(ABC):
    '''Class with the common task management functionality'''
def __init__(self, task_type, config):
if not issubclass(task_type, TaskBase):
            raise Exception('Unable to use task type %s' % task_type)
if not isinstance(config, TaskExecutorConfig):
raise Exception('Unable to setup with configuration %s' % type(config))
self._enabled = True
self._task_type = task_type
self._cfg = config
self._fc = FileCache('.', 'BlendNet_cache')
self._tasks_lock = threading.Lock()
self._tasks = {}
self._tasks_dir = os.path.join('tasks', '%s-%s' % (self.__class__.__name__, self._cfg.session_id))
self._tasks_pending_lock = threading.Lock()
self._tasks_pending = []
self._tasks_running_lock = threading.Lock()
self._tasks_running = set()
self._tasks_watcher = threading.Thread(target=self._tasksWatcher)
self._tasks_watcher.start()
self._old_sigint = signal.signal(signal.SIGINT, self._termSignalHook)
self._old_sigterm = signal.signal(signal.SIGTERM, self._termSignalHook)
def __del__(self):
print('DEBUG: Deleting TaskExecutorBase instance')
self._enabled = False
def _termSignalHook(self, signum, frame):
print('WARN: Executor received TERM %s signal...' % signum)
self.setTerminating()
signal.signal(signal.SIGINT, self._old_sigint or signal.SIG_DFL)
signal.signal(signal.SIGTERM, self._old_sigterm or signal.SIG_DFL)
def tasks(self):
        '''Returns a copy of the tasks dict'''
with self._tasks_lock:
return self._tasks.copy()
def tasksRunning(self):
'''Returns copy of the currently running tasks set'''
with self._tasks_running_lock:
return self._tasks_running.copy()
def tasksSave(self, tasks = []):
'''Save in-memory tasks to disk'''
if not tasks:
with self._tasks_lock:
tasks = list(self._tasks.values())
print('DEBUG: Saving %s tasks to disk' % len(tasks))
os.makedirs(self._tasks_dir, 0o700, True)
for task in tasks:
try:
filename = 'task-%s.json' % hashlib.sha1(task.name().encode('utf-8')).hexdigest()
with open(os.path.join(self._tasks_dir, filename), 'w') as f:
json.dump(task.snapshot(), f)
except Exception as e:
print('ERROR: Unable to save task "%s" to disk: %s' % (task.name(), e))
def tasksLoad(self):
'''Load tasks from disk'''
with self._tasks_lock:
if not os.path.isdir(self._tasks_dir):
return
with os.scandir(self._tasks_dir) as it:
for entry in it:
if not (entry.is_file() and entry.name.endswith('.json')):
continue
print('DEBUG: Loading task:', entry.name)
json_path = os.path.join(self._tasks_dir, entry.name)
try:
with open(json_path, 'r') as f:
data = json.load(f)
task = self._task_type(self, data['name'], data)
self._tasks[task.name()] = task
task.check()
if task.isPending():
self.taskAddToPending(task)
except Exception as e:
print('ERROR: Unable to load task file "%s" from disk: %s' % (json_path, e))
def taskExists(self, name):
        '''Check whether a task with the given name exists'''
with self._tasks_lock:
return name in self._tasks
def taskGet(self, name):
        '''Return the existing task object, or create a new one'''
with self._tasks_lock:
if name not in self._tasks:
self._tasks[name] = self._task_type(self, name)
return self._tasks[name]
def taskRemove(self, name):
'''Removes task from the task list'''
task = self.taskGet(name)
if task.isRunning():
task.stop()
if task.isPending():
self.taskRemoveFromPending(task)
with self._tasks_lock:
self._tasks.pop(name)
# Remove the snapshot file if existing
filename = 'task-%s.json' % hashlib.sha1(name.encode('utf-8')).hexdigest()
filepath = os.path.join(self._tasks_dir, filename)
try:
if os.path.exists(filepath):
os.remove(filepath)
except Exception as e:
# Could happen on Windows if file is used by some process
print('ERROR: Unable to remove file:', str(e))
def taskAddToPending(self, task):
'''Put task object into the pending list'''
with self._tasks_pending_lock:
if not task.check():
                print('ERROR: Unable to set task %s to pending: it is not ready' % task.name())
task.statePending()
self._tasks_pending.append(task)
print('DEBUG: Moved task to pending: "%s"' % task.name())
return True
def taskRemoveFromPending(self, task):
'''Remove task object from the pending list'''
with self._tasks_pending_lock:
task.stateCreated()
self._tasks_pending.remove(task)
print('DEBUG: Removed task from pending: "%s"' % task.name())
return True
def _taskPendingToRunning(self):
'''Put task object from pending into running list'''
task = None
with self._tasks_pending_lock:
task = self._tasks_pending.pop(0)
with self._tasks_running_lock:
self._tasks_running.add(task)
task.start()
print('DEBUG: Moved task from pending to running: "%s"' % task.name())
return True
def _tasksWatcher(self):
        '''Watch the running tasks and promote pending tasks to running'''
print('DEBUG: Starting tasks watcher')
while self._enabled:
with self._tasks_running_lock:
tasks_running = self._tasks_running.copy()
for task in tasks_running:
if task.isEnded(): # Remove task from the list since it's ended
print('DEBUG: Removing from running list ended task "%s"' % task.name())
self._tasks_running.remove(task)
if self._tasks_pending:
if not self.tasksRunning(): # Empty running tasks
self._taskPendingToRunning()
            # TODO: if the currently executing task is about to complete, fetch the
            # next one from pending so that no time is lost on preparation
time.sleep(1.0)
print('DEBUG: Stopped tasks watcher')
def getLoadStatus(self):
        '''Return the current load averages over 1, 5 and 15 minutes'''
load = (None, None, None)
if hasattr(os, 'getloadavg'): # Linux, Mac
load = os.getloadavg()
return load
def getMemoryStatus(self):
        '''Return the current memory status (MemTotal, MemFree, MemAvailable) in MB'''
memory = {}
if os.path.exists('/proc/meminfo'): # Linux
with open('/proc/meminfo', 'r') as f:
for line in f.readlines():
if line.startswith('Mem'):
memory[line.split(':')[0]] = float(line.split(' ')[-2])/1024.0
return memory
def getDiskStatus(self):
'''Return disk total and available space in MB'''
return {
'total': self._fc.getTotalSpace()/1024/1024,
'available': self._fc.getAvailableSpace()/1024/1024,
}
def blobStoreStream(self, stream, size, sha1):
return self._fc.blobStoreStream(stream, size, sha1)
def blobGet(self, sha1):
return self._fc.blobGet(sha1)
def blobGetStream(self, sha1):
return self._fc.blobGetStream(sha1)
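# A minimal usage sketch (illustrative; `MyTask` and the config construction are
# assumptions here, since concrete TaskBase subclasses and Config loading live in
# other BlendNet modules):
#
#     class MyExecutor(TaskExecutorBase):
#         def __init__(self, cfg):
#             super().__init__(MyTask, cfg)     # cfg: a TaskExecutorConfig instance
#
#     executor = MyExecutor(cfg)
#     executor.tasksLoad()                      # restore task snapshots from disk
#     task = executor.taskGet('frame-0001')     # create (or fetch) a task by name
#     executor.taskAddToPending(task)           # the watcher thread will start it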
|
StarcoderdataPython
|
185279
|
"""
This file contains different status codes for NetConnection, NetStream and SharedObject.
The source code was taken from the rtmpy project (https://github.com/hydralabs/rtmpy)
https://github.com/hydralabs/rtmpy/blob/master/rtmpy/status/codes.py
"""
# === NetConnection status codes and what they mean. ===
#: The URI specified in the NetConnection.connect method did not specify 'rtmp'
#: as the protocol. 'rtmp' must be specified when connecting to an RTMP server.
#: Alternatively, an unsupported AMF version was used (3 when only 0 is supported).
NC_CALL_BAD_VERSION = 'NetConnection.Call.BadVersion'
#: The NetConnection.call method was not able to invoke the server-side method
#: or command.
NC_CALL_FAILED = 'NetConnection.Call.Failed'
#: The application has been shut down (for example, if the application is out
#: of memory resources and must shut down to prevent the server from crashing)
#: or the server has shut down.
NC_CONNECT_APP_SHUTDOWN = 'NetConnection.Connect.AppShutdown'
#: The connection was closed successfully.
NC_CONNECT_CLOSED = 'NetConnection.Connect.Closed'
#: The connection attempt failed.
NC_CONNECT_FAILED = 'NetConnection.Connect.Failed'
#: The application name specified during connect is invalid.
NC_CONNECT_INVALID_APPLICATION = 'NetConnection.Connect.InvalidApp'
#: The client does not have permission to connect to the application, the
#: application expected different parameters from those that were passed,
#: or the application name specified during the connection attempt was not
#: found on the server.
NC_CONNECT_REJECTED = 'NetConnection.Connect.Rejected'
#: The connection attempt succeeded.
NC_CONNECT_SUCCESS = 'NetConnection.Connect.Success'
# === NetStream status codes and what they mean. ===
#: A recorded stream failed to delete.
NS_CLEAR_FAILED = 'NetStream.Clear.Failed'
#: A recorded stream was deleted successfully.
NS_CLEAR_SUCCESS = 'NetStream.Clear.Success'
#: An attempt to use a Stream method (at client-side) failed.
NS_FAILED = 'NetStream.Failed'
#: Invalid arguments were passed to a NetStream method.
NS_INVALID_ARGUMENT = 'NetStream.InvalidArg'
#: Playlist playback is complete.
NS_PLAY_COMPLETE = 'NetStream.Play.Complete'
#: An attempt to play back a stream failed.
NS_PLAY_FAILED = 'NetStream.Play.Failed'
#: Data is playing behind the normal speed.
NS_PLAY_INSUFFICIENT_BW = 'NetStream.Play.InsufficientBW'
#: Playback was started.
NS_PLAY_START = 'NetStream.Play.Start'
#: An attempt was made to play a stream that does not exist.
NS_PLAY_STREAM_NOT_FOUND = 'NetStream.Play.StreamNotFound'
#: Playback was stopped.
NS_PLAY_STOP = 'NetStream.Play.Stop'
#: A playlist was reset.
NS_PLAY_RESET = 'NetStream.Play.Reset'
#: The initial publish to a stream was successful. This message is sent to
#: all subscribers.
NS_PLAY_PUBLISH_NOTIFY = 'NetStream.Play.PublishNotify'
#: An unpublish from a stream was successful. This message is sent to all
#: subscribers.
NS_PLAY_UNPUBLISH_NOTIFY = 'NetStream.Play.UnpublishNotify'
#: Playlist playback switched from one stream to another.
NS_PLAY_SWITCH = 'NetStream.Play.Switch'
#: Flash Player detected an invalid file structure and will not try to
#: play this type of file.
NS_PLAY_FILE_STRUCTURE_INVALID = 'NetStream.Play.FileStructureInvalid'
#: Flash Player did not detect any supported tracks (video, audio or data)
#: and will not try to play the file.
NS_PLAY_NO_SUPPORTED_TRACK_FOUND = 'NetStream.Play.NoSupportedTrackFound'
#: An attempt was made to publish a stream that is already being published
#: by someone else.
NS_PUBLISH_BAD_NAME = 'NetStream.Publish.BadName'
#: An attempt to publish was successful.
NS_PUBLISH_START = 'NetStream.Publish.Start'
#: An attempt was made to record a read-only stream.
NS_RECORD_NO_ACCESS = 'NetStream.Record.NoAccess'
#: An attempt to record a stream failed.
NS_RECORD_FAILED = 'NetStream.Record.Failed'
#: Recording was started.
NS_RECORD_START = 'NetStream.Record.Start'
#: Recording was stopped.
NS_RECORD_STOP = 'NetStream.Record.Stop'
#: An attempt to unpublish was successful.
NS_UNPUBLISHED_SUCCESS = 'NetStream.Unpublish.Success'
#: The subscriber has used the seek command to move to a particular
#: location in the recorded stream.
NS_SEEK_NOTIFY = 'NetStream.Seek.Notify'
#: The stream doesn't support seeking.
NS_SEEK_FAILED = 'NetStream.Seek.Failed'
#: The stream has been paused.
NS_PAUSE_NOTIFY = 'NetStream.Pause.Notify'
#: The stream has been resumed after a pause.
NS_UNPAUSE_NOTIFY = 'NetStream.Unpause.Notify'
#: Unknown
NS_DATA_START = 'NetStream.Data.Start'
# === SharedObject status codes and what they mean. ===
#: Read access to a shared object was denied.
SO_NO_READ_ACCESS = 'SharedObject.NoReadAccess'
#: Write access to a shared object was denied.
SO_NO_WRITE_ACCESS = 'SharedObject.NoWriteAccess'
#: The creation of a shared object was denied.
SO_CREATION_FAILED = 'SharedObject.ObjectCreationFailed'
#: The persistence parameter passed to SharedObject.getRemote() is
#: different from the one used when the shared object was created.
SO_PERSISTENCE_MISMATCH = 'SharedObject.BadPersistence'
|
StarcoderdataPython
|
33383
|
# Programmer friendly subprocess wrapper.
#
# Author: <NAME> <<EMAIL>>
# Last Change: March 2, 2020
# URL: https://executor.readthedocs.io
"""
Portable process control functionality for the `executor` package.
The :mod:`executor.process` module defines the :class:`ControllableProcess`
abstract base class which enables process control features like waiting for a
process to end, gracefully terminating it and forcefully killing it. The
process control functionality in :class:`ControllableProcess` is separated from
the command execution functionality in :class:`~executor.ExternalCommand` to
make it possible to re-use the process control functionality in other Python
packages, see for example the :class:`proc.core.Process` class.
"""
# Standard library modules.
import logging
# External dependencies.
from humanfriendly import Timer
from humanfriendly.terminal.spinners import Spinner
from property_manager import PropertyManager, mutable_property, required_property
# Initialize a logger for this module.
logger = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 10
"""The default timeout used to wait for process termination (number of seconds)."""
class ControllableProcess(PropertyManager):
"""
Abstract, portable process control functionality.
By defining a subclass of :class:`ControllableProcess` and implementing the
:attr:`pid`, :attr:`command_line` and :attr:`is_running` properties and the
:func:`terminate_helper()` and :func:`kill_helper()` methods you get the
:func:`wait_for_process()`, :func:`terminate()` and :func:`kill()` methods
for free. This decoupling has enabled me to share a lot of code between two
Python projects of mine with similar goals but very different
requirements:
1. The `executor` package builds on top of the :mod:`subprocess` module
in the Python standard library and strives to be as cross platform
as possible. This means things like UNIX signals are not an option
(although signals exist on Windows they are hardly usable). The package
mostly deals with :class:`subprocess.Popen` objects internally (to hide
platform specific details as much as possible).
2. The proc_ package exposes process information available in the Linux
process information pseudo-file system available at ``/proc``. The
package mostly deals with process IDs internally. Because this is
completely specialized to a UNIX environment the use of things
like UNIX signals is not a problem at all.
.. _proc: http://proc.readthedocs.org/en/latest/
"""
@mutable_property
def command_line(self):
"""
A list of strings with the command line used to start the process.
This property may be set or implemented by subclasses to enable
:func:`__str__()` to render a human friendly representation of a
:class:`ControllableProcess` object.
"""
return []
@property
def is_running(self):
"""
:data:`True` if the process is running, :data:`False` otherwise.
This property must be implemented by subclasses to enable
:func:`wait_for_process()`, :func:`terminate()` and :func:`kill()` to
work properly.
"""
raise NotImplementedError("You need to implement the `is_running' property!")
@mutable_property
def logger(self):
"""
The :class:`logging.Logger` object to use (defaults to the :mod:`executor.process` logger).
If you are using Python's :mod:`logging` module and you find it
confusing that command manipulation is logged under the
:mod:`executor.process` name space instead of the name space of the
application or library using :mod:`executor` you can set this
attribute to inject a custom (and more appropriate) logger.
"""
return logger
@mutable_property
def pid(self):
"""
The process ID (a number) or :data:`None`.
This property must be set or implemented by subclasses:
- It provides :func:`wait_for_process()` with a short and unique
representation of a process that most users will understand.
- It enables :func:`__str__()` to render a human friendly
representation of a :class:`ControllableProcess` object.
"""
def wait_for_process(self, timeout=0, use_spinner=None):
"""
Wait until the process ends or the timeout expires.
:param timeout: The number of seconds to wait for the process to
terminate after we've asked it nicely (defaults
to zero which means we wait indefinitely).
:param use_spinner: Whether or not to display an interactive spinner
on the terminal (using :class:`~humanfriendly.Spinner`)
to explain to the user what they are waiting for:
- :data:`True` enables the spinner,
- :data:`False` disables the spinner,
- :data:`None` (the default) means the spinner is
enabled when the program is connected to an
interactive terminal, otherwise it's disabled.
:returns: A :class:`~humanfriendly.Timer` object telling you how long
it took to wait for the process.
"""
with Timer(resumable=True) as timer:
with Spinner(interactive=use_spinner, timer=timer) as spinner:
while self.is_running:
if timeout and timer.elapsed_time >= timeout:
break
spinner.step(label="Waiting for process %i to terminate" % self.pid)
spinner.sleep()
return timer
def terminate(self, wait=True, timeout=DEFAULT_TIMEOUT, use_spinner=None):
"""
Gracefully terminate the process.
:param wait: Whether to wait for the process to end (a boolean,
defaults to :data:`True`).
:param timeout: The number of seconds to wait for the process to
terminate after we've signaled it (defaults to
:data:`DEFAULT_TIMEOUT`). Zero means to wait
indefinitely.
:param use_spinner: See the :func:`wait_for_process()` documentation.
:returns: :data:`True` if the process was terminated, :data:`False`
otherwise.
:raises: Any exceptions raised by :func:`terminate_helper()`
implementations of subclasses or :func:`kill()`.
This method works as follows:
1. Signal the process to gracefully terminate itself. Processes can
choose to intercept termination signals to allow for graceful
termination (many UNIX daemons work like this) however the default
action is to simply exit immediately.
2. If `wait` is :data:`True` and we've signaled the process, we wait
for it to terminate gracefully or `timeout` seconds have passed
(whichever comes first).
3. If `wait` is :data:`True` and the process is still running after
`timeout` seconds have passed, it will be forcefully terminated
using :func:`kill()` (the value of `timeout` that was given to
:func:`terminate()` will be passed on to :func:`kill()`).
This method does nothing when :attr:`is_running` is :data:`False`.
"""
if self.is_running:
self.logger.info("Gracefully terminating process %s ..", self)
self.terminate_helper()
if wait:
timer = self.wait_for_process(timeout=timeout, use_spinner=use_spinner)
if self.is_running:
self.logger.warning("Failed to gracefully terminate process! (waited %s)", timer)
return self.kill(wait=True, timeout=timeout)
else:
self.logger.info("Successfully terminated process in %s.", timer)
return True
return not self.is_running
else:
return False
def terminate_helper(self):
"""Request the process to gracefully terminate itself (needs to be implemented by subclasses)."""
raise NotImplementedError("You need to implement the terminate_helper() method!")
def kill(self, wait=True, timeout=DEFAULT_TIMEOUT, use_spinner=None):
"""
Forcefully kill the process.
:param wait: Whether to wait for the process to end (a boolean,
defaults to :data:`True`).
:param timeout: The number of seconds to wait for the process to
terminate after we've signaled it (defaults to
:data:`DEFAULT_TIMEOUT`). Zero means to wait
indefinitely.
:param use_spinner: See the :func:`wait_for_process()` documentation.
:returns: :data:`True` if the process was killed, :data:`False`
otherwise.
:raises: - Any exceptions raised by :func:`kill_helper()`
implementations of subclasses.
- :exc:`ProcessTerminationFailed` if the process is still
running after :func:`kill_helper()` and
:func:`wait_for_process()` have been called.
This method does nothing when :attr:`is_running` is :data:`False`.
"""
if self.is_running:
self.logger.info("Forcefully killing process %s ..", self)
self.kill_helper()
if wait:
timer = self.wait_for_process(timeout=timeout, use_spinner=use_spinner)
if self.is_running:
self.logger.warning("Failed to forcefully kill process! (waited %s)", timer)
raise ProcessTerminationFailed(process=self, message="Failed to kill process! (%s)" % self)
else:
self.logger.info("Successfully killed process in %s.", timer)
return True
return not self.is_running
else:
return False
def kill_helper(self):
"""Forcefully kill the process (needs to be implemented by subclasses)."""
raise NotImplementedError("You need to implement the kill_helper() method!")
def __str__(self):
"""
Render a human friendly representation of a :class:`ControllableProcess` object.
:returns: A string describing the process. Includes the process ID and the
command line (when available).
"""
text = []
# Include the process ID? (only when it's available)
if self.pid is not None:
text.append(str(self.pid))
# Include the command line? (again, only when it's available)
if self.command_line:
# We import here to avoid circular imports.
from executor import quote
text.append("(%s)" % quote(self.command_line))
if not text:
# If all else fails we fall back to the super class.
text.append(object.__str__(self))
return " ".join(text)
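# A minimal sketch of a concrete subclass (an illustration, not part of the original
# module): wrapping a subprocess.Popen-like object provides the hooks that make
# wait_for_process(), terminate() and kill() work.
class PopenControllableProcess(ControllableProcess):

    """Example :class:`ControllableProcess` wrapper around a :class:`subprocess.Popen` object."""

    def __init__(self, popen, **options):
        """Store the Popen object and publish its process ID."""
        super(PopenControllableProcess, self).__init__(**options)
        self.popen = popen
        self.pid = popen.pid

    @property
    def is_running(self):
        """:data:`True` while the wrapped process hasn't returned an exit code."""
        return self.popen.poll() is None

    def terminate_helper(self):
        """Politely ask the process to terminate (``SIGTERM`` on UNIX)."""
        self.popen.terminate()

    def kill_helper(self):
        """Forcefully kill the process (``SIGKILL`` on UNIX)."""
        self.popen.kill()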
class ProcessTerminationFailed(PropertyManager, Exception):
"""Raised when process termination fails."""
def __init__(self, *args, **kw):
"""
Initialize a :class:`ProcessTerminationFailed` object.
This method's signature is the same as the initializer of the
:class:`~property_manager.PropertyManager` class.
"""
PropertyManager.__init__(self, *args, **kw)
Exception.__init__(self, self.message)
@required_property(usage_notes=False)
def process(self):
"""The :class:`ControllableProcess` object that triggered the exception."""
@required_property(usage_notes=False)
def message(self):
"""An error message that explains how the process termination failed."""
|
StarcoderdataPython
|
3357880
|
<reponame>ProSeCo-Planning/proseco_planning<gh_stars>0
"""To be used in conjunction with stateAnalysis.cpp. This file generates plots of the respective action classes given a specific vehicular state."""
import plotly.express as px
import pandas as pd
import json
import tool as tl
from typing import Tuple, Dict
def load_data() -> Tuple[Dict, pd.DataFrame]:
    """Loads the data generated by stateAnalysis.cpp.
Returns:
Tuple[dict, pd.DataFrame]: A tuple with the raw data and the normalized actions.
"""
data = {}
children = []
with open(f"{tl.file_dir}/output/state_analysis.json") as json_data:
data = json.load(json_data)
for node in data["childMap"]:
children.append(
{
"d_lateral": node[1]["action_set"][0]["lateral_change"],
"d_velocity": node[1]["action_set"][0]["velocity_change"],
"invalid": node[1]["invalid"],
"collision": node[1]["collision"],
}
)
actions = pd.DataFrame(children)
return data, actions
def plot_state_analysis(vehicle: Dict, actions: pd.DataFrame) -> None:
labels = {}
labels["d_velocity"] = tl.l_math(r"\Delta v_\text{lon} [m/s]")
labels["d_lateral"] = tl.l_math(r"\Delta y_\text{lat} [m]")
labels["collision"] = "Collision State"
labels["invalid"] = "Invalid State"
for state in ["collision", "invalid"]:
# stringify for coloring
actions[state] = actions[state].astype(str)
title = f"{labels[state]}"
labels[state] = state.capitalize()
fig = px.scatter(
actions,
x="d_velocity",
y="d_lateral",
hover_data=[state],
title=title,
labels=labels,
width=600,
height=600,
color=state,
color_discrete_map={"True": "red", "False": "green"},
)
fig.update_traces(marker=dict(size=12))
fig.update_layout(
xaxis=dict(tickmode="linear", tick0=min(actions["d_velocity"])),
yaxis=dict(tickmode="linear", tick0=min(actions["d_lateral"])),
font=dict(family=tl.font_family, size=tl.font_size),
template=tl.theme_template
)
tl.generate_output(fig, f"state_analysis_{state}")
if __name__ == "__main__":
# The tool to run.
tool = "proseco_planning_tool_state_analysis"
# The options file to load.
options = "example_options.json"
# The scenario file to load.
scenario = "sc00.json"
tl.create_output_dir()
tl.run_tool(tool, options, scenario)
data, actions = load_data()
plot_state_analysis(data["agents"][0]["vehicle"], actions)
|
StarcoderdataPython
|
67314
|
<reponame>balmasea/genieparser<filename>src/genie/libs/parser/iosxe/tests/ShowRunInterface/cli/equal/golden_output12_expected.py
expected_output={
"interfaces": {
"Tunnel100": {
"autoroute_announce": "enabled",
"src_ip": "Loopback0",
"tunnel_bandwidth": 500,
"tunnel_dst": "2.2.2.2",
"tunnel_mode": "mpls traffic-eng",
"tunnel_path_option": {
"1": {
"path_type": "dynamic"
}
},
"tunnel_priority": [
"7 7"
]
}
}
}
|
StarcoderdataPython
|
35835
|
<filename>web/datasets/tasks.py
from datetime import datetime
from logging import info
from pathlib import Path
from typing import List
import requests
from celery import shared_task
from django.conf import settings
from django.contrib.admin.options import get_content_type_for_model
from requests import HTTPError
from tika import parser
from web.datasets.adapters import (
to_citycouncil_bid,
to_citycouncil_contract,
to_citycouncil_expense,
to_citycouncil_revenue,
)
from web.datasets.models import (
CityCouncilBid,
CityCouncilContract,
CityCouncilExpense,
CityCouncilRevenue,
File,
SyncInformation,
)
from web.datasets.services import get_s3_client
client = get_s3_client(settings)
class WebserviceException(Exception):
pass
@shared_task
def content_from_file(file_pk=None, path=None, keep_file=True):
if not any([file_pk, path]):
raise Exception("Ou `file_pk` ou `path` devem ser informados.")
a_file = None
if file_pk:
a_file = File.objects.get(pk=file_pk)
if a_file.content is not None:
return a_file.content
path = client.download_file(a_file.s3_file_path)
keep_file = False
if not Path(path).exists():
info(f"Arquivo {path} não encontrado.")
return
raw = parser.from_file(path)
if not keep_file:
Path(path).unlink()
if a_file:
a_file.content = raw["content"] or ""
a_file.save()
return a_file.content
return raw["content"]
@shared_task
def backup_file(file_id):
try:
file_obj = File.objects.get(pk=file_id, s3_url__isnull=True)
except File.DoesNotExist:
info(f"O arquivo ({file_id}) não existe ou já possui backup.")
return
if not file_obj.url and not file_obj.local_path:
info(f"O arquivo ({file_id}) não tem URL ou não existe localmente.")
return
model_name = file_obj.content_object._meta.model_name
relative_file_path = (
f"{model_name}/{file_obj.created_at.year}/"
f"{file_obj.created_at.month}/{file_obj.created_at.day}/"
)
location = file_obj.local_path or file_obj.url
s3_url, s3_file_path = client.upload_file(
location, relative_file_path, prefix=file_obj.checksum
)
file_obj.s3_file_path = s3_file_path
file_obj.s3_url = s3_url
file_obj.save()
return s3_url
@shared_task
def get_city_council_updates(formatted_date):
    """Requests updates from the City Council webservice."""
target_date = datetime.strptime(formatted_date, "%Y-%m-%d").date()
sync_info, _ = SyncInformation.objects.get_or_create(
date=target_date, source="camara", defaults={"succeed": False}
)
response = requests.get(
settings.CITY_COUNCIL_WEBSERVICE_ENDPOINT,
        params={
            "data": formatted_date,  # format yyyy-mm-dd
"token": settings.CITY_COUNCIL_WEBSERVICE_TOKEN,
},
headers={"User-Agent": "Maria Quitéria"},
)
try:
response.raise_for_status()
sync_info.succeed = True
except HTTPError:
sync_info.succeed = False
sync_info.save()
raise HTTPError
response = response.json()
sync_info.response = response
if response.get("erro"):
sync_info.succeed = False
sync_info.save()
raise WebserviceException(response["erro"])
sync_info.save()
return response
@shared_task(ignore_result=True)
def distribute_city_council_objects_to_sync(payload):
    """Receives the payload and dispatches one task per record.
    The City Council webservice returns a list of actions (insertion,
    update and deletion) together with the records affected by each one.
    This task runs each record separately so that any errors can be
    handled individually.
    """
action_methods = {
"inclusoesContrato": add_citycouncil_contract,
"alteracoesContrato": update_citycouncil_contract,
"exclusoesContrato": remove_citycouncil_contract,
"inclusoesLicitacao": add_citycouncil_bid,
"alteracoesLicitacao": update_citycouncil_bid,
"exclusoesLicitacao": remove_citycouncil_bid,
"inclusoesReceita": add_citycouncil_revenue,
"alteracoesReceita": update_citycouncil_revenue,
"exclusoesReceita": remove_citycouncil_revenue,
"inclusoesDespesa": add_citycouncil_expense,
"alteracoesDespesa": update_citycouncil_expense,
"exclusoesDespesa": remove_citycouncil_expense,
}
for action_name, records in payload.items():
info(f"{action_name}: {len(records)} registros")
task = action_methods.get(action_name)
if action_name.startswith("exclusoes"):
task.delay(records)
else:
for record in records:
task.delay(record)
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def save_citycouncil_files(files, object, url_key):
if not files:
return
content_type = get_content_type_for_model(object)
from web.datasets.management.commands._file import save_file
if files:
for file_ in files:
save_file(file_[url_key], content_type, object.pk)
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def add_citycouncil_bid(record):
new_item = to_citycouncil_bid(record)
new_item["crawled_at"] = datetime.now()
new_item["crawled_from"] = settings.CITY_COUNCIL_WEBSERVICE_ENDPOINT
bid, _ = CityCouncilBid.objects.get_or_create(
external_code=new_item["external_code"], defaults=new_item
)
save_citycouncil_files(record.get("arquivos"), bid, "caminhoArqLic")
return bid
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def update_citycouncil_bid(record):
bid = CityCouncilBid.objects.get(external_code=record["codLic"])
updated_item = to_citycouncil_bid(record)
for key, value in updated_item.items():
setattr(bid, key, value)
bid.save()
save_citycouncil_files(record.get("arquivos"), bid, "caminhoArqLic")
return bid
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def remove_citycouncil_bid(records: List[dict]):
to_be_removed = [record["codLic"] for record in records]
CityCouncilBid.objects.filter(external_code__in=to_be_removed).update(excluded=True)
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def add_citycouncil_contract(record):
new_item = to_citycouncil_contract(record)
new_item["crawled_at"] = datetime.now()
new_item["crawled_from"] = settings.CITY_COUNCIL_WEBSERVICE_ENDPOINT
contract, _ = CityCouncilContract.objects.get_or_create(
external_code=new_item["external_code"], defaults=new_item
)
save_citycouncil_files(record.get("arquivos"), contract, "caminho")
return contract
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def update_citycouncil_contract(record):
contract = CityCouncilContract.objects.get(external_code=record["codCon"])
updated_item = to_citycouncil_contract(record)
for key, value in updated_item.items():
setattr(contract, key, value)
contract.save()
save_citycouncil_files(record.get("arquivos"), contract, "caminho")
return contract
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def remove_citycouncil_contract(records: List[dict]):
to_be_removed = [record["codCon"] for record in records]
CityCouncilContract.objects.filter(external_code__in=to_be_removed).update(
excluded=True
)
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def add_citycouncil_revenue(record):
new_item = to_citycouncil_revenue(record)
new_item["crawled_at"] = datetime.now()
new_item["crawled_from"] = settings.CITY_COUNCIL_WEBSERVICE_ENDPOINT
revenue, _ = CityCouncilRevenue.objects.get_or_create(
external_code=new_item["external_code"], defaults=new_item
)
return revenue
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def update_citycouncil_revenue(record):
revenue = CityCouncilRevenue.objects.get(external_code=record["codLinha"])
updated_item = to_citycouncil_revenue(record)
for key, value in updated_item.items():
setattr(revenue, key, value)
revenue.save()
return revenue
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def remove_citycouncil_revenue(records: List[dict]):
to_be_removed = [record["codLinha"] for record in records]
CityCouncilRevenue.objects.filter(external_code__in=to_be_removed).update(
excluded=True
)
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def add_citycouncil_expense(record):
new_item = to_citycouncil_expense(record)
new_item["crawled_at"] = datetime.now()
new_item["crawled_from"] = settings.CITY_COUNCIL_WEBSERVICE_ENDPOINT
expense, _ = CityCouncilExpense.objects.get_or_create(
external_file_code=new_item["external_file_code"],
external_file_line=new_item["external_file_line"],
number=new_item["number"],
phase=new_item["phase"],
defaults=new_item,
)
return expense
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def update_citycouncil_expense(record):
expense = CityCouncilExpense.objects.get(
external_file_code=record["codArquivo"],
external_file_line=record["codLinha"],
)
updated_item = to_citycouncil_expense(record)
for key, value in updated_item.items():
setattr(expense, key, value)
expense.save()
return expense
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def remove_citycouncil_expense(records: List[dict]):
for record in records:
CityCouncilExpense.objects.filter(
external_file_code=record["codigo"], external_file_line=record["linha"]
).update(excluded=True)
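# A minimal usage sketch (illustrative; assumes a configured Django project and a
# running Celery worker):
#
#     from web.datasets.tasks import (
#         get_city_council_updates,
#         distribute_city_council_objects_to_sync,
#     )
#
#     payload = get_city_council_updates.delay("2021-03-01").get()
#     distribute_city_council_objects_to_sync.delay(payload)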
|
StarcoderdataPython
|
3314976
|
i=0
while i<1000:
i+=1
print(i)
|
StarcoderdataPython
|
1619907
|
<reponame>TopBoxBox/ENERGY_MANAGEMENT
"""Network of power devices."""
import cvxpy as cvx
import numpy as np
import tqdm
def _get_all_terminals(device):
"""Gets all terminals, including those nested within child devices."""
terms = device.terminals
if hasattr(device, 'internal_terminal'):
terms += [device.internal_terminal]
if hasattr(device, 'devices'):
terms += [t for d in device.devices for t in _get_all_terminals(d)]
return terms
class Terminal(object):
"""Device terminal."""
@property
def power_var(self):
return self._power
@property
def power(self):
"""Power consumed (positive value) or produced (negative value) at this
terminal."""
return self._power.value
def _init_problem(self, time_horizon, num_scenarios):
self._power = cvx.Variable(shape=(time_horizon, num_scenarios))
def _set_payments(self, price):
if price is not None and self._power.value is not None:
self.payment = price * self._power.value
else:
self.payment = None
class Net(object):
r"""Connection point for device terminals.
A net defines the power balance constraint ensuring that power sums to zero
across all connected terminals at each time point.
:param terminals: The terminals connected to this net
:param name: (optional) Display name of net
:type terminals: list of :class:`Terminal`
:type name: string
"""
def __init__(self, terminals, name=None):
self.name = "Net" if name is None else name
self.terminals = terminals
def _init_problem(self, time_horizon, num_scenarios):
self.num_scenarios = num_scenarios
# self.constraints = [sum(t._power[:, k] for t in self.terminals) == 0
# for k in range(num_scenarios)]
self.constraints = [
sum(t._power for t in self.terminals) / num_scenarios == 0]
self.problem = cvx.Problem(cvx.Minimize(0), self.constraints)
def _set_payments(self):
for t in self.terminals:
t._set_payments(self.price)
@property
def results(self):
return Results(price={self: self.price})
@property
def price(self):
"""Price associated with this net."""
return self.constraints[0].dual_value
#print([c.dual_value for c in self.constraints])
# raise
# if (len(self.constraints) == 1 and
# np.size(self.constraints[0].dual_value)) == 1:
# return self.constraints[0].dual_value
# TODO(enzo) hardcoded 1/K probability
# return np.sum(constr.dual_value
# for constr in self.constraints)
# if self.num_scenarios > 1:
# return np.matrix(np.sum([constr.dual_value[0]
# for constr in self.constraints], 0))
# return np.hstack(constr.dual_value.reshape(-1, 1)
# for constr in self.constraints)
class Device(object):
"""Base class for network device.
Subclasses are expected to override :attr:`constraints` and/or
:attr:`cost` to define the device-specific cost function.
:param terminals: The terminals of the device
:param name: (optional) Display name of device
:type terminals: list of :class:`Terminal`
:type name: string
"""
def __init__(self, terminals, name=None):
self.name = type(self).__name__ if name is None else name
self.terminals = terminals
self.problem = None
@property
    def cost(self):
        """Device objective, to be overridden by subclasses.
:rtype: cvxpy expression of size :math:`T \times K`
"""
return np.matrix(0.0)
@property
    def constraints(self):
        """Device constraints, to be overridden by subclasses.
:rtype: list of cvxpy constraints
"""
return []
@property
def results(self):
"""Network optimization results.
:rtype: :class:`Results`
"""
status = self.problem.status if self.problem else None
return Results(power={(self, i): t.power
for i, t in enumerate(self.terminals)},
payments={(self, i): t.payment
for i, t in enumerate(self.terminals)},
status=status)
def _init_problem(self, time_horizon, num_scenarios):
self.problem = cvx.Problem(
cvx.Minimize(cvx.sum(cvx.sum(self.cost, axis=1)) / num_scenarios),
# TODO(enzo) we should weight by probs
self.constraints +
[terminal._power[0, k] == terminal._power[0, 0]
for terminal in self.terminals
for k in range(1, terminal._power.shape[1] if
len(terminal._power.shape) > 1 else 0)])
def init_problem(self, time_horizon=1, num_scenarios=1):
"""Initialize the network optimization problem.
:param time_horizon: The time horizon :math:`T` to optimize over.
:param num_scenarios: The number of scenarios for robust MPC.
:type time_horizon: int
:type num_scenarios: int
"""
for terminal in _get_all_terminals(self):
terminal._init_problem(time_horizon, num_scenarios)
self._init_problem(time_horizon, num_scenarios)
def optimize(self, time_horizon=1, num_scenarios=1, **kwargs):
self.init_problem(time_horizon, num_scenarios)
self.problem.solve(**kwargs)
return self.results
class Group(Device):
"""A single device composed of multiple devices and nets.
The `Group` device allows for creating new devices composed of existing base
devices or other groups.
:param devices: Internal devices to be included.
:param nets: Internal nets to be included.
:param terminals: (optional) Terminals for new device.
:param name: (optional) Display name of group device
:type devices: list of :class:`Device`
:type nets: list of :class:`Net`
:type terminals: list of :class:`Terminal`
:type name: string
"""
def __init__(self, devices, nets, terminals=[], name=None):
super(Group, self).__init__(terminals, name)
self.devices = devices
self.nets = nets
@property
def results(self):
for n in self.nets:
n._set_payments()
results = sum(x.results for x in self.devices + self.nets)
results.status = self.problem.status if self.problem else None
return results
def _init_problem(self, time_horizon, num_scenarios):
for device in self.devices:
device._init_problem(time_horizon, num_scenarios)
for net in self.nets:
net._init_problem(time_horizon, num_scenarios)
self.problem = sum(x.problem for x in self.devices + self.nets)
# def optimize(self, **kwargs):
# super(Group, self).optimize(**kwargs)
# for n in self.nets:
# n._set_payments
# raise
class Results(object):
"""Network optimization results."""
def __init__(self, power=None, payments=None, price=None, status=None):
self.power = power if power else {}
self.payments = payments if payments else {}
self.price = price if price else {}
self.status = status
def __radd__(self, other):
return self.__add__(other)
def __add__(self, other):
if other == 0:
return self
power = self.power.copy()
payments = self.payments.copy()
price = self.price.copy()
power.update(other.power)
payments.update(other.payments)
price.update(other.price)
status = self.status if self.status is not None else other.status
return Results(power, payments, price, status)
def __str__(self):
return self.summary()
def __repr__(self):
return self.summary()
def summary(self):
"""Summary of results. Only works for single period optimization.
:rtype: str
"""
retval = "Status: " + self.status if self.status else "none"
if not self.status in {cvx.OPTIMAL, cvx.OPTIMAL_INACCURATE}:
return retval
retval += "\n"
retval += "%-20s %10s\n" % ("Terminal", "Power")
retval += "%-20s %10s\n" % ("--------", "-----")
averages = False
for device_terminal, value in self.power.items():
label = "%s[%d]" % (device_terminal[0].name, device_terminal[1])
if isinstance(value, np.ndarray):
value = np.mean(value)
averages = True
retval += "%-20s %10.2f\n" % (label, value)
retval += "\n"
retval += "%-20s %10s\n" % ("Net", "Price")
retval += "%-20s %10s\n" % ("---", "-----")
for net, value in self.price.items():
if isinstance(value, np.ndarray):
value = np.mean(value)
retval += "%-20s %10.4f\n" % (net.name, value)
retval += "\n"
retval += "%-20s %10s\n" % ("Device", "Payment")
retval += "%-20s %10s\n" % ("------", "-------")
device_payments = {d[0][0]: 0 for d in self.payments.items()}
for device_terminal, value in self.payments.items():
if isinstance(value, np.ndarray):
value = np.sum(value)
device_payments[device_terminal[0]] += value
for d in device_payments.keys():
retval += "%-20s %10.2f\n" % (d.name, device_payments[d])
if averages:
retval += "\nPower and price are averages over the time horizon. Payment is total.\n"
return retval
def plot(self, index=None, **kwargs): # , print_terminals=True):
"""Plot results."""
import matplotlib.pyplot as plt
fig, ax = plt.subplots(nrows=2, ncols=1, **kwargs)
ax[0].set_ylabel("power")
for device_terminal, value in self.power.items():
label = "%s[%d]" % (device_terminal[0].name,
device_terminal[1])
if index is None:
ax[0].plot(value, label=label)
else:
ax[0].plot(index, value, label=label)
ax[0].legend(loc="best")
ax[1].set_ylabel("price")
for net, value in self.price.items():
if index is None:
ax[1].plot(value, label=net.name)
else:
ax[1].plot(index, value, label=net.name)
ax[1].legend(loc="best")
return ax
def _update_mpc_results(t, time_steps, results_t, results_mpc):
for key, val in results_t.power.items():
results_mpc.power.setdefault(key, np.empty(time_steps))[t] = val[0, 0]
for key, val in results_t.price.items():
results_mpc.price.setdefault(key, np.empty(time_steps))[t] = val[0, 0]
for key, val in results_t.payments.items():
results_mpc.payments.setdefault(
key, np.empty(time_steps))[t] = val[0, 0]
class OptimizationError(Exception):
"""Error due to infeasibility or numerical problems during optimization."""
pass
def run_mpc(device, time_steps, predict, execute, **kwargs):
"""Execute model predictive control.
This method executes the model predictive control loop, roughly:
.. code:: python
for t in time_steps:
predict(t)
device.problem.solve()
execute(t)
..
It is the responsibility of the provided `predict` and `execute` functions
to update the device models with the desired predictions and execute the
actions as appropriate.
:param device: Device (or network of devices) to optimize
:param time_steps: Time steps to optimize over
:param predict: Prediction step
:param execute: Execution step
:type device: :class:`Device`
:type time_steps: sequence
:type predict: single argument function
:type execute: single argument function
:returns: Model predictive control results
:rtype: :class:`Results`
:raise: :class:`OptimizationError`
"""
total_cost = 0.
results = Results()
T_MPC = device
for t in tqdm.trange(time_steps):
predict(t)
device.init_problem(time_horizon=1)
device.problem.solve(**kwargs)
if device.problem.status != cvx.OPTIMAL:
# temporary
raise OptimizationError(
"failed at iteration %d, %s" % (t, device.problem.status))
stage_cost = sum([device.cost[0, 0]
for device in device.devices]).value
#print('at time %s, adding cost %f' % (t, stage_cost))
total_cost += stage_cost
execute(t)
_update_mpc_results(t, time_steps, device.results, results)
return total_cost, results
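# A minimal usage sketch (illustrative, not part of the original package): the two toy
# devices below are assumptions used only to show how Device, Terminal, Net and Group
# fit together; real models would add proper costs and constraints.
class _SketchFixedLoad(Device):
    """Toy device that consumes a fixed amount of power."""
    def __init__(self, power, name=None):
        super(_SketchFixedLoad, self).__init__([Terminal()], name)
        self._fixed_power = power

    @property
    def constraints(self):
        # Force the terminal to consume exactly the requested power.
        return [self.terminals[0].power_var == self._fixed_power]


class _SketchGenerator(Device):
    """Toy device that produces power at a quadratic cost."""
    def __init__(self, alpha=0.1, name=None):
        super(_SketchGenerator, self).__init__([Terminal()], name)
        self._alpha = alpha

    @property
    def cost(self):
        # Production appears as negative power at the terminal; cost grows quadratically.
        return self._alpha * cvx.square(self.terminals[0].power_var)


if __name__ == "__main__":
    load = _SketchFixedLoad(10.0, name="load")
    gen = _SketchGenerator(alpha=0.1, name="gen")
    bus = Net([load.terminals[0], gen.terminals[0]], name="bus")
    grid = Group([load, gen], [bus], name="grid")
    print(grid.optimize())  # prints power per terminal, the bus price and payments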
|
StarcoderdataPython
|
1775453
|
#!/usr/bin/env python3
import sys
import argparse
def make_range(s):
r = (start, end, step) = [int(x) for x in s.split(',')]
return r
def make_log_range(s):
r = (base, start, end, step) = [int(x) for x in s.split(',')]
return r
def log_range(base, start, end, step):
for p in range(start, end, step):
yield base ** p
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-w', '--workers_range', nargs='+', metavar='start,end,step', type=make_range, required=True)
parser.add_argument('-i', '--items_log_range', nargs='+', metavar='base,start,end,step', type=make_log_range, required=True)
args = parser.parse_args()
test_num = 1
for workers_range in args.workers_range:
for items_log_range in args.items_log_range:
for workers in range(*workers_range):
for items in log_range(*items_log_range):
with open("{test_num:02d}.in".format(**locals()), "w") as f:
print("{workers} {items}".format(**locals()), file=f)
test_num += 1
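# Example invocation (illustrative; the script name below is hypothetical):
#
#     ./generate_tests.py -w 1,5,1 -i 10,1,4,1
#
# writes 01.in, 02.in, ... where each file contains "<workers> <items>" for
# workers in range(1, 5, 1) and items in 10**1, 10**2, 10**3.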
|
StarcoderdataPython
|
133029
|
<filename>depiction/models/uri/rest_api/__init__.py
"""Initialize rest_api module."""
from .rest_api_model import RESTAPIModel # noqa
from .max_model import MAXModel # noqa
|
StarcoderdataPython
|
3265227
|
import configparser
import os
import pandas as pd
import pydash
from config import project_config
coin_paprika_host = 'https://api.coinpaprika.com'
price_columns = [
'address',
'price',
'minute',
]
def read_tokens():
cfg = configparser.ConfigParser()
cfg.read('./prices.ini')
return cfg
def read_lending_tokens():
df = pd.read_csv('./lending_tokens.csv')
tokens = df['address'].tolist()
return tokens
def write_tokens():
dags_folder = project_config.dags_folder
cfg = configparser.ConfigParser()
cfg_path = os.path.join(dags_folder, 'token_stats/coin_paprika/prices.ini')
cfg.read(cfg_path)
return cfg
def gen():
lending_tokens = read_lending_tokens()
tokens_cfg = read_tokens()
tokens = tokens_cfg.sections()
lending_config = configparser.ConfigParser()
for name_index in range(len(tokens)):
item = tokens_cfg[tokens[name_index]]
address = pydash.get(item, 'address', '')
if address.lower() in lending_tokens:
print(address.lower())
lending_config[tokens[name_index]] = item
with open('prices_lending.ini', 'w') as configfile:
lending_config.write(configfile)
if __name__ == '__main__':
gen()
|
StarcoderdataPython
|
1753530
|
<filename>ships/gametest/channel_management.py
#!/usr/bin/env python3
# Copyright (C) 2019-2021 The Xaya developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from shipstest import ShipsTest
"""
Tests basic management of channels, i.e. creating / joining / aborting them
as well as declaring loss to close a channel in agreement.
Disputes and resolutions (which deal with protos and signatures) are left
for another test.
"""
class ChannelManagementTest (ShipsTest):
def run (self):
self.generate (110)
self.expectGameState ({
"gamestats": {},
"channels": {},
})
# Create three channels with single participants for now.
    self.mainLogger.info ("Creating three channels...")
addr1 = self.rpc.xaya.getnewaddress ()
id1 = self.sendMove ("foo", {"c": {"addr": addr1}})
addr2 = self.rpc.xaya.getnewaddress ()
id2 = self.sendMove ("bar", {"c": {"addr": addr2}})
addr3 = self.rpc.xaya.getnewaddress ()
id3 = self.sendMove ("baz", {"c": {"addr": addr3}})
self.generate (1)
state = self.getGameState ()
self.assertEqual (state["gamestats"], {})
channels = state["channels"]
self.assertEqual (len (channels), 3)
assert id1 in channels
ch1 = channels[id1]
self.assertEqual (ch1["meta"]["participants"], [
{"name": "foo", "address": addr1}
])
self.assertEqual (ch1["state"]["parsed"]["phase"], "single participant")
assert id2 in channels
ch2 = channels[id2]
self.assertEqual (ch2["meta"]["participants"], [
{"name": "bar", "address": addr2}
])
self.assertEqual (ch2["state"]["parsed"]["phase"], "single participant")
assert id3 in channels
# Perform an invalid join and abort on the channels. This should not affect
# the state at all.
self.mainLogger.info ("Trying invalid operations...")
addr3 = self.rpc.xaya.getnewaddress ()
self.sendMove ("foo", {"j": {"id": id1, "addr": addr3}})
self.sendMove ("baz", {"a": {"id": id2}})
self.generate (1)
self.expectGameState (state)
# Join one of the channels and abort the other, this time for real.
self.mainLogger.info ("Joining and aborting the channels...")
self.sendMove ("baz", {"j": {"id": id1, "addr": addr3}})
self.sendMove ("bar", {"a": {"id": id2}})
self.generate (1)
state = self.getGameState ()
self.assertEqual (state["gamestats"], {})
channels = state["channels"]
self.assertEqual (len (channels), 2)
assert id1 in channels
ch1 = channels[id1]
self.assertEqual (ch1["meta"]["participants"], [
{"name": "foo", "address": addr1},
{"name": "baz", "address": addr3},
])
self.assertEqual (ch1["state"]["parsed"]["phase"], "first commitment")
assert id2 not in channels
assert id3 in channels
# Let the third channel time out.
self.generate (9)
channels = self.getGameState ()["channels"]
self.assertEqual (len (channels), 2)
assert id1 in channels
assert id3 in channels
self.generate (1)
channels = self.getGameState ()["channels"]
self.assertEqual (len (channels), 1)
assert id1 in channels
assert id3 not in channels
# Declare loss in the channel.
self.mainLogger.info ("Declaring loss in a game to close the channel...")
self.sendMove ("foo", {"l": {"id": id1, "r": ch1["meta"]["reinit"]}})
self.generate (1)
state = self.getGameState ()
self.assertEqual (state["channels"], {})
self.assertEqual (state["gamestats"], {
"foo": {"lost": 1, "won": 0},
"baz": {"lost": 0, "won": 1},
})
if __name__ == "__main__":
ChannelManagementTest ().main ()
|
StarcoderdataPython
|
1615515
|
<gh_stars>0
RTL_LANGUAGES = {
'he', 'ar', 'arc', 'dv', 'fa', 'ha',
'khw', 'ks', 'ku', 'ps', 'ur', 'yi',
}
|
StarcoderdataPython
|
90686
|
<reponame>FixturFab/pcb-tools
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2013-2014 <NAME> <<EMAIL>>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
METADATA = {
'name': 'pcb-tools',
'version': 0.2,
'author': '<NAME> <<EMAIL>>, <NAME> <<EMAIL>>',
'author_email': "<EMAIL>, <EMAIL>",
'description': "Utilities to handle Gerber (RS-274X) files.",
'license': "Apache",
'keywords': "pcb gerber tools",
'url': "http://github.com/curtacircuitos/pcb-tools",
'packages': ['gerber', 'gerber.render'],
'long_description': read('README.md'),
'classifiers': [
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: Apple Public Source License",
],
}
SETUPTOOLS_METADATA = {
'install_requires': ['cairocffi==0.6'],
}
def install():
""" Install using setuptools, fallback to distutils
"""
try:
from setuptools import setup
METADATA.update(SETUPTOOLS_METADATA)
setup(**METADATA)
except ImportError:
from sys import stderr
stderr.write('Could not import setuptools, using distutils')
        stderr.write('NOTE: You will need to install dependencies manually')
from distutils.core import setup
setup(**METADATA)
if __name__ == '__main__':
install()
|
StarcoderdataPython
|
3353670
|
from pyteal import *
def approval_program():
seller_key = Bytes("seller")
nft_id_key = Bytes("nft_id")
start_time_key = Bytes("start")
end_time_key = Bytes("end")
reserve_amount_key = Bytes("reserve_amount")
min_bid_increment_key = Bytes("min_bid_inc")
num_bids_key = Bytes("num_bids")
lead_bid_amount_key = Bytes("bid_amount")
lead_bid_account_key = Bytes("bid_account")
@Subroutine(TealType.none)
def closeNFTTo(assetID: Expr, account: Expr) -> Expr:
asset_holding = AssetHolding.balance(
Global.current_application_address(), assetID
)
return Seq(
asset_holding,
If(asset_holding.hasValue()).Then(
Seq(
InnerTxnBuilder.Begin(),
InnerTxnBuilder.SetFields(
{
TxnField.type_enum: TxnType.AssetTransfer,
TxnField.xfer_asset: assetID,
TxnField.asset_close_to: account,
}
),
InnerTxnBuilder.Submit(),
)
),
)
@Subroutine(TealType.none)
def repayPreviousLeadBidder(prevLeadBidder: Expr, prevLeadBidAmount: Expr) -> Expr:
return Seq(
InnerTxnBuilder.Begin(),
InnerTxnBuilder.SetFields(
{
TxnField.type_enum: TxnType.Payment,
TxnField.amount: prevLeadBidAmount - Global.min_txn_fee(),
TxnField.receiver: prevLeadBidder,
}
),
InnerTxnBuilder.Submit(),
)
@Subroutine(TealType.none)
def closeAccountTo(account: Expr) -> Expr:
return If(Balance(Global.current_application_address()) != Int(0)).Then(
Seq(
InnerTxnBuilder.Begin(),
InnerTxnBuilder.SetFields(
{
TxnField.type_enum: TxnType.Payment,
TxnField.close_remainder_to: account,
}
),
InnerTxnBuilder.Submit(),
)
)
on_create_start_time = Btoi(Txn.application_args[2])
on_create_end_time = Btoi(Txn.application_args[3])
on_create = Seq(
App.globalPut(seller_key, Txn.application_args[0]),
App.globalPut(nft_id_key, Btoi(Txn.application_args[1])),
App.globalPut(start_time_key, on_create_start_time),
App.globalPut(end_time_key, on_create_end_time),
App.globalPut(reserve_amount_key, Btoi(Txn.application_args[4])),
App.globalPut(min_bid_increment_key, Btoi(Txn.application_args[5])),
App.globalPut(lead_bid_account_key, Global.zero_address()),
Assert(
And(
Global.latest_timestamp() < on_create_start_time,
on_create_start_time < on_create_end_time,
# TODO: should we impose a maximum auction length?
)
),
Approve(),
)
on_setup = Seq(
Assert(Global.latest_timestamp() < App.globalGet(start_time_key)),
# opt into NFT asset -- because you can't opt in if you're already opted in, this is what
# we'll use to make sure the contract has been set up
InnerTxnBuilder.Begin(),
InnerTxnBuilder.SetFields(
{
TxnField.type_enum: TxnType.AssetTransfer,
TxnField.xfer_asset: App.globalGet(nft_id_key),
TxnField.asset_receiver: Global.current_application_address(),
}
),
InnerTxnBuilder.Submit(),
Approve(),
)
on_bid_txn_index = Txn.group_index() - Int(1)
on_bid_nft_holding = AssetHolding.balance(
Global.current_application_address(), App.globalGet(nft_id_key)
)
on_bid = Seq(
on_bid_nft_holding,
Assert(
And(
# the auction has been set up
on_bid_nft_holding.hasValue(),
on_bid_nft_holding.value() > Int(0),
# the auction has started
App.globalGet(start_time_key) <= Global.latest_timestamp(),
# the auction has not ended
Global.latest_timestamp() < App.globalGet(end_time_key),
# the actual bid payment is before the app call
Gtxn[on_bid_txn_index].type_enum() == TxnType.Payment,
Gtxn[on_bid_txn_index].sender() == Txn.sender(),
Gtxn[on_bid_txn_index].receiver()
== Global.current_application_address(),
Gtxn[on_bid_txn_index].amount() >= Global.min_txn_fee(),
)
),
If(
Gtxn[on_bid_txn_index].amount()
>= App.globalGet(lead_bid_amount_key) + App.globalGet(min_bid_increment_key)
).Then(
Seq(
If(App.globalGet(lead_bid_account_key) != Global.zero_address()).Then(
repayPreviousLeadBidder(
App.globalGet(lead_bid_account_key),
App.globalGet(lead_bid_amount_key),
)
),
App.globalPut(lead_bid_amount_key, Gtxn[on_bid_txn_index].amount()),
App.globalPut(lead_bid_account_key, Gtxn[on_bid_txn_index].sender()),
App.globalPut(num_bids_key, App.globalGet(num_bids_key) + Int(1)),
Approve(),
)
),
Reject(),
)
on_call_method = Txn.application_args[0]
on_call = Cond(
[on_call_method == Bytes("setup"), on_setup],
[on_call_method == Bytes("bid"), on_bid],
)
on_delete = Seq(
If(Global.latest_timestamp() < App.globalGet(start_time_key)).Then(
Seq(
# the auction has not yet started, it's ok to delete
Assert(
Or(
# sender must either be the seller or the auction creator
Txn.sender() == App.globalGet(seller_key),
Txn.sender() == Global.creator_address(),
)
),
# if the auction contract account has opted into the nft, close it out
closeNFTTo(App.globalGet(nft_id_key), App.globalGet(seller_key)),
# if the auction contract still has funds, send them all to the seller
closeAccountTo(App.globalGet(seller_key)),
Approve(),
)
),
If(App.globalGet(end_time_key) <= Global.latest_timestamp()).Then(
Seq(
# the auction has ended, pay out assets
If(App.globalGet(lead_bid_account_key) != Global.zero_address())
.Then(
If(
App.globalGet(lead_bid_amount_key)
>= App.globalGet(reserve_amount_key)
)
.Then(
# the auction was successful: send lead bid account the nft
closeNFTTo(
App.globalGet(nft_id_key),
App.globalGet(lead_bid_account_key),
)
)
.Else(
Seq(
# the auction was not successful because the reserve was not met: return
# the nft to the seller and repay the lead bidder
closeNFTTo(
App.globalGet(nft_id_key), App.globalGet(seller_key)
),
repayPreviousLeadBidder(
App.globalGet(lead_bid_account_key),
App.globalGet(lead_bid_amount_key),
),
)
)
)
.Else(
# the auction was not successful because no bids were placed: return the nft to the seller
closeNFTTo(App.globalGet(nft_id_key), App.globalGet(seller_key))
),
# send remaining funds to the seller
closeAccountTo(App.globalGet(seller_key)),
Approve(),
)
),
Reject(),
)
program = Cond(
[Txn.application_id() == Int(0), on_create],
[Txn.on_completion() == OnComplete.NoOp, on_call],
[
Txn.on_completion() == OnComplete.DeleteApplication,
on_delete,
],
[
Or(
Txn.on_completion() == OnComplete.OptIn,
Txn.on_completion() == OnComplete.CloseOut,
Txn.on_completion() == OnComplete.UpdateApplication,
),
Reject(),
],
)
return program
def clear_state_program():
return Approve()
if __name__ == "__main__":
with open("auction_approval.teal", "w") as f:
compiled = compileTeal(approval_program(), mode=Mode.Application, version=5)
f.write(compiled)
with open("auction_clear_state.teal", "w") as f:
compiled = compileTeal(clear_state_program(), mode=Mode.Application, version=5)
f.write(compiled)
|
StarcoderdataPython
|
1631857
|
<filename>infogain/__main__.py
import argparse
import sys
parser = argparse.ArgumentParser(
prog="InfoGain",
description="Information Gain - Extract information\n"
)
parser.add_argument(
"command",
help="Select a command to run:\n\tDocument"
)
args = parser.parse_args(sys.argv[1:2])
if args.command == "Document":
parser = argparse.ArgumentParser(
prog="{} {}".format(parser.prog, "InfoGain Document"),
description="{}{}".format(parser.description, "Access the functions within the document")
)
commands = parser.add_mutually_exclusive_group(required=True)
commands.add_argument(
"--annotate",
nargs=2,
metavar=("OntologyPath", "DocumentPath"),
help="Annotate a document acording to an ontology."
)
commands.add_argument(
"--score",
nargs=2,
metavar=("OntologyPath", "DocumentPath"),
help="Produce metric results for a document, according to an ontology"
)
args = parser.parse_args(sys.argv[2:])
if args.annotate:
from . import artefact
from . import knowledge
artefact.annotate(
knowledge.Ontology(filepath=args.annotate[0]),
artefact.Document(filepath=args.annotate[1])
)
elif args.score:
from . import artefact
from . import knowledge
artefact.score(
            knowledge.Ontology(filepath=args.score[0]),
            artefact.Document(filepath=args.score[1]),
True
)
|
StarcoderdataPython
|
100112
|
import sys
# Given the list of cubes, adds a column to every row based on the <where>
# parameter, either at the beginning or end
def add_col(cubes,where):
for cube in cubes:
for grid in cube:
for row in grid:
if where == "end":
row.append(".")
elif where == "begin":
row.insert(0,".")
return cubes
def add_row(cubes,where):
for cube in cubes:
for grid in cube:
new_row = []
for i in range(len(grid[0])):
new_row.append(".")
if where == "end":
grid.append(new_row)
elif where == "begin":
grid.insert(0,new_row)
return cubes
def add_grid(cubes,where):
for cube in cubes:
new_grid = []
for i in range(len(cube[0])):
new_row = []
for j in range(len(cube[0][0])):
new_row.append(".")
new_grid.append(new_row)
if where == "end":
cube.append(new_grid)
elif where == "begin":
cube.insert(0,new_grid)
return cubes
def add_cube(cubes, where):
new_cube = []
for i in range(len(cubes[0])):
new_grid = []
for j in range(len(cubes[0][0])):
new_row = []
for k in range(len(cubes[0][0][0])):
new_row.append(".")
new_grid.append(new_row)
new_cube.append(new_grid)
if where == "end":
cubes.append(new_cube)
elif where == "begin":
cubes.insert(0,new_cube)
return cubes
def add_dimensions(cubes):
cubes = add_col(cubes, "end")
cubes = add_col(cubes, "begin")
cubes = add_grid(cubes, "end")
cubes = add_grid(cubes,"begin")
cubes = add_row(cubes, "end")
cubes = add_row(cubes, "begin")
cubes = add_cube(cubes, "end")
cubes = add_cube(cubes, "begin")
return cubes
def print_cubes(cubes):
for cube in cubes:
for grid in cube:
for row in grid:
for item in row:
print(item,end = "")
print()
print()
print("\n")
# returns a list of 4-tuples representing the offsets at which
# we look for neighbors:
def get_combos():
result = []
ranges = [0,-1,1]
for i in range(3):
for j in range(3):
for k in range(3):
for l in range(3):
if not (ranges[i] == 0 and ranges[j] == 0 and ranges[k] == 0 and ranges[l] == 0):
result.append((ranges[i],ranges[j],ranges[k],ranges[l]))
return result
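# Illustrative check (added; not in the original script): in four dimensions every
# coordinate shifts by -1, 0, or +1, and the all-zero offset is excluded, giving
# 3**4 - 1 == 80 neighbor offsets.
assert len(get_combos()) == 80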
L = []
f = open(sys.argv[1],"r")
for item in f:
item = item.strip()
R = []
for c in item:
R.append(c)
L.append(R)
cubes = [[L]]
# add an extra layer on each dimension:
cubes = add_dimensions(cubes)
combos = get_combos()
cycles = 0
while cycles < 6:
new_cubes = []
for h in range(0,len(cubes)):
cube = []
for i in range(0,len(cubes[0])):
grid = []
for j in range(0,len(cubes[0][0])):
row = []
for k in range(0,len(cubes[0][0][0])):
neighbors = []
for combo in combos:
h_ = h + combo[0]
i_ = i + combo[1]
j_ = j + combo[2]
k_ = k + combo[3]
                        # if neighbor is out of bounds (exceeds max):
if (h_ >= len(cubes)) or (i_ >= len(cubes[h])) or (j_ >= len(cubes[h][i])) or (k_ >= len(cubes[h][i][j])):
neighbors.append(".")
# if neighbor exceeds minimum bounds:
elif (h_ < 0) or (i_ < 0) or (j_ < 0) or (k_ < 0):
neighbors.append(".")
else:
neighbors.append(cubes[h_][i_][j_][k_])
# if active
if cubes[h][i][j][k] == "#":
# if 2 or 3 neighbors stay active, otherwise become inactive:
if not (neighbors.count("#") == 3 or neighbors.count("#") == 2):
row.append(".")
else:
row.append("#")
# if inactive:
elif cubes[h][i][j][k] == ".":
# if 3 active neighbors, become active, otherwise remain inactive:
if neighbors.count("#") == 3:
row.append("#")
else:
row.append(".")
# append row to grid
grid.append(row)
# append grid to grids:
cube.append(grid)
new_cubes.append(cube)
cubes = new_cubes.copy()
# add new dimension each time:
cubes = add_dimensions(cubes)
cycles += 1
# count how many active
num_active = 0
for cube in cubes:
for grid in cube:
for row in grid:
for item in row:
if item == "#":
num_active += 1
print(num_active)
|
StarcoderdataPython
|
1779415
|
<reponame>e-koch/VLA_Lband<filename>14B-088/HI/imaging/jasper/HI_mask_channel_split.py
'''
Split the HI Arecibo mask for 14B-088 into individual
channels.
'''
from casa_tools import image_split_by_channel
start_chan = 0
nchan = 1231
mask = "/home/ekoch/m33/14B-088/M33_14B-088_HI_mask.image"
output_dir = "/home/ekoch/m33/14B-088/mask_channels/"
image_split_by_channel(mask, nchan=nchan, start=start_chan,
output_dir=output_dir)
|
StarcoderdataPython
|
1716362
|
<gh_stars>0
from typing import (
Dict,
Iterable,
)
import orjson as json
from aiobaseclient import BaseClient
from aiolibgen.exceptions import (
ClientError,
ExceededConnectionsError,
ExternalServiceError,
NotFoundError,
)
class LibgenClient(BaseClient):
default_fields = [
'title',
'author',
'md5',
'filesize',
'descr',
'edition',
'extension',
'pages',
'series',
'year',
'language',
'identifier',
'id',
'coverurl',
'doi',
'tags',
'timelastmodified',
'visible',
]
async def by_ids(self, ids, fields=None):
if not fields:
fields = self.default_fields
if not isinstance(ids, Iterable):
ids = [ids]
ids = list(map(str, ids))
r = await self.get(
'/json.php',
params={
'ids': ','.join(ids),
'fields': ','.join(fields),
}
)
return r
async def newer(self, timenewer, idnewer=0, fields=None):
if not fields:
fields = self.default_fields
while True:
rows = await self.get(
'/json.php',
params={
'fields': ','.join(fields),
'mode': 'newer',
'timenewer': timenewer,
'idnewer': idnewer,
}
)
if not rows:
return
for row in rows:
timenewer = row['timelastmodified']
idnewer = row['id']
yield row
async def response_processor(self, response):
text = await response.text()
if response.status == 404:
raise NotFoundError(status=response.status, text=text, url=str(response.url))
elif response.status == 500 and 'max_user_connections' in text:
raise ExceededConnectionsError()
elif response.status != 200:
raise ExternalServiceError(str(response.url), response.status, text)
data = json.loads(text)
if isinstance(data, Dict) and 'error' in data:
raise ClientError(**data)
return data
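# --- Hypothetical usage sketch (added; the mirror URL and constructor arguments are
# assumptions, since the BaseClient signature comes from aiobaseclient) ---
# async def example():
#     client = LibgenClient(base_url='http://libgen.example')
#     books = await client.by_ids([100500], fields=['title', 'md5'])
#     async for row in client.newer(timenewer='2021-01-01 00:00:00'):
#         print(row['id'], row['timelastmodified'])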
|
StarcoderdataPython
|
1799690
|
import requests
from bs4 import BeautifulSoup
# Scrapes transcript data from scrapsfromtheloft.com
def url_to_transcript(url):
'''Returns transcript data specifically from scrapsfromtheloft.com.'''
page = requests.get(url).text
soup = BeautifulSoup(page, "html5lib")
text = [p.text for p in soup.find(class_="post-content").find_all('p')]
# convert to text all paragraphs in the class (post-content)
print(url + "DONE")
# Once done print the specific url and the "DONE" after it
return text
# A list of URLs to transcripts in our scope
urls = ['http://scrapsfromtheloft.com/2017/05/06/louis-ck-oh-my-god-full-transcript/',
'http://scrapsfromtheloft.com/2017/04/11/dave-chappelle-age-spin-2017-full-transcript/',
'http://scrapsfromtheloft.com/2018/03/15/ricky-gervais-humanity-transcript/',
'http://scrapsfromtheloft.com/2017/08/07/bo-burnham-2013-full-transcript/',
'http://scrapsfromtheloft.com/2017/05/24/bill-burr-im-sorry-feel-way-2014-full-transcript/',
'http://scrapsfromtheloft.com/2017/04/21/jim-jefferies-bare-2014-full-transcript/',
'http://scrapsfromtheloft.com/2017/08/02/john-mulaney-comeback-kid-2015-full-transcript/',
'http://scrapsfromtheloft.com/2017/10/21/hasan-minhaj-homecoming-king-2017-full-transcript/',
'http://scrapsfromtheloft.com/2017/09/19/ali-wong-baby-cobra-2016-full-transcript/',
'http://scrapsfromtheloft.com/2017/08/03/anthony-jeselnik-thoughts-prayers-2015-full-transcript/',
'http://scrapsfromtheloft.com/2018/03/03/mike-birbiglia-my-girlfriends-boyfriend-2013-full-transcript/',
'http://scrapsfromtheloft.com/2017/08/19/joe-rogan-triggered-2016-full-transcript/',
]
# Comedian names in order
comedians = ['louis', 'dave', 'ricky', 'bo', 'bill', 'jim', 'john', 'hasan', 'ali', 'anthony', 'mike', 'joe']
# Actually request transcripts
transcripts = [url_to_transcript(u) for u in urls]
|
StarcoderdataPython
|
4814381
|
import sys
from functools import partial
import argparse
from cmd import Cmd
from .cpu import INSTRUCTION_SET
from .printer.instr import InstrJsonPrinter
from .printer.instr import InstrPrettyPrinter
class ArgumentError(Exception):
pass
class ArgumentParser(argparse.ArgumentParser):
def exit(self, status=0, message=''):
raise ArgumentError(message)
class InstrListArgumentParser(ArgumentParser):
def __init__(self, *args, **kwargs):
gr_name = kwargs.pop('group_name', None)
kwargs['add_help'] = False
if gr_name is None:
kwargs['description'] = 'List all instructions'
else:
kwargs['description'] = 'List all instructions of ' + gr_name
kwargs['usage'] = 'ls [OPTIONS]'
super(InstrListArgumentParser, self).__init__(*args, **kwargs)
self.add_argument('--json', action='store_true',
help="json format")
class BasicShell(Cmd):
def __init__(self, *args, **kwargs):
Cmd.__init__(self, *args, **kwargs)
def completedefault(self, text, line, begin, end):
args = line.split(None, 2)
subshell = "{0}_shell".format(args[0])
if hasattr(self, subshell):
subshell = getattr(self, subshell)
if len(args) == 1:
return subshell.completenames('')
elif len(args) == 2 and text:
return subshell.completenames(args[1])
elif len(args) >= 2 and hasattr(subshell,
'complete_{0}'.format(args[1])):
func = getattr(subshell,
'complete_{0}'.format(args[1]))
return func(text, line, begin, end,
subline=line.split(None, 1)[1])
else:
return []
else:
return []
def enter_subshell(self, shell, args):
args = args.strip()
if not args:
shell.cmdloop()
else:
shell.onecmd(args)
class CpuShell(BasicShell):
prompt = "z80/cpu> "
def do_ls(self, args):
pass
def do_exit(self, args):
return True
class InstrGroupShell(BasicShell):
prompt = "z80/instr/group> "
@staticmethod
def enter(args, shell):
shell.enter_subshell(shell, args)
def __init__(self, group, *args, **kwargs):
BasicShell.__init__(self, *args, **kwargs)
self.group = group
self.parser_ls = InstrListArgumentParser(group_name=group.name)
def do_exit(self, args):
return True
def cmdloop(self):
InstrGroupShell.prompt = "z80/instr/" + self.group.short_name + "> "
BasicShell.cmdloop(self)
def help_ls(self):
self.parser_ls.print_help()
def do_ls(self, args):
try:
args = self.parser_ls.parse_args(args.split())
except ArgumentError as e:
sys.stderr.write(str(e))
return
if args.json:
printer = InstrJsonPrinter()
else:
printer = InstrPrettyPrinter()
printer.write([self.group.to_dict()])
class InstrShell(BasicShell):
prompt = "z80/instr> "
@classmethod
def add_group_commands(cls, groups):
for group in groups:
group_shell = InstrGroupShell(group)
setattr(cls, group.short_name + "_shell", group_shell)
setattr(cls, "do_" + group.short_name,
partial(InstrGroupShell.enter, **{'shell': group_shell}))
def __init__(self, *args, **kwargs):
BasicShell.__init__(self, *args, **kwargs)
self.parser_ls = InstrListArgumentParser()
def do_ls(self, args):
try:
args = self.parser_ls.parse_args(args.split())
except ArgumentError as e:
sys.stderr.write(str(e))
return
if args.json:
printer = InstrJsonPrinter()
else:
printer = InstrPrettyPrinter()
printer.write([group.to_dict()
for group in INSTRUCTION_SET.groups])
def complete_ls(self, text, line, begin, end, subline=None):
# print("'{0}_{1}'".format(line, text))
def complete_assembler(assembler, prefix):
first = prefix[0]
if len(prefix) == 1:
if not first:
return sorted(assembler.keys())
else:
return [f
for f in sorted(assembler.keys())
if f.startswith(first)]
else:
if first.startswith('(('):
first = first[1:]
if first.endswith('))'):
first = first[:-1]
if first in assembler:
return complete_assembler(assembler[first], prefix[1:])
else:
return []
if subline is None:
subline = line
args = subline.split()
# fix: text is empty if line ends with '('
# determine text manually
if subline.endswith(' '):
text = ''
else:
text = args[-1]
if len(args) == 1:
return complete_assembler(INSTRUCTION_SET.assembler, [''])
else:
if not text:
args.append('')
return complete_assembler(INSTRUCTION_SET.assembler, args[1:])
return complete_assembler(INSTRUCTION_SET.assembler, text)
def do_exit(self, args):
return True
class Shell(BasicShell):
prompt = "z80> "
def __init__(self, *args, **kwargs):
Cmd.__init__(self, *args, **kwargs)
self.cpu_shell = CpuShell()
self.instr_shell = InstrShell()
def do_instr(self, args):
self.enter_subshell(self.instr_shell, args)
def do_cpu(self, args):
self.enter_subshell(self.cpu_shell, args)
def do_exit(self, args):
return True
# Initialization
InstrShell.add_group_commands(INSTRUCTION_SET.groups)
if __name__ == "__main__":
shell = Shell()
if len(sys.argv) > 1:
shell.onecmd(' '.join(sys.argv[1:]))
else:
shell.cmdloop()
|
StarcoderdataPython
|
1716296
|
<reponame>DerPhysikeR/bookcut
from bookcut.repositories import open_access_button
from bookcut.downloader import filename_refubrished
from bookcut.search import search_downloader
from click import confirm
"""
Article.py is used by the article command and searches repositories for
published articles.
"""
def article_search(doi, title):
try:
article_json_data = open_access_button(doi, title)
url = article_json_data["url"]
metadata = article_json_data["metadata"]
title = metadata["title"]
filename = filename_refubrished(title)
filename = filename + ".pdf"
ask_for_downloading(filename, url)
except KeyError:
print("\nCan not find the given article.\nPlease try another search!")
def ask_for_downloading(articlefilename, url):
ask = confirm(f"Do you want to download:\n {articlefilename}")
if ask is True:
search_downloader(articlefilename, url)
else:
print("Aborted!")
|
StarcoderdataPython
|
3200840
|
<filename>tibanna/exceptions.py
# custom exceptions
class StillRunningException(Exception):
"""EC2 AWSEM instance is still running (job not complete)"""
pass
class EC2StartingException(Exception):
"""EC2 AWSEM instance is still starting (job not complete)"""
pass
class AWSEMJobErrorException(Exception):
"""There is an error from a worklow run on the EC2 AWSEM instance"""
pass
class DependencyStillRunningException(Exception):
pass
class DependencyFailedException(Exception):
pass
class EC2LaunchException(Exception):
pass
class EC2UnintendedTerminationException(Exception):
pass
class EC2IdleException(Exception):
pass
class EC2InstanceLimitException(Exception):
pass
class EC2InstanceLimitWaitException(Exception):
pass
class MissingFieldInInputJsonException(Exception):
pass
class MalFormattedInputJsonException(Exception):
pass
class MalFormattedPostrunJsonException(Exception):
pass
class MetricRetrievalException(Exception):
pass
|
StarcoderdataPython
|
3245481
|
<filename>fabfile.py
from fabric.api import local, run
def info():
run("uname -a")
run("lsb_release -a")
# def freeze():
# local("pip freeze > requirements.txt")
# local("git add requirements.txt")
# local("git commit -v")
def pip_upgrade():
import pip
for dist in pip.get_installed_distributions():
local("pip install --upgrade {0}".format(dist.project_name))
def clean_pyc():
local("find -name '*.pyc' -delete")
|
StarcoderdataPython
|
25408
|
<reponame>lindsey98/dml_cross_entropy
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
def state_dict_to_cpu(state_dict: OrderedDict):
"""Moves a state_dict to cpu and removes the module. added by DataParallel.
Parameters
----------
state_dict : OrderedDict
State_dict containing the tensors to move to cpu.
Returns
-------
new_state_dict : OrderedDict
State_dict on cpu.
"""
new_state = OrderedDict()
for k in state_dict.keys():
newk = k.replace('module.', '') # remove "module." if model was trained using DataParallel
new_state[newk] = state_dict[k].cpu()
return new_state
class SmoothCrossEntropy(nn.Module):
def __init__(self, epsilon: float = 0.1):
super(SmoothCrossEntropy, self).__init__()
self.epsilon = float(epsilon)
def forward(self, logits: torch.Tensor, labels: torch.LongTensor) -> torch.Tensor:
# target probs is of shape [N x C], only the gt labels get values 1 - self.epsilon,
# other entries for other labels get values (self.epsilon)/(C - 1)
target_probs = torch.full_like(logits, self.epsilon / (logits.shape[1] - 1))
target_probs.scatter_(1, labels.unsqueeze(1), 1 - self.epsilon)
# LogSoftMax for logits
softmax_logits = F.softmax(logits, 1)
logsoftmax_logits = torch.log(softmax_logits + 1e-5) # manually control underflow
loss = F.kl_div(logsoftmax_logits, target_probs, reduction='none').sum(1)
# kl_divergence = \sum p(y) * log(p(yhat)/p(y)) while CE = - \sum p(y) * log(p(yhat))
if torch.isnan(loss).any():
# print('labels:', labels)
print(labels.shape)
print('Labels Min: {}, Max: {}'.format(torch.min(labels), torch.max(labels)))
# print('target prob:', target_probs)
print(target_probs.shape)
print(torch.sum(target_probs == 0))
print('Target probs Min: {}, Max: {}'.format(torch.min(target_probs), torch.max(target_probs)))
# print('log_softmax logits: ', torch.log_softmax(logits, 1))
print(logsoftmax_logits.shape)
print(torch.sum(logsoftmax_logits == 0))
print('logsoftmax_logits Min: {}, Max: {}'.format(torch.min(logsoftmax_logits), torch.max(logsoftmax_logits)))
print(loss)
raise RuntimeError('Loss has nan values, probably because the log operations lead to -inf')
return loss
class VAELoss(nn.Module):
def __init__(self, kld_weight: float = 0.005):
super(VAELoss, self).__init__()
self.kld_weight = float(kld_weight)
def forward(self, recons: torch.Tensor, input: torch.Tensor, mu: torch.Tensor, log_var: torch.Tensor) -> dict:
recons_loss = F.binary_cross_entropy(recons, input)
kld_loss = torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim = 1), dim = 0)
loss = recons_loss + self.kld_weight * kld_loss
loss_dict = {'loss':loss, 'reconstruct':recons_loss, 'kld_loss': -kld_loss}
print(loss_dict)
return loss_dict
class AELoss(nn.Module):
def __init__(self):
super(AELoss, self).__init__()
def forward(self, recons: torch.Tensor, input: torch.Tensor) -> dict:
recons_loss = F.binary_cross_entropy(recons, input)
loss = recons_loss
loss_dict = {'loss':loss, 'reconstruct':recons_loss}
# print(loss_dict)
return loss_dict
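# --- Hypothetical usage sketch (added; not part of the original module) ---
# SmoothCrossEntropy puts 1 - epsilon on the true class and epsilon / (C - 1) on each
# of the other C - 1 classes before taking the KL divergence against the softmax.
if __name__ == "__main__":
    example_logits = torch.randn(4, 10)                 # batch of 4 samples, 10 classes
    example_labels = torch.randint(0, 10, (4,))         # integer class labels
    criterion = SmoothCrossEntropy(epsilon=0.1)
    per_sample_loss = criterion(example_logits, example_labels)  # shape [4]
    print(per_sample_loss.mean().item())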
|
StarcoderdataPython
|
115189
|
from django.urls import include, path
from django.conf.urls import url
from rest_framework import routers
from . import views
# router = routers.DefaultRouter()
# router.register('jobpostings', views.JobPostingList)
# router.register('jobpostings/<int:id>', views.JobPostingDetail)
urlpatterns = [
# url(r'^jobposting/', views.JobPostingList.as_view()),
# url(r'^jobpostings/(?P<pk>[0-9]+)/$', views.JobPostingDetail.as_view()),
path("jobpostings/", views.JobPostingList),
path("jobpostings/<int:pk>/", views.JobPostingDetails),
path("resources/", views.ResourceList),
path("resources/<int:pk>/", views.ResourceDetails),
path("resources_pages/", views.ResourcePageList),
path("resources_pages/<int:pk>/", views.ResourcePageDetails),
# path('', include(router.urls)),
]
|
StarcoderdataPython
|
3246854
|
<gh_stars>10-100
from typing import List
from alteia.apis.provider import AssetManagementAPI
from alteia.core.resources.resource import Resource
from alteia.core.utils.typing import ResourceId, SomeResourceIds, SomeResources
class CarrierModelsImpl:
def __init__(self, asset_management_api: AssetManagementAPI, **kwargs):
self._provider = asset_management_api
def create(self, *, name: str, maker: str, type: str, company: ResourceId = None,
unloaded_weight: dict = None, flight_time: dict = None,
speed: dict = None, altitude: dict = None,
compatible_sensor_models: List[ResourceId] = None, **kwargs) -> Resource:
"""Create a carrier model.
Args:
name: Carrier model name.
maker: Maker name.
            type: Model type, among ``fixed-wind``, ``multirotor``, ``ground-robot``,
``helicopter``, ``pedestrian``.
company: Optional identifier of the company.
unloaded_weight: Optional unloaded weight
``{ value: weight without sensor, unit: unit (g, kg) }``.
flight_time : Optional flight time
``{ value: maximum flight time, unit: unit (min) }``.
speed : Optional speed
``{ min: {value, unit}, max: {value, unit(m/s, mph ,ft/s, km/h, knot)} }``.
altitude : Optional altitude ``{ min: {value, unit}, max: {value, unit(m, ft)} }``.
compatible_sensor_models: Optional list of compatible sensors identifiers.
**kwargs: Optional keyword arguments. Those arguments are
passed as is to the API provider.
Returns:
Resource: A carrier model resource.
"""
data = kwargs
data.update({
'name': name,
'maker': maker,
'type': type
})
for param_name, param_value in (('company', company),
('unloaded_weight', unloaded_weight),
('flight_time', flight_time),
('speed', speed),
('altitude', altitude),
('unloaded_weight', unloaded_weight),
('compatible_sensor_models', compatible_sensor_models)):
if param_value is not None:
data[param_name] = param_value
content = self._provider.post(path='create-carrier-model', data=data)
return Resource(**content)
def search(self, *, filter: dict = None, limit: int = None,
page: int = None, sort: dict = None, **kwargs
) -> List[Resource]:
"""Search carrier models.
Args:
filter: Search filter dictionary.
limit: Maximum number of results to extract.
page: Page number (starting at page 0).
sort: Sort the results on the specified attributes
(``1`` is sorting in ascending order,
``-1`` is sorting in descending order).
return_total: Return the number of results found.
**kwargs: Optional keyword arguments. Those arguments are
passed as is to the API provider.
Returns:
Resources: A list of carrier models resources.
"""
data = kwargs
for name, value in [('filter', filter or {}),
('limit', limit),
('page', page),
('sort', sort)]:
if value is not None:
data.update({name: value})
r = self._provider.post('search-carrier-models', data=data)
results = r.get('results')
return [Resource(**m) for m in results]
def describe(self, carrier_models: SomeResourceIds, **kwargs) -> SomeResources:
"""Describe a carrier model or a list of carrier models.
Args:
carrier_models: Identifier of the carrier model to describe, or list of
such identifiers.
**kwargs: Optional keyword arguments. Those arguments are
passed as is to the API provider.
Returns:
Resource: The carrier model description
or a list of carrier model descriptions.
"""
data = kwargs
if isinstance(carrier_models, list):
data['carrier_models'] = carrier_models
descs = self._provider.post('describe-carrier-models', data=data)
return [Resource(**desc) for desc in descs]
else:
data['carrier_model'] = carrier_models
desc = self._provider.post('describe-carrier-model', data=data)
return Resource(**desc)
def delete(self, carrier_model: ResourceId, **kwargs):
"""Delete a carrier model.
Args:
carrier_model: Carrier model to delete.
"""
data = kwargs
data['carrier_model'] = carrier_model
self._provider.post('delete-carrier-model', data=data)
|
StarcoderdataPython
|
3282054
|
import pandas as pd
import numpy as np
import unidecode
comunas = pd.read_csv('./data/comuna.csv')
comunas_name = np.array([unidecode.unidecode(x).lower() for x in comunas['name'].to_numpy()],dtype=str)
comunas_id = np.array(comunas['id'].to_numpy(), dtype=int)
comuna_code = dict(zip(comunas_name, comunas_id))
comunas_fix = {
'isla de pascua': 'isla de pascua',
'trehuaco' : 'treguaco',
'coccepcion' : 'concepcion',
'conce' : 'concepcion',
'concepcion.' : 'concepcion',
'santiago centro' : 'santiago',
'caleta tortel' : 'tortel',
'puente' : 'puente alto',
'san vicente de tagua tagua' : 'san vicente',
'san vicente tagua tagua' : 'san vicente',
'marchigue' : 'marchihue',
'coihaique' : 'coyhaique',
'coyihaque' : 'coyhaique',
'haulpen' : 'hualpen',
'vina': 'vina del mar',
'la serena': 'la serena',
'huechurabs' : 'huechuraba',
'providenica' : 'providencia',
'providenca' : 'providencia',
'cowuimbo' : 'coquimbo',
'comuna de putre' : 'putre',
'x region, chile' : 'nr',
'v region' : 'nr',
'alto hospicii' : 'alto hospicio',
'san miguel.' : 'san miguel',
'pozo amonte' : 'pozo almonte',
'til til' : 'tiltil',
'qta normal' : 'quinta normal',
'quinta norma' : 'quinta normal',
'milina' : 'molina',
'batuco' : 'lampa',
'la visterna' : 'la cisterna',
'"puerto montt' : 'puerto montt',
'extranjero' : 'nr',
'cerrillos.' : 'cerrillos',
'maipu (mientras)..' : 'maipu',
'colchagua': 'nr',
'san antonio comuna de cartagena': 'cartagena',
'quemchi chiloe-' : 'quemchi',
'rocas de santo domingo' : 'santo domingo',
'la calera' : 'calera',
'coyhique' : 'coyhaique',
'cancun' : 'nr',
'estados unidos' : 'nr',
'gladstone' : 'nr',
'qjillota' : 'quillota',
'pac' : 'pedro aguirre cerda',
'paihuano' : 'paiguano',
'puerto aysen' : 'aysen',
'provincia' : 'nr',
'santioago' : 'santiago',
'quilpue (belloto)' : 'quilpue',
'nan' : 'nr'
}
def get_comunas_id(x, col):
try:
x[col] = comuna_code[x[col]]
except KeyError:
x[col] = comuna_code['nr']
return x
def fix_location_online(x):
if pd.isna(x['Comuna']):
if pd.isna(x['Comuna.1']):
x['Comuna'] = ''
else:
x['Comuna'] = x['Comuna.1']
try:
x['Comuna'] = comuna_code[unidecode.unidecode(x['Comuna']).lower()]
except KeyError:
x['Comuna'] = comuna_code[comunas_fix[unidecode.unidecode(x['Comuna']).lower()]]
return x
def fix_location(x):
if x['comuna'] == 'nr':
x['comuna'] = 1
if pd.isna(x['comuna']):
x['comuna'] = 1
return x
|
StarcoderdataPython
|
3391899
|
<gh_stars>0
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import socket
from typing import Union
from pyignite.constants import *
from pyignite.exceptions import (
HandshakeError, ParameterError, SocketError, connection_errors, AuthenticationError,
)
from pyignite.datatypes import Byte, Int, Short, String, UUIDObject
from pyignite.datatypes.internal import Struct
from .handshake import HandshakeRequest
from .ssl import wrap
from ..stream import BinaryStream, READ_BACKWARD
CLIENT_STATUS_AUTH_FAILURE = 2000
class Connection:
"""
This is a `pyignite` class, that represents a connection to Ignite
node. It serves multiple purposes:
* socket wrapper. Detects fragmentation and network errors. See also
https://docs.python.org/3/howto/sockets.html,
    * binary protocol connector. Encapsulates handshake and failover reconnection.
"""
_socket = None
_failed = None
client = None
host = None
port = None
timeout = None
username = None
password = <PASSWORD>
ssl_params = {}
uuid = None
@staticmethod
def _check_ssl_params(params):
expected_args = [
'use_ssl',
'ssl_version',
'ssl_ciphers',
'ssl_cert_reqs',
'ssl_keyfile',
'ssl_keyfile_password',
'ssl_certfile',
'ssl_ca_certfile',
]
for param in params:
if param not in expected_args:
raise ParameterError((
'Unexpected parameter for connection initialization: `{}`'
).format(param))
def __init__(
self, client: 'Client', timeout: float = 2.0,
username: str = None, password: str = None, **ssl_params
):
"""
Initialize connection.
For the use of the SSL-related parameters see
https://docs.python.org/3/library/ssl.html#ssl-certificates.
:param client: Ignite client object,
:param timeout: (optional) sets timeout (in seconds) for each socket
operation including `connect`. 0 means non-blocking mode, which is
virtually guaranteed to fail. Can accept integer or float value.
         Defaults to 2.0 seconds,
:param use_ssl: (optional) set to True if Ignite server uses SSL
on its binary connector. Defaults to use SSL when username
         and password have been supplied, not to use SSL otherwise,
:param ssl_version: (optional) SSL version constant from standard
`ssl` module. Defaults to TLS v1.1, as in Ignite 2.5,
:param ssl_ciphers: (optional) ciphers to use. If not provided,
`ssl` default ciphers are used,
:param ssl_cert_reqs: (optional) determines how the remote side
certificate is treated:
* `ssl.CERT_NONE` − remote certificate is ignored (default),
* `ssl.CERT_OPTIONAL` − remote certificate will be validated,
if provided,
* `ssl.CERT_REQUIRED` − valid remote certificate is required,
:param ssl_keyfile: (optional) a path to SSL key file to identify
local (client) party,
:param ssl_keyfile_password: (optional) password for SSL key file,
can be provided when key file is encrypted to prevent OpenSSL
password prompt,
:param ssl_certfile: (optional) a path to ssl certificate file
to identify local (client) party,
:param ssl_ca_certfile: (optional) a path to a trusted certificate
or a certificate chain. Required to check the validity of the remote
(server-side) certificate,
:param username: (optional) user name to authenticate to Ignite
cluster,
:param password: (optional) password to authenticate to Ignite cluster.
"""
self.client = client
self.timeout = timeout
self.username = username
self.password = password
self._check_ssl_params(ssl_params)
if self.username and self.password and 'use_ssl' not in ssl_params:
ssl_params['use_ssl'] = True
self.ssl_params = ssl_params
self._failed = False
@property
def closed(self) -> bool:
""" Tells if socket is closed. """
return self._socket is None
@property
def failed(self) -> bool:
""" Tells if connection is failed. """
return self._failed
@failed.setter
def failed(self, value):
self._failed = value
@property
def alive(self) -> bool:
""" Tells if connection is up and no failure detected. """
return not self.failed and not self.closed
def __repr__(self) -> str:
return '{}:{}'.format(self.host or '?', self.port or '?')
_wrap = wrap
def get_protocol_version(self):
"""
Returns the tuple of major, minor, and revision numbers of the used
thin protocol version, or None, if no connection to the Ignite cluster
was yet established.
"""
return self.client.protocol_version
def read_response(self) -> Union[dict, OrderedDict]:
"""
Processes server's response to the handshake request.
:return: handshake data.
"""
response_start = Struct([
('length', Int),
('op_code', Byte),
])
with BinaryStream(self, self.recv(reconnect=False)) as stream:
start_class = response_start.parse(stream)
start = stream.read_ctype(start_class, direction=READ_BACKWARD)
data = response_start.to_python(start)
response_end = None
if data['op_code'] == 0:
response_end = Struct([
('version_major', Short),
('version_minor', Short),
('version_patch', Short),
('message', String),
('client_status', Int)
])
elif self.get_protocol_version() >= (1, 4, 0):
response_end = Struct([
('node_uuid', UUIDObject),
])
if response_end:
end_class = response_end.parse(stream)
end = stream.read_ctype(end_class, direction=READ_BACKWARD)
data.update(response_end.to_python(end))
return data
def connect(
self, host: str = None, port: int = None
) -> Union[dict, OrderedDict]:
"""
Connect to the given server node with protocol version fallback.
:param host: Ignite server node's host name or IP,
:param port: Ignite server node's port number.
"""
detecting_protocol = False
# choose highest version first
if self.client.protocol_version is None:
detecting_protocol = True
self.client.protocol_version = max(PROTOCOLS)
try:
result = self._connect_version(host, port)
except HandshakeError as e:
if e.expected_version in PROTOCOLS:
self.client.protocol_version = e.expected_version
result = self._connect_version(host, port)
else:
raise e
except connection_errors:
# restore undefined protocol version
if detecting_protocol:
self.client.protocol_version = None
raise
# connection is ready for end user
self.uuid = result.get('node_uuid', None) # version-specific (1.4+)
self.failed = False
return result
def _connect_version(
self, host: str = None, port: int = None,
) -> Union[dict, OrderedDict]:
"""
Connect to the given server node using protocol version
defined on client.
:param host: Ignite server node's host name or IP,
:param port: Ignite server node's port number.
"""
host = host or IGNITE_DEFAULT_HOST
port = port or IGNITE_DEFAULT_PORT
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.settimeout(self.timeout)
self._socket = self._wrap(self._socket)
self._socket.connect((host, port))
protocol_version = self.client.protocol_version
hs_request = HandshakeRequest(
protocol_version,
self.username,
self.password
)
with BinaryStream(self) as stream:
hs_request.from_python(stream)
self.send(stream.getbuffer(), reconnect=False)
hs_response = self.read_response()
if hs_response['op_code'] == 0:
self.close()
error_text = 'Handshake error: {}'.format(hs_response['message'])
# if handshake fails for any reason other than protocol mismatch
# (i.e. authentication error), server version is 0.0.0
if any([
hs_response['version_major'],
hs_response['version_minor'],
hs_response['version_patch'],
]):
error_text += (
' Server expects binary protocol version '
'{version_major}.{version_minor}.{version_patch}. Client '
'provides {client_major}.{client_minor}.{client_patch}.'
).format(
client_major=protocol_version[0],
client_minor=protocol_version[1],
client_patch=protocol_version[2],
**hs_response
)
elif hs_response['client_status'] == CLIENT_STATUS_AUTH_FAILURE:
raise AuthenticationError(error_text)
raise HandshakeError((
hs_response['version_major'],
hs_response['version_minor'],
hs_response['version_patch'],
), error_text)
self.host, self.port = host, port
return hs_response
def reconnect(self):
# do not reconnect if connection is already working
# or was closed on purpose
if not self.failed:
return
self.close()
# connect and silence the connection errors
try:
self.connect(self.host, self.port)
except connection_errors:
pass
def send(self, data: Union[bytes, bytearray, memoryview], flags=None, reconnect=True):
"""
Send data down the socket.
:param data: bytes to send,
:param flags: (optional) OS-specific flags.
:param reconnect: (optional) reconnect on failure, default True.
"""
if self.closed:
raise SocketError('Attempt to use closed connection.')
kwargs = {}
if flags is not None:
kwargs['flags'] = flags
try:
self._socket.sendall(data, **kwargs)
except connection_errors:
self.failed = True
self.reconnect()
raise
def recv(self, flags=None, reconnect=True) -> bytearray:
"""
Receive data from the socket.
:param flags: (optional) OS-specific flags.
:param reconnect: (optional) reconnect on failure, default True.
"""
def _recv(buffer, num_bytes):
bytes_to_receive = num_bytes
while bytes_to_receive > 0:
try:
bytes_rcvd = self._socket.recv_into(buffer, bytes_to_receive, **kwargs)
if bytes_rcvd == 0:
raise SocketError('Connection broken.')
except connection_errors:
self.failed = True
if reconnect:
self.reconnect()
raise
buffer = buffer[bytes_rcvd:]
bytes_to_receive -= bytes_rcvd
if self.closed:
raise SocketError('Attempt to use closed connection.')
kwargs = {}
if flags is not None:
kwargs['flags'] = flags
data = bytearray(4)
_recv(memoryview(data), 4)
response_len = int.from_bytes(data, PROTOCOL_BYTE_ORDER)
data.extend(bytearray(response_len))
_recv(memoryview(data)[4:], response_len)
return data
def close(self):
"""
Try to mark socket closed, then unlink it. This is recommended but
not required, since sockets are automatically closed when
garbage-collected.
"""
if self._socket:
try:
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
except connection_errors:
pass
self._socket = None
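# --- Hypothetical usage sketch (added; host, port and credentials are illustrative,
# and `client` is assumed to be an existing pyignite Client instance) ---
# conn = Connection(client, timeout=2.0, username='ignite', password='ignite')
# handshake = conn.connect('127.0.0.1', 10800)   # returns the handshake response data
# ...
# conn.close()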
|
StarcoderdataPython
|
3256724
|
<gh_stars>0
""" Common functionality for exasyncio """
from enum import IntEnum
from types import TracebackType
from typing import (
Optional,
Type,
)
class ExaConnStatus(IntEnum):
""" Indicates the status of a connection """
CLOSED = 0
WS_CONNECTED = 1
CONNECTED = 2
DISCONNECTING = 3
CLOSING = 4
class AsyncContextMixin:
""" Mixin to provide asynchronous context support """
async def __aenter__(self):
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
await self.close()
|
StarcoderdataPython
|
4810325
|
<reponame>ducnx1997/int3411<filename>Filter.py<gh_stars>0
from struct import unpack
from scipy import interpolate
import numpy
class Filter:
def __init__(self, acv_file_path, name):
self.name = name
with open(acv_file_path, 'rb') as acv_file:
self.curves = self._read_curves(acv_file)
self.polynomials = self._find_coefficients()
def _read_curves(self, acv_file):
_, nr_curves = unpack('!hh', acv_file.read(4))
curves = []
for i in range(0, nr_curves):
curve = []
num_curve_points, = unpack('!h', acv_file.read(2))
for j in range(0, num_curve_points):
y, x = unpack('!hh', acv_file.read(4))
curve.append((x, y))
curves.append(curve)
return curves
def _find_coefficients(self):
polynomials = []
for curve in self.curves:
xdata = [x[0] for x in curve]
ydata = [x[1] for x in curve]
p = interpolate.lagrange(xdata, ydata)
polynomials.append(p)
return polynomials
def get_r(self):
return self.polynomials[1]
def get_g(self):
return self.polynomials[2]
def get_b(self):
return self.polynomials[3]
def get_c(self):
return self.polynomials[0]
class FilterManager:
def __init__(self):
self.filters = {}
def add_filter(self, filter_obj):
self.filters[filter_obj.name] = filter_obj
def apply_filter(self, filter_name, image_array):
if image_array.ndim < 3:
raise Exception('Photos must be in color, meaning at least 3 channels')
else:
def interpolate(i_arr, f_arr, p, p_c):
p_arr = p_c(f_arr)
return p_arr
# NOTE: Assumes that image_array is a numpy array
image_filter = self.filters[filter_name]
# NOTE: What happens if filter does not exist?
width, height, channels = image_array.shape
filter_array = numpy.zeros((width, height, 3), dtype=float)
p_r = image_filter.get_r()
p_g = image_filter.get_g()
p_b = image_filter.get_b()
p_c = image_filter.get_c()
filter_array[:, :, 0] = p_r(image_array[:, :, 0])
filter_array[:, :, 1] = p_g(image_array[:, :, 1])
filter_array[:, :, 2] = p_b(image_array[:, :, 2])
filter_array = filter_array.clip(0, 255)
filter_array = p_c(filter_array)
filter_array = numpy.ceil(filter_array).clip(0, 255)
return filter_array.astype(numpy.uint8)
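# --- Hypothetical usage sketch (added; the .acv path, filter name and image are
# illustrative; image_array is assumed to be an H x W x 3 uint8 numpy array) ---
# manager = FilterManager()
# manager.add_filter(Filter('curves/vintage.acv', 'vintage'))
# filtered = manager.apply_filter('vintage', image_array)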
|
StarcoderdataPython
|
1644259
|
import ssl
import websocket
import random, string, json, threading, time
import login
def on_message(ws, message):
global first
print(message)
def on_error(ws, error):
print(error)
def on_close(ws):
print("socket closed")
def on_open(ws):
print("socket opened ok")
chat = None
def keepalive(ws):
global chat
while True:
time.sleep(3)
if chat:
ws.send(json.dumps({
"t": "talk",
"d": chat
}))
chat = None
else:
ws.send("null")
def startchat(tid, lila2):
#websocket.enableTrace(True)
sri = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(10))
url = "wss://socket.lichess.org/tournament/{}/socket/v4?sri={}".format(tid, sri)
print("url", url)
ws = websocket.WebSocketApp(url,
on_message = on_message,
on_error = on_error,
on_close = on_close,
on_open = on_open,
cookie = "lila2={}".format(lila2)
)
threading.Thread(target = keepalive, args = (ws,)).start()
ws.run_forever(
host = "socket.lichess.org",
origin = "https://lichess.org"
)
if __name__ == "__main__":
tid = input("Tourney id: ")
lila2 = login.login()
threading.Thread(target = startchat, args = (tid, lila2)).start()
while True:
chat = input("chat: ")
|
StarcoderdataPython
|
4843015
|
from mock import Mock
from dataclasses import dataclass
from origin.bus.dispatcher import MessageDispatcher
from origin.bus import Message
@dataclass
class Message1(Message):
"""TODO."""
something: str
@dataclass
class Message2(Message):
"""TODO."""
something: str
class TestMessageSerializer:
"""TODO."""
def test__handler_exists_for_type__should_invoke_handler(self):
"""TODO."""
# -- Arrange ---------------------------------------------------------
msg = Message1(something='something')
handler1 = Mock()
handler2 = Mock()
uut = MessageDispatcher({
Message1: handler1,
Message2: handler2,
})
# -- Act -------------------------------------------------------------
uut(msg)
# -- Assert ----------------------------------------------------------
handler1.assert_called_once_with(msg)
handler2.assert_not_called()
def test__handler_does_not_exist_for_type__should_not_invoke_handler(self):
"""TODO."""
# -- Arrange ---------------------------------------------------------
handler = Mock()
uut = MessageDispatcher({
Message1: handler,
})
# -- Act -------------------------------------------------------------
uut(Message2(something='something'))
# -- Assert ----------------------------------------------------------
handler.assert_not_called()
|
StarcoderdataPython
|
1736526
|
<reponame>logyball/raspberry-pi-vacation-planner
from time import sleep, time
def cntdown_timer(window):
while True:
if int(time()) >= window.time_to_move:
window.move_right.emit()
sleep(1)
|
StarcoderdataPython
|
100642
|
<gh_stars>10-100
# The MIT License (MIT)
#
# Copyright (c) 2016 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class MissingFieldException(Exception):
"""
Exception for cases when something is missing
"""
def __init__(self, message):
"""Initialize MissingFieldException
Args:
message: Message of exception
"""
super(MissingFieldException, self).__init__(
"Field '{}' not found.".format(message))
class FormatException(Exception):
"""
Exception for cases when something has wrong format
"""
pass
class WrongArgumentException(Exception):
"""
Exception for cases when wrong argument is passed
"""
pass
|
StarcoderdataPython
|
3261859
|
<reponame>Project-BE-Comp-Face-Recognition/facerecognition<filename>helpers/facecrop.py
import cv2
import dlib
import os
import sys
#cam = cv2.VideoCapture(1)
detector = dlib.get_frontal_face_detector()
def faceDetector(teacherId):
detector = dlib.get_frontal_face_detector()
detectedFaces=0
facenumber=0
currentDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
imageFolder = os.path.join(currentDir, "identify/")
imageFolder = os.path.join(imageFolder,teacherId)
print(imageFolder)
for filename in os.listdir(imageFolder):
print(imageFolder+ './'+filename)
img=cv2.imread(imageFolder+'./'+filename)
dets = detector(img, 1)
print("detected = {}".format(len(dets)))
if not os.path.exists(currentDir+'./croppedimages'):
os.makedirs(currentDir+'./croppedimages')
target=os.path.join(currentDir,'croppedimages')
if not os.path.exists(target+"./"+teacherId):
os.makedirs(target+"./"+teacherId)
cropped=os.path.join(target,teacherId)
for i, d in enumerate(dets):
facenumber+=1
cv2.imwrite(cropped+"/face" + str(facenumber) +
'.jpg', img[d.top():d.bottom(), d.left():d.right()])
detectedFaces+=len(dets)
print(cropped)
return (cropped)
if __name__ == "__main__":
abc=faceDetector(teacherId)
|
StarcoderdataPython
|
3359795
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class IntegrationRuntimeVNetProperties(Model):
"""VNet properties for managed integration runtime.
:param additional_properties: Unmatched properties from the message are
deserialized this collection
:type additional_properties: dict[str, object]
:param v_net_id: The ID of the VNet that this integration runtime will
join.
:type v_net_id: str
:param subnet: The name of the subnet this integration runtime will join.
:type subnet: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'v_net_id': {'key': 'vNetId', 'type': 'str'},
'subnet': {'key': 'subnet', 'type': 'str'},
}
def __init__(self, additional_properties=None, v_net_id=None, subnet=None):
self.additional_properties = additional_properties
self.v_net_id = v_net_id
self.subnet = subnet
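# Illustrative example (added; the resource ID and subnet name are placeholders):
#   props = IntegrationRuntimeVNetProperties(
#       v_net_id='/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Network/virtualNetworks/<vnet>',
#       subnet='default')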
|
StarcoderdataPython
|
1622768
|
<reponame>ptrourke/concordia
"""
See the module-level docstring for implementation details
"""
import os
import re
from functools import wraps
from logging import getLogger
from tempfile import NamedTemporaryFile
from urllib.parse import parse_qsl, urlencode, urljoin, urlparse, urlsplit, urlunsplit
import requests
from celery import group, task
from django.db.transaction import atomic
from django.utils.text import slugify
from django.utils.timezone import now
from requests.exceptions import HTTPError
from concordia.models import Asset, Item, MediaType
from concordia.storage import ASSET_STORAGE
from importer.models import ImportItem, ImportItemAsset, ImportJob
logger = getLogger(__name__)
#: P1 has generic search / item pages and a number of top-level format-specific
#: “context portals” which expose the same JSON interface.
#: jq 'to_entries[] | select(.value.type == "context-portal") | .key' < manifest.json
ACCEPTED_P1_URL_PREFIXES = [
"collections",
"search",
"item",
"audio",
"books",
"film-and-videos",
"manuscripts",
"maps",
"newspapers",
"notated-music",
"photos",
"websites",
]
def update_task_status(f):
"""
Decorator which causes any function which is passed a TaskStatusModel to
update on entry and exit and populate the status field with an exception
message if raised
Assumes that all wrapped functions get the Celery task self value as the
first parameter and the TaskStatusModel subclass as the second
"""
@wraps(f)
def inner(self, task_status_model, *args, **kwargs):
# We'll do a sanity check to make sure that another process hasn't
# updated the model status in the meantime:
guard_qs = task_status_model.__class__._default_manager.filter(
pk=task_status_model.pk, completed__isnull=False
)
if guard_qs.exists():
logger.warning(
"Task %s was already completed and will not be repeated",
task_status_model,
extra={
"data": {
"object": task_status_model,
"args": args,
"kwargs": kwargs,
}
},
)
return
task_status_model.last_started = now()
task_status_model.task_id = self.request.id
task_status_model.save()
try:
f(self, task_status_model, *args, **kwargs)
task_status_model.completed = now()
task_status_model.save()
except Exception as exc:
task_status_model.status = "{}\n\nUnhandled exception: {}".format(
task_status_model.status, exc
).strip()
task_status_model.save()
raise
return inner
def get_item_id_from_item_url(item_url):
"""
extracts item id from the item url and returns it
:param item_url: item url
:return: item id
"""
if item_url.endswith("/"):
item_id = item_url.split("/")[-2]
else:
item_id = item_url.split("/")[-1]
return item_id
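# For example, get_item_id_from_item_url("https://www.loc.gov/item/mss859430021/")
# and the same URL without the trailing slash both return "mss859430021".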
def normalize_collection_url(original_url):
"""
Given a P1 collection/search URL, produce a normalized version which is safe
to import. This will replace parameters related to our response format and
pagination requirements but otherwise leave the query string unmodified.
"""
parsed_url = urlsplit(original_url)
new_qs = [("fo", "json")]
for k, v in parse_qsl(parsed_url.query):
if k not in ("fo", "at", "sp"):
new_qs.append((k, v))
return urlunsplit(
(parsed_url.scheme, parsed_url.netloc, parsed_url.path, urlencode(new_qs), None)
)
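# For example (illustrative URL):
#   normalize_collection_url("https://www.loc.gov/collections/baseball-cards/?sp=3&fo=xml&q=pitcher")
# returns "https://www.loc.gov/collections/baseball-cards/?fo=json&q=pitcher" --
# the pagination ("sp") and format ("fo"/"at") parameters are reset while the
# rest of the query string is preserved.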
def get_collection_items(collection_url):
"""
:param collection_url: URL of a loc.gov collection or search results page
:return: list of (item_id, item_url) tuples
"""
items = []
current_page_url = collection_url
while current_page_url:
resp = requests.get(current_page_url)
resp.raise_for_status()
data = resp.json()
if "results" not in data:
logger.error('Expected URL %s to include "results"', resp.url)
continue
for result in data["results"]:
try:
item_info = get_item_info_from_result(result)
except Exception as exc:
logger.warning(
"Skipping result from %s which did not match expected format: %s",
resp.url,
exc,
exc_info=True,
extra={"data": {"result": result, "url": resp.url}},
)
continue
if item_info:
items.append(item_info)
current_page_url = data["pagination"].get("next", None)
if not items:
logger.warning("No valid items found for collection url: %s", collection_url)
return items
def get_item_info_from_result(result):
"""
Given a P1 result, return the item ID and URL if it represents a collection
item
:return: (item_id, item_url) tuple or None if the URL does not represent a
supported item type
"""
ignored_formats = {"collection", "web page"}
item_id = result["id"]
original_format = result["original_format"]
if ignored_formats.intersection(original_format):
logger.info(
"Skipping result %s because it contains an unsupported format: %s",
item_id,
original_format,
extra={"data": {"result": result}},
)
return
image_url = result.get("image_url")
if not image_url:
logger.info(
"Skipping result %s because it lacks an image_url",
item_id,
extra={"data": {"result": result}},
)
return
item_url = result["url"]
m = re.search(r"loc.gov/item/([^/]+)", item_url)
if not m:
logger.info(
"Skipping %s because the URL %s doesn't appear to be an item!",
item_id,
item_url,
extra={"data": {"result": result}},
)
return
return m.group(1), item_url
def import_items_into_project_from_url(requesting_user, project, import_url):
"""
Given a loc.gov URL, return the task ID for the import task
"""
parsed_url = urlparse(import_url)
m = re.match(
r"^/(%s)/" % "|".join(map(re.escape, ACCEPTED_P1_URL_PREFIXES)), parsed_url.path
)
if not m:
raise ValueError(
f"{import_url} doesn't match one of the known importable patterns"
)
url_type = m.group(1)
import_job = ImportJob(project=project, created_by=requesting_user, url=import_url)
import_job.full_clean()
import_job.save()
if url_type == "item":
create_item_import_task.delay(import_job.pk, import_url)
else:
# Both collections and search results return the same format JSON
        # response so we can use the same code to process them:
import_collection_task.delay(import_job.pk)
return import_job
@task(bind=True)
def import_collection_task(self, import_job_pk):
import_job = ImportJob.objects.get(pk=import_job_pk)
return import_collection(self, import_job)
@update_task_status
def import_collection(self, import_job):
item_info = get_collection_items(normalize_collection_url(import_job.url))
for item_id, item_url in item_info:
create_item_import_task.delay(import_job.pk, item_url)
@task(
bind=True,
autoretry_for=(HTTPError,),
retry_backoff=True,
retry_backoff_max=8 * 60 * 60,
retry_jitter=True,
retry_kwargs={"max_retries": 12},
)
def create_item_import_task(self, import_job_pk, item_url):
"""
Create an ImportItem record using the provided import job and URL by
requesting the metadata from the URL
Enqueues the actual import for the item once we have the metadata
"""
import_job = ImportJob.objects.get(pk=import_job_pk)
# Load the Item record with metadata from the remote URL:
resp = requests.get(item_url, params={"fo": "json"})
resp.raise_for_status()
item_data = resp.json()
item, item_created = Item.objects.get_or_create(
item_id=get_item_id_from_item_url(item_data["item"]["id"]),
defaults={"item_url": item_url, "project": import_job.project},
)
import_item, import_item_created = import_job.items.get_or_create(
url=item_url, item=item
)
if not item_created:
logger.warning("Not reprocessing existing item %s", item)
import_item.status = "Not reprocessing existing item %s" % item
import_item.completed = import_item.last_started = now()
import_item.task_id = self.request.id
import_item.full_clean()
import_item.save()
return
import_item.item.metadata.update(item_data)
populate_item_from_url(import_item.item, item_data["item"])
item.full_clean()
item.save()
return import_item_task.delay(import_item.pk)
@task(bind=True)
def import_item_task(self, import_item_pk):
i = ImportItem.objects.select_related("item").get(pk=import_item_pk)
return import_item(self, i)
@update_task_status
@atomic
def import_item(self, import_item):
item_assets = []
import_assets = []
item_resource_url = None
asset_urls, item_resource_url = get_asset_urls_from_item_resources(
import_item.item.metadata.get("resources", [])
)
for idx, asset_url in enumerate(asset_urls, start=1):
asset_title = f"{import_item.item.item_id}-{idx}"
item_asset = Asset(
item=import_item.item,
title=asset_title,
slug=slugify(asset_title, allow_unicode=True),
sequence=idx,
media_url=f"{idx}.jpg",
media_type=MediaType.IMAGE,
download_url=asset_url,
resource_url=item_resource_url,
)
item_asset.full_clean()
item_assets.append(item_asset)
Asset.objects.bulk_create(item_assets)
for asset in item_assets:
import_asset = ImportItemAsset(
import_item=import_item,
asset=asset,
url=asset.download_url,
sequence_number=asset.sequence,
)
import_asset.full_clean()
import_assets.append(import_asset)
import_item.assets.bulk_create(import_assets)
download_asset_group = group(download_asset_task.s(i.pk) for i in import_assets)
import_item.full_clean()
import_item.save()
return download_asset_group()
def populate_item_from_url(item, item_info):
"""
    Populates a concordia Item from the provided loc.gov item metadata
    (the "item" section of the JSON response already retrieved by the caller,
    so no second request is needed)
"""
for k in ("title", "description"):
v = item_info.get(k)
if v:
setattr(item, k, v)
# FIXME: this was never set before so we don't have selection logic:
thumb_urls = [i for i in item_info["image_url"] if ".jpg" in i]
if thumb_urls:
item.thumbnail_url = urljoin(item.item_url, thumb_urls[0])
def get_asset_urls_from_item_resources(resources):
"""
Given a loc.gov JSON response, return the list of asset URLs matching our
criteria (JPEG, largest version available)
"""
    assets = []
    # Guard against an empty resources list before indexing into it.
    item_resource_url = ""
    if resources:
        item_resource_url = resources[0]["url"] or ""
for resource in resources:
# The JSON response for each file is a list of available image versions
# we will attempt to save the highest resolution JPEG:
for item_file in resource.get("files", []):
candidates = []
for variant in item_file:
if any(i for i in ("url", "height", "width") if i not in variant):
continue
url = variant["url"]
height = variant["height"]
width = variant["width"]
if variant.get("mimetype") == "image/jpeg":
candidates.append((url, height * width))
if candidates:
candidates.sort(key=lambda i: i[1], reverse=True)
assets.append(candidates[0][0])
return assets, item_resource_url
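# A sketch of the expected input shape (all values are hypothetical). Each
# resource has a "url" and a "files" list, where every file is itself a list of
# variants; the largest JPEG variant of each file is kept:
#
#     resources = [{
#         "url": "https://example.invalid/resource/",
#         "files": [[
#             {"url": "https://example.invalid/small.jpg", "height": 100, "width": 80, "mimetype": "image/jpeg"},
#             {"url": "https://example.invalid/large.jpg", "height": 1000, "width": 800, "mimetype": "image/jpeg"},
#         ]],
#     }]
#     # -> (["https://example.invalid/large.jpg"], "https://example.invalid/resource/")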
@task(
bind=True,
autoretry_for=(HTTPError,),
retry_backoff=True,
retry_backoff_max=8 * 60 * 60,
retry_jitter=True,
retry_kwargs={"max_retries": 12},
)
def download_asset_task(self, import_asset_pk):
# We'll use the containing objects' slugs to construct the storage path so
# we might as well use select_related to save extra queries:
qs = ImportItemAsset.objects.select_related("import_item__item__project__campaign")
import_asset = qs.get(pk=import_asset_pk)
return download_asset(self, import_asset)
@update_task_status
def download_asset(self, import_asset):
"""
Download the URL specified for an ImportItemAsset and save it to working
storage
"""
item = import_asset.import_item.item
asset_filename = os.path.join(
item.project.campaign.slug,
item.project.slug,
item.item_id,
"%d.jpg" % import_asset.sequence_number,
)
try:
# We'll download the remote file to a temporary file
# and after that completes successfully will upload it
# to the defined ASSET_STORAGE.
with NamedTemporaryFile(mode="x+b") as temp_file:
resp = requests.get(import_asset.url, stream=True)
resp.raise_for_status()
for chunk in resp.iter_content(chunk_size=256 * 1024):
temp_file.write(chunk)
            # Rewind the tempfile back to the first byte so we can upload it from the start.
temp_file.flush()
temp_file.seek(0)
ASSET_STORAGE.save(asset_filename, temp_file)
except Exception as exc:
logger.error(
"Unable to download %s to %s: %s",
import_asset.url,
asset_filename,
exc,
exc_info=True,
)
raise
|
StarcoderdataPython
|
1682924
|
from OpenGLCffi.GLES2 import params
@params(api='gles2', prms=['pname', 'value'])
def glBlendParameteriNV(pname, value):
pass
@params(api='gles2', prms=[])
def glBlendBarrierNV():
pass
|
StarcoderdataPython
|
3251190
|
from setuptools import setup
if __name__ == "__main__":
console_scripts = ["iseq = iseq:cli"]
setup(entry_points={"console_scripts": console_scripts},)
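# With this entry point installed (for example via "pip install ."), running the
# "iseq" command on a shell dispatches to the cli() callable in the iseq module.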
|
StarcoderdataPython
|
3370740
|
<gh_stars>1-10
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, <NAME> and <NAME>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
import os
import numpy as np
import subprocess as sub
def _latex_write(name):
    header = _latex_preamble()
    ending = _latex_ending()
    with open(name + ".tex", "w") as texfile:
        texfile.write(header + ending)
def _latex_pdf(name):
    CWD = os.getcwd()
    _latex_write(name)
    try:
        process = sub.Popen(['pdflatex', CWD + '/' + name + '.tex'], stdout=sub.PIPE)
        stdout, stderr = process.communicate()
        success = True
    except OSError:
        print('pdflatex binary not found.')
        success = False
    if success:
        # Remove the auxiliary files left behind by pdflatex.
        try:
            os.remove(name + ".log")
            os.remove(name + ".aux")
        except OSError:
            pass
def _latex_preamble():
string = "\\documentclass[class=minimal,border=0pt]{standalone}\n"
string += "\\usepackage{tikz}\n"
string += "\\usetikzlibrary{backgrounds,fit,decorations.pathreplacing}\n"
string += "\\newcommand{\\ket}[1]{\\ensuremath{\left|#1\\right\\rangle}}\n"
string += '\\begin{document}\n'
return string
def _latex_ending():
    return '\\end{document}'
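# A minimal usage sketch (the file name is hypothetical): this writes
# "circuit.tex" to the current directory and, when a pdflatex binary is
# available on the PATH, compiles it to "circuit.pdf".
if __name__ == "__main__":
    _latex_pdf("circuit")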
|
StarcoderdataPython
|
1675627
|
__docformat__ = "numpy"
# pylint: disable=R1710
import argparse
from typing import List, Union, Set
import difflib
from prompt_toolkit.completion import NestedCompleter
from gamestonk_terminal import feature_flags as gtff
from gamestonk_terminal.helper_funcs import get_flair, system_clear
from gamestonk_terminal.menu import session
from gamestonk_terminal.portfolio.brokers.ally import ally_controller
from gamestonk_terminal.portfolio.brokers.degiro import degiro_controller
from gamestonk_terminal.portfolio.brokers.robinhood import robinhood_controller
from gamestonk_terminal.portfolio.brokers.coinbase import coinbase_controller
class BrokersController:
"""Brokers Controller"""
CHOICES = [
"cls",
"home",
"h",
"?",
"help",
"q",
"quit",
"..",
"exit",
"r",
"reset",
]
CHOICES_COMMANDS: List = []
BROKERS = ["cb", "ally", "rh", "degiro"]
CHOICES += BROKERS + CHOICES_COMMANDS
def __init__(self, queue: List[str] = None):
self.bro_parser = argparse.ArgumentParser(add_help=False, prog="bro")
self.bro_parser.add_argument("cmd", choices=self.CHOICES)
self.broker_list: Set = set()
self.merged_holdings = None
if queue:
self.queue = queue
else:
self.queue = list()
self.completer: Union[None, NestedCompleter] = None
if session and gtff.USE_PROMPT_TOOLKIT:
choices: dict = {c: {} for c in self.CHOICES}
self.completer = NestedCompleter.from_nested_dict(choices)
def print_help(self):
"""Print help"""
help_string = """
Brokers:
> ally Ally Invest Menu
> degiro Degiro Menu
> rh Robinhood Menu
Crypto Brokers:
> cb Coinbase Pro Menu
"""
print(help_string)
def switch(self, an_input: str):
"""Process and dispatch input
Returns
-------
List[str]
List of commands in the queue to execute
"""
# Empty command
if not an_input:
print("")
return self.queue
# Navigation slash is being used
if "/" in an_input:
actions = an_input.split("/")
# Absolute path is specified
if not actions[0]:
an_input = "home"
# Relative path so execute first instruction
else:
an_input = actions[0]
# Add all instructions to the queue
for cmd in actions[1:][::-1]:
if cmd:
self.queue.insert(0, cmd)
(known_args, other_args) = self.bro_parser.parse_known_args(an_input.split())
# Redirect commands to their correct functions
if known_args.cmd:
if known_args.cmd in ("..", "q"):
known_args.cmd = "quit"
elif known_args.cmd in ("?", "h"):
known_args.cmd = "help"
elif known_args.cmd == "r":
known_args.cmd = "reset"
getattr(
self,
"call_" + known_args.cmd,
lambda _: "Command not recognized!",
)(other_args)
return self.queue
def call_cls(self, _):
"""Process cls command"""
system_clear()
def call_home(self, _):
"""Process home command"""
self.queue.insert(0, "quit")
self.queue.insert(0, "quit")
def call_help(self, _):
"""Process help command"""
self.print_help()
def call_quit(self, _):
"""Process quit menu command"""
print("")
self.queue.insert(0, "quit")
def call_exit(self, _):
"""Process exit terminal command"""
self.queue.insert(0, "quit")
self.queue.insert(0, "quit")
self.queue.insert(0, "quit")
def call_reset(self, _):
"""Process reset command"""
self.queue.insert(0, "bro")
self.queue.insert(0, "portfolio")
self.queue.insert(0, "reset")
self.queue.insert(0, "quit")
self.queue.insert(0, "quit")
def call_degiro(self, _):
"""Process degiro command."""
self.queue = degiro_controller.menu(self.queue)
def call_ally(self, _):
"""Process ally command."""
self.queue = ally_controller.menu(self.queue)
def call_rh(self, _):
"""Process rh command."""
self.queue = robinhood_controller.menu(self.queue)
def call_cb(self, _):
"""Process degiro command."""
self.queue = coinbase_controller.menu(self.queue)
# TODO: Consistent way of merging across brokers including crypto
# def call_login(self, other_args):
# """Process login command"""
# logged_in = False
# if not other_args:
# print("Please enter brokers you wish to login to")
# print("")
# return
# for broker in other_args:
# if broker in self.BROKERS:
# api = broker + "_api"
# try:
# # pylint: disable=eval-used
# eval(api + ".login()")
# self.broker_list.add(broker)
# logged_in = True
# except Exception as e:
# print("")
# print(f"Error at broker : {broker}")
# print(e)
# print("Make sure credentials are defined in config_terminal.py ")
# print("")
# else:
# print(f"{broker} not supported")
def menu(queue: List[str] = None):
"""Brokers Menu"""
print(
"\nUSE THIS MENU AT YOUR OWN DISCRETION\n"
" - This menu is the only one in the entire repository that has access to your broker accounts. "
"If you have provided your login details on the config_terminal.py file"
" - We review the code thoroughly from each contributor, hence, we can ensure that our codebase "
"does not take advantage of your data.\n"
" - HOWEVER, our project imports almost 200 different open source python modules. Therefore, it "
"is impossible for us to check the coding standards and security of each of these modules. "
"Hence why adding this disclaimer here."
)
bro_controller = BrokersController(queue)
an_input = "HELP_ME"
while True:
# There is a command in the queue
if bro_controller.queue and len(bro_controller.queue) > 0:
# If the command is quitting the menu we want to return in here
if bro_controller.queue[0] in ("q", "..", "quit"):
print("")
if len(bro_controller.queue) > 1:
return bro_controller.queue[1:]
return []
# Consume 1 element from the queue
an_input = bro_controller.queue[0]
bro_controller.queue = bro_controller.queue[1:]
# Print the current location because this was an instruction and we want user to know what was the action
if an_input and an_input.split(" ")[0] in bro_controller.CHOICES_COMMANDS:
print(f"{get_flair()} /portfolio/bro/ $ {an_input}")
# Get input command from user
else:
# Display help menu when entering on this menu from a level above
if an_input == "HELP_ME":
bro_controller.print_help()
# Get input from user using auto-completion
if session and gtff.USE_PROMPT_TOOLKIT and bro_controller.completer:
try:
an_input = session.prompt(
f"{get_flair()} /portfolio/bro/ $ ",
completer=bro_controller.completer,
search_ignore_case=True,
)
except KeyboardInterrupt:
# Exit in case of keyboard interrupt
an_input = "exit"
# Get input from user without auto-completion
else:
an_input = input(f"{get_flair()} /portfolio/bro/ $ ")
try:
# Process the input command
bro_controller.queue = bro_controller.switch(an_input)
except SystemExit:
print(
f"\nThe command '{an_input}' doesn't exist on the /portfolio/bro menu.",
end="",
)
similar_cmd = difflib.get_close_matches(
an_input.split(" ")[0] if " " in an_input else an_input,
bro_controller.CHOICES,
n=1,
cutoff=0.7,
)
if similar_cmd:
if " " in an_input:
candidate_input = (
f"{similar_cmd[0]} {' '.join(an_input.split(' ')[1:])}"
)
if candidate_input == an_input:
an_input = ""
bro_controller.queue = []
print("\n")
continue
an_input = candidate_input
else:
an_input = similar_cmd[0]
print(f" Replacing by '{an_input}'.")
bro_controller.queue.insert(0, an_input)
else:
print("\n")
|
StarcoderdataPython
|
4800978
|
<gh_stars>1-10
#include <vector>
#include <cstring>
#include <algorithm>
using namespace std;

class Solution {
public:
int longestMountain(vector<int>& A) {
int dp_ins[10010], dp_des[10010];
memset(dp_ins, 0, sizeof(dp_ins));
memset(dp_des, 0, sizeof(dp_des));
for(int i = 1; i < A.size(); i++){
if(A[i] > A[i-1]) dp_ins[i] = dp_ins[i-1] + 1;
}
for(int i = A.size()-2; i >=0; i--){
if(A[i] > A[i+1]) dp_des[i] = dp_des[i+1] + 1;
}
int ans = 0;
for(int i=0;i < A.size(); i++){
if(dp_ins[i] == 0 || dp_des[i] == 0)
continue;
ans = max(ans, dp_ins[i] + dp_des[i] + 1);
}
return ans;
}
};
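// Illustrative example: for A = {2, 1, 4, 7, 3, 2, 5} the longest mountain is the
// subarray {1, 4, 7, 3, 2}, so longestMountain returns 5.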
|
StarcoderdataPython
|
3289405
|
<reponame>ruanyangry/Spark-ML-study
# _*_ coding:utf-8 _*_
'''
GaussianMixture
'''
from pyspark.sql import SparkSession
from pyspark.ml.clustering import GaussianMixture
spark = SparkSession.builder.appName("GaussianMixture").getOrCreate()
paths="/export/home/ry/spark-2.2.1-bin-hadoop2.7/data/mllib/"
data=spark.read.format("libsvm").load(paths+"sample_kmeans_data.txt")
gmm=GaussianMixture().setK(2)
model=gmm.fit(data)
print("Gaussian: ")
model.gaussiansDF.show()
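# A small follow-up sketch using the fitted model above: the mixture weights are
# also available, and the SparkSession should be stopped when the job is done.
print("Weights: ", model.weights)
spark.stop()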
|
StarcoderdataPython
|
1750268
|
# Code by Camden
import os, re, sys, time, platform, subprocess as sp
class g:
directory_application = None
directory_carts = None
IP = "localhost"
pause = None
scale = None
on = False
p8 = None
write = ""
read = ""
def sub():
    # Launch Pico-8 at the configured window size with the configured cart,
    # attaching stdin/stdout pipes so the relay loops below can talk to it.
    g.scale = str(g.scale * 128)
    return sp.Popen([fr"{g.directory_application}", "-width", g.scale, "-height", g.scale, "-run", fr"{g.directory_carts}"], stdin = sp.PIPE, stdout = sp.PIPE)
def string_to_bytes(string):
    # Encode the string as UTF-8 and pad it with zero bytes to a 16-byte packet.
    length = 16 - len(string)
    bytes = bytearray(string, "utf-8")
    bytes.extend(bytearray(length))
    return bytes
def to_pico8():
    # Relay loop: every g.pause seconds write a 16-byte packet to Pico-8's stdin.
    # A changed g.write is encoded with string_to_bytes(); otherwise zeros are sent.
    old = None
    while g.on:
        try:
            bytes = bytearray(16)
            if g.write != old: bytes = string_to_bytes(g.write)
            g.p8.stdin.write(bytes)
            g.p8.stdin.flush()
            old = g.write
        except: pass
        time.sleep(g.pause)
def from_pico8():
    # Relay loop: read Pico-8's stdout one byte at a time, accumulating characters
    # until a "$" delimiter, then publish the completed message through g.read.
    old = None
    string = ""
    while g.on:
        try:
            data = g.p8.stdout.read(1).decode("utf-8")
            if data != "$": string += data
            else:
                if string != old:
                    g.read = string
                    old = string
                string = ""
        except: pass
def clear():
if platform.system() == "Windows": os.system("cls")
else: os.system("clear")
def error(message, bool = True):
global flag
print(f"Error: {message} not found!")
if bool: flag = True
else: sys.exit(1)
def get():
global flag
clear()
name = "settings.txt"
try: file = open(name, "r")
except: error(name, False)
lines = file.read().splitlines()
file.close()
for line in lines:
string = None
groups = re.search(r"(?<=\:)([^\#]*)", line)
if groups: string = groups.group().strip()
if not string or string == "none": continue
if line.startswith("Directory to application:"): g.directory_application = string
elif line.startswith("Directory to cart:"): g.directory_carts = string
elif line.startswith("Default IP:"): g.IP = string
elif line.startswith("Pause:"):
try: g.pause = abs(float(string))
except: pass
elif line.startswith("Scale:"):
string = string.lower().replace("x", "")
try:
value = int(string)
if value < 1: value = 1
elif value > 11: value = 5
g.scale = value
except: pass
flag = False
if not g.directory_application: error("Pico-8 app")
if not g.directory_carts: error("Pico-8 carts folder")
if not g.IP: error("IP")
if g.pause is None: error("waiting pause")
if g.scale is None: error("window scale")
if flag: sys.exit(1)
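# A possible way to drive this module (hypothetical; the original file does not
# show how it is launched): read the settings, start Pico-8, then run the two
# relay loops on background threads.
if __name__ == "__main__":
    import threading
    get()
    g.p8 = sub()
    g.on = True
    threading.Thread(target=to_pico8, daemon=True).start()
    threading.Thread(target=from_pico8, daemon=True).start()
    while g.on:
        time.sleep(1)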
|
StarcoderdataPython
|
1611426
|
""" Register allocation scheme.
"""
from rpython.jit.backend.llsupport import symbolic
from rpython.jit.backend.llsupport.descr import CallDescr, unpack_arraydescr
from rpython.jit.backend.llsupport.gcmap import allocate_gcmap
from rpython.jit.backend.llsupport.regalloc import (FrameManager, BaseRegalloc,
RegisterManager, TempVar, compute_vars_longevity, is_comparison_or_ovf_op,
valid_addressing_size, get_scale, SAVE_DEFAULT_REGS, SAVE_GCREF_REGS,
SAVE_ALL_REGS)
from rpython.jit.backend.x86 import rx86
from rpython.jit.backend.x86.arch import (WORD, JITFRAME_FIXED_SIZE, IS_X86_32,
IS_X86_64, DEFAULT_FRAME_BYTES)
from rpython.jit.backend.x86.jump import remap_frame_layout_mixed
from rpython.jit.backend.x86.regloc import (FrameLoc, RegLoc, ConstFloatLoc,
FloatImmedLoc, ImmedLoc, imm, imm0, imm1, ecx, eax, edx, ebx, esi, edi,
ebp, r8, r9, r10, r11, r12, r13, r14, r15, xmm0, xmm1, xmm2, xmm3, xmm4,
xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14,
X86_64_SCRATCH_REG, X86_64_XMM_SCRATCH_REG)
from rpython.jit.backend.x86.vector_ext import VectorRegallocMixin
from rpython.jit.codewriter import longlong
from rpython.jit.codewriter.effectinfo import EffectInfo
from rpython.jit.metainterp.history import (Const, ConstInt, ConstPtr,
ConstFloat, INT, REF, FLOAT, VECTOR, TargetToken, AbstractFailDescr)
from rpython.jit.metainterp.resoperation import rop
from rpython.jit.metainterp.resume import AccumInfo
from rpython.rlib import rgc
from rpython.rlib.objectmodel import we_are_translated
from rpython.rlib.rarithmetic import r_longlong, r_uint
from rpython.rtyper.lltypesystem import lltype, rffi, rstr
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.jit.backend.x86.regloc import AddressLoc
def compute_gc_level(calldescr, guard_not_forced=False):
effectinfo = calldescr.get_extra_info()
if guard_not_forced:
return SAVE_ALL_REGS
elif effectinfo is None or effectinfo.check_can_collect():
return SAVE_GCREF_REGS
else:
return SAVE_DEFAULT_REGS
class X86RegisterManager(RegisterManager):
box_types = [INT, REF]
all_regs = [ecx, eax, edx, ebx, esi, edi]
no_lower_byte_regs = [esi, edi]
save_around_call_regs = [eax, edx, ecx]
frame_reg = ebp
def call_result_location(self, v):
return eax
def convert_to_imm(self, c):
if isinstance(c, ConstInt):
return imm(c.value)
elif isinstance(c, ConstPtr):
if we_are_translated() and c.value and rgc.can_move(c.value):
not_implemented("convert_to_imm: ConstPtr needs special care")
return imm(rffi.cast(lltype.Signed, c.value))
else:
not_implemented("convert_to_imm: got a %s" % c)
class X86_64_RegisterManager(X86RegisterManager):
# r11 omitted because it's used as scratch
all_regs = [ecx, eax, edx, ebx, esi, edi, r8, r9, r10, r12, r13, r14, r15]
no_lower_byte_regs = []
save_around_call_regs = [eax, ecx, edx, esi, edi, r8, r9, r10]
class X86XMMRegisterManager(RegisterManager):
box_types = [FLOAT, INT] # yes INT!
all_regs = [xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7]
# we never need lower byte I hope
save_around_call_regs = all_regs
def convert_to_imm(self, c):
adr = self.assembler.datablockwrapper.malloc_aligned(8, 8)
x = c.getfloatstorage()
rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), adr)[0] = x
return ConstFloatLoc(adr)
def convert_to_imm_16bytes_align(self, c):
adr = self.assembler.datablockwrapper.malloc_aligned(16, 16)
x = c.getfloatstorage()
y = longlong.ZEROF
rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), adr)[0] = x
rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), adr)[1] = y
return ConstFloatLoc(adr)
def expand_float(self, size, const):
if size == 4:
loc = self.expand_single_float(const)
else:
loc = self.expand_double_float(const)
return loc
def expand_double_float(self, f):
adr = self.assembler.datablockwrapper.malloc_aligned(16, 16)
fs = f.getfloatstorage()
rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), adr)[0] = fs
rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), adr)[1] = fs
return ConstFloatLoc(adr)
def expand_single_float(self, f):
adr = self.assembler.datablockwrapper.malloc_aligned(16, 16)
fs = rffi.cast(lltype.SingleFloat, f.getfloatstorage())
rffi.cast(rffi.CArrayPtr(lltype.SingleFloat), adr)[0] = fs
rffi.cast(rffi.CArrayPtr(lltype.SingleFloat), adr)[1] = fs
rffi.cast(rffi.CArrayPtr(lltype.SingleFloat), adr)[2] = fs
rffi.cast(rffi.CArrayPtr(lltype.SingleFloat), adr)[3] = fs
return ConstFloatLoc(adr)
def call_result_location(self, v):
return xmm0
class X86_64_XMMRegisterManager(X86XMMRegisterManager):
# xmm15 reserved for scratch use
all_regs = [xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14]
save_around_call_regs = all_regs
class X86FrameManager(FrameManager):
def __init__(self, base_ofs):
FrameManager.__init__(self)
self.base_ofs = base_ofs
def frame_pos(self, i, box_type):
return FrameLoc(i, get_ebp_ofs(self.base_ofs, i), box_type)
@staticmethod
def frame_size(box_type):
if IS_X86_32 and box_type == FLOAT:
return 2
else:
return 1
@staticmethod
def get_loc_index(loc):
assert isinstance(loc, FrameLoc)
return loc.position
if WORD == 4:
gpr_reg_mgr_cls = X86RegisterManager
xmm_reg_mgr_cls = X86XMMRegisterManager
elif WORD == 8:
gpr_reg_mgr_cls = X86_64_RegisterManager
xmm_reg_mgr_cls = X86_64_XMMRegisterManager
else:
raise AssertionError("Word size should be 4 or 8")
gpr_reg_mgr_cls.all_reg_indexes = [-1] * WORD * 2 # eh, happens to be true
for _i, _reg in enumerate(gpr_reg_mgr_cls.all_regs):
gpr_reg_mgr_cls.all_reg_indexes[_reg.value] = _i
class RegAlloc(BaseRegalloc, VectorRegallocMixin):
def __init__(self, assembler, translate_support_code=False):
assert isinstance(translate_support_code, bool)
# variables that have place in register
self.assembler = assembler
self.translate_support_code = translate_support_code
# to be read/used by the assembler too
self.jump_target_descr = None
self.final_jump_op = None
self.final_jump_op_position = -1
def _prepare(self, inputargs, operations, allgcrefs):
from rpython.jit.backend.x86.reghint import X86RegisterHints
for box in inputargs:
assert box.get_forwarded() is None
cpu = self.assembler.cpu
self.fm = X86FrameManager(cpu.get_baseofs_of_frame_field())
operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations,
allgcrefs)
# compute longevity of variables
longevity = compute_vars_longevity(inputargs, operations)
X86RegisterHints().add_hints(longevity, inputargs, operations)
self.longevity = longevity
self.rm = gpr_reg_mgr_cls(self.longevity,
frame_manager = self.fm,
assembler = self.assembler)
self.xrm = xmm_reg_mgr_cls(self.longevity, frame_manager = self.fm,
assembler = self.assembler)
return operations
def prepare_loop(self, inputargs, operations, looptoken, allgcrefs):
operations = self._prepare(inputargs, operations, allgcrefs)
self._set_initial_bindings(inputargs, looptoken)
# note: we need to make a copy of inputargs because possibly_free_vars
# is also used on op args, which is a non-resizable list
self.possibly_free_vars(list(inputargs))
if WORD == 4: # see redirect_call_assembler()
self.min_bytes_before_label = 5
else:
self.min_bytes_before_label = 13
return operations
def prepare_bridge(self, inputargs, arglocs, operations, allgcrefs,
frame_info):
operations = self._prepare(inputargs, operations, allgcrefs)
self._update_bindings(arglocs, inputargs)
self.min_bytes_before_label = 0
return operations
def ensure_next_label_is_at_least_at_position(self, at_least_position):
self.min_bytes_before_label = max(self.min_bytes_before_label,
at_least_position)
def get_final_frame_depth(self):
return self.fm.get_frame_depth()
def possibly_free_var(self, var):
if var.type == FLOAT or var.is_vector():
self.xrm.possibly_free_var(var)
else:
self.rm.possibly_free_var(var)
def possibly_free_vars_for_op(self, op):
for i in range(op.numargs()):
var = op.getarg(i)
if var is not None: # xxx kludgy
self.possibly_free_var(var)
if op.type != 'v':
self.possibly_free_var(op)
def possibly_free_vars(self, vars):
for var in vars:
if var is not None: # xxx kludgy
self.possibly_free_var(var)
def make_sure_var_in_reg(self, var, forbidden_vars=[],
selected_reg=None, need_lower_byte=False):
if var.type == FLOAT or var.is_vector():
if isinstance(var, ConstFloat):
return FloatImmedLoc(var.getfloatstorage())
return self.xrm.make_sure_var_in_reg(var, forbidden_vars,
selected_reg, need_lower_byte)
else:
return self.rm.make_sure_var_in_reg(var, forbidden_vars,
selected_reg, need_lower_byte)
def force_allocate_reg(self, var, forbidden_vars=[], selected_reg=None,
need_lower_byte=False):
if var.type == FLOAT or var.is_vector():
return self.xrm.force_allocate_reg(var, forbidden_vars,
selected_reg, need_lower_byte)
else:
return self.rm.force_allocate_reg(var, forbidden_vars,
selected_reg, need_lower_byte)
def force_allocate_reg_or_cc(self, var):
assert var.type == INT
if self.next_op_can_accept_cc(self.operations, self.rm.position):
# hack: return the ebp location to mean "lives in CC". This
# ebp will not actually be used, and the location will be freed
# after the next op as usual.
self.rm.force_allocate_frame_reg(var)
return ebp
else:
# else, return a regular register (not ebp).
return self.rm.force_allocate_reg(var, need_lower_byte=True)
def force_spill_var(self, var):
if var.type == FLOAT:
return self.xrm.force_spill_var(var)
else:
return self.rm.force_spill_var(var)
def load_xmm_aligned_16_bytes(self, var, forbidden_vars=[]):
# Load 'var' in a register; but if it is a constant, we can return
# a 16-bytes-aligned ConstFloatLoc.
if isinstance(var, Const):
return self.xrm.convert_to_imm_16bytes_align(var)
else:
return self.xrm.make_sure_var_in_reg(var, forbidden_vars)
def _update_bindings(self, locs, inputargs):
# XXX this should probably go to llsupport/regalloc.py
used = {}
i = 0
for loc in locs:
if loc is None: # xxx bit kludgy
loc = ebp
arg = inputargs[i]
i += 1
if isinstance(loc, RegLoc):
if arg.type == FLOAT:
self.xrm.reg_bindings[arg] = loc
used[loc] = None
else:
if loc is ebp:
self.rm.bindings_to_frame_reg[arg] = None
else:
self.rm.reg_bindings[arg] = loc
used[loc] = None
else:
self.fm.bind(arg, loc)
self.rm.free_regs = []
for reg in self.rm.all_regs:
if reg not in used:
self.rm.free_regs.append(reg)
self.xrm.free_regs = []
for reg in self.xrm.all_regs:
if reg not in used:
self.xrm.free_regs.append(reg)
self.possibly_free_vars(list(inputargs))
self.fm.finish_binding()
self.rm._check_invariants()
self.xrm._check_invariants()
def perform(self, op, arglocs, result_loc):
if not we_are_translated():
self.assembler.dump('%s <- %s(%s)' % (result_loc, op, arglocs))
self.assembler.regalloc_perform(op, arglocs, result_loc)
def perform_llong(self, op, arglocs, result_loc):
if not we_are_translated():
self.assembler.dump('%s <- %s(%s)' % (result_loc, op, arglocs))
self.assembler.regalloc_perform_llong(op, arglocs, result_loc)
def perform_math(self, op, arglocs, result_loc):
if not we_are_translated():
self.assembler.dump('%s <- %s(%s)' % (result_loc, op, arglocs))
self.assembler.regalloc_perform_math(op, arglocs, result_loc)
def locs_for_fail(self, guard_op):
faillocs = [self.loc(arg) for arg in guard_op.getfailargs()]
descr = guard_op.getdescr()
if not descr:
return faillocs
assert isinstance(descr, AbstractFailDescr)
if descr.rd_vector_info:
accuminfo = descr.rd_vector_info
while accuminfo:
accuminfo.location = faillocs[accuminfo.getpos_in_failargs()]
loc = self.loc(accuminfo.getoriginal())
faillocs[accuminfo.getpos_in_failargs()] = loc
accuminfo = accuminfo.next()
return faillocs
def perform_guard(self, guard_op, arglocs, result_loc):
faillocs = self.locs_for_fail(guard_op)
if not we_are_translated():
if result_loc is not None:
self.assembler.dump('%s <- %s(%s)' % (result_loc, guard_op,
arglocs))
else:
self.assembler.dump('%s(%s)' % (guard_op, arglocs))
self.assembler.regalloc_perform_guard(guard_op, faillocs, arglocs,
result_loc,
self.fm.get_frame_depth())
self.possibly_free_vars(guard_op.getfailargs())
def perform_discard(self, op, arglocs):
if not we_are_translated():
self.assembler.dump('%s(%s)' % (op, arglocs))
self.assembler.regalloc_perform_discard(op, arglocs)
def walk_operations(self, inputargs, operations):
i = 0
self.operations = operations
while i < len(operations):
op = operations[i]
self.assembler.mc.mark_op(op)
assert self.assembler.mc._frame_size == DEFAULT_FRAME_BYTES
self.rm.position = i
self.xrm.position = i
if rop.has_no_side_effect(op.opnum) and op not in self.longevity:
i += 1
self.possibly_free_vars_for_op(op)
continue
if not we_are_translated() and op.getopnum() == rop.FORCE_SPILL:
self._consider_force_spill(op)
else:
oplist[op.getopnum()](self, op)
self.possibly_free_vars_for_op(op)
self.rm._check_invariants()
self.xrm._check_invariants()
i += 1
assert not self.rm.reg_bindings
assert not self.xrm.reg_bindings
if not we_are_translated():
self.assembler.mc.UD2()
self.flush_loop()
self.assembler.mc.mark_op(None) # end of the loop
self.operations = None
for arg in inputargs:
self.possibly_free_var(arg)
def flush_loop(self):
# Force the code to be aligned to a multiple of 16. Also,
# rare case: if the loop is too short, or if we are just after
# a GUARD_NOT_INVALIDATED, we need to make sure we insert enough
# NOPs. This is important to ensure that there are enough bytes
# produced, because GUARD_NOT_INVALIDATED or
# redirect_call_assembler() will maybe overwrite them. (In that
# rare case we don't worry too much about alignment.)
mc = self.assembler.mc
current_pos = mc.get_relative_pos()
target_pos = (current_pos + 15) & ~15
target_pos = max(target_pos, self.min_bytes_before_label)
insert_nops = target_pos - current_pos
assert 0 <= insert_nops <= 15
for c in mc.MULTIBYTE_NOPs[insert_nops]:
mc.writechar(c)
def loc(self, v):
if v is None: # xxx kludgy
return None
if v.type == FLOAT or v.is_vector():
return self.xrm.loc(v)
return self.rm.loc(v)
def load_condition_into_cc(self, box):
if self.assembler.guard_success_cc == rx86.cond_none:
self.assembler.test_location(self.loc(box))
self.assembler.guard_success_cc = rx86.Conditions['NZ']
def _consider_guard_cc(self, op):
arg = op.getarg(0)
self.load_condition_into_cc(arg)
self.perform_guard(op, [], None)
consider_guard_true = _consider_guard_cc
consider_guard_false = _consider_guard_cc
consider_guard_nonnull = _consider_guard_cc
consider_guard_isnull = _consider_guard_cc
def consider_finish(self, op):
# the frame is in ebp, but we have to point where in the frame is
# the potential argument to FINISH
if op.numargs() == 1:
loc = self.make_sure_var_in_reg(op.getarg(0))
locs = [loc]
else:
locs = []
self.perform(op, locs, None)
def consider_guard_no_exception(self, op):
self.perform_guard(op, [], None)
def consider_guard_not_invalidated(self, op):
mc = self.assembler.mc
n = mc.get_relative_pos(break_basic_block=False)
self.perform_guard(op, [], None)
assert n == mc.get_relative_pos(break_basic_block=False)
# ensure that the next label is at least 5 bytes farther than
# the current position. Otherwise, when invalidating the guard,
# we would overwrite randomly the next label's position.
self.ensure_next_label_is_at_least_at_position(n + 5)
def consider_guard_exception(self, op):
loc = self.rm.make_sure_var_in_reg(op.getarg(0))
box = TempVar()
args = op.getarglist()
loc1 = self.rm.force_allocate_reg(box, args)
if op in self.longevity:
# this means, is it ever used
resloc = self.rm.force_allocate_reg(op, args + [box])
else:
resloc = None
self.perform_guard(op, [loc, loc1], resloc)
self.rm.possibly_free_var(box)
def consider_save_exception(self, op):
resloc = self.rm.force_allocate_reg(op)
self.perform(op, [], resloc)
consider_save_exc_class = consider_save_exception
def consider_restore_exception(self, op):
args = op.getarglist()
loc0 = self.rm.make_sure_var_in_reg(op.getarg(0), args) # exc class
loc1 = self.rm.make_sure_var_in_reg(op.getarg(1), args) # exc instance
self.perform_discard(op, [loc0, loc1])
consider_guard_no_overflow = consider_guard_no_exception
consider_guard_overflow = consider_guard_no_exception
consider_guard_not_forced = consider_guard_no_exception
def consider_guard_value(self, op):
x = self.make_sure_var_in_reg(op.getarg(0))
loc = self.assembler.cpu.all_reg_indexes[x.value]
op.getdescr().make_a_counter_per_value(op, loc)
y = self.loc(op.getarg(1))
self.perform_guard(op, [x, y], None)
def consider_guard_class(self, op):
assert not isinstance(op.getarg(0), Const)
x = self.rm.make_sure_var_in_reg(op.getarg(0))
y = self.loc(op.getarg(1))
self.perform_guard(op, [x, y], None)
consider_guard_nonnull_class = consider_guard_class
consider_guard_gc_type = consider_guard_class
def consider_guard_is_object(self, op):
x = self.make_sure_var_in_reg(op.getarg(0))
tmp_box = TempVar()
y = self.rm.force_allocate_reg(tmp_box, [op.getarg(0)])
self.rm.possibly_free_var(tmp_box)
self.perform_guard(op, [x, y], None)
def consider_guard_subclass(self, op):
x = self.make_sure_var_in_reg(op.getarg(0))
tmp_box = TempVar()
z = self.rm.force_allocate_reg(tmp_box, [op.getarg(0)])
y = self.loc(op.getarg(1))
self.rm.possibly_free_var(tmp_box)
self.perform_guard(op, [x, y, z], None)
def _consider_binop_part(self, op, symm=False):
x = op.getarg(0)
y = op.getarg(1)
xloc = self.loc(x)
argloc = self.loc(y)
# For symmetrical operations, if x is not in a reg, but y is,
# and if x lives longer than the current operation while y dies, then
# swap the role of 'x' and 'y'
if (symm and not isinstance(xloc, RegLoc) and
isinstance(argloc, RegLoc)):
if ((x not in self.rm.longevity or
self.rm.longevity[x].last_usage > self.rm.position) and
self.rm.longevity[y].last_usage == self.rm.position):
x, y = y, x
argloc = self.loc(y)
#
args = op.getarglist()
loc = self.rm.force_result_in_reg(op, x, args)
return loc, argloc
def _consider_binop(self, op):
loc, argloc = self._consider_binop_part(op)
self.perform(op, [loc, argloc], loc)
def _consider_binop_symm(self, op):
loc, argloc = self._consider_binop_part(op, symm=True)
self.perform(op, [loc, argloc], loc)
def _consider_lea(self, op):
x = op.getarg(0)
loc = self.make_sure_var_in_reg(x)
# make it possible to have argloc be == loc if x dies
# (then LEA will not be used, but that's fine anyway)
self.possibly_free_var(x)
argloc = self.loc(op.getarg(1))
resloc = self.force_allocate_reg(op)
self.perform(op, [loc, argloc], resloc)
def consider_int_add(self, op):
y = op.getarg(1)
if isinstance(y, ConstInt) and rx86.fits_in_32bits(y.value):
self._consider_lea(op)
else:
self._consider_binop_symm(op)
consider_nursery_ptr_increment = consider_int_add
def consider_int_sub(self, op):
y = op.getarg(1)
if isinstance(y, ConstInt) and rx86.fits_in_32bits(-y.value):
self._consider_lea(op)
else:
self._consider_binop(op)
consider_int_mul = _consider_binop_symm
consider_int_and = _consider_binop_symm
consider_int_or = _consider_binop_symm
consider_int_xor = _consider_binop_symm
consider_int_mul_ovf = _consider_binop_symm
consider_int_sub_ovf = _consider_binop
consider_int_add_ovf = _consider_binop_symm
def consider_uint_mul_high(self, op):
arg1, arg2 = op.getarglist()
# should support all cases, but is optimized for (box, const)
if isinstance(arg1, Const):
arg1, arg2 = arg2, arg1
self.rm.make_sure_var_in_reg(arg2, selected_reg=eax)
l1 = self.loc(arg1)
# l1 is a register != eax, or stack_bp; or, just possibly, it
# can be == eax if arg1 is arg2
assert not isinstance(l1, ImmedLoc)
assert l1 is not eax or arg1 is arg2
#
# eax will be trash after the operation
self.rm.possibly_free_var(arg2)
tmpvar = TempVar()
self.rm.force_allocate_reg(tmpvar, selected_reg=eax)
self.rm.possibly_free_var(tmpvar)
#
self.rm.force_allocate_reg(op, selected_reg=edx)
self.perform(op, [l1], edx)
def consider_int_neg(self, op):
res = self.rm.force_result_in_reg(op, op.getarg(0))
self.perform(op, [res], res)
consider_int_invert = consider_int_neg
def consider_int_signext(self, op):
argloc = self.loc(op.getarg(0))
numbytesloc = self.loc(op.getarg(1))
resloc = self.force_allocate_reg(op)
self.perform(op, [argloc, numbytesloc], resloc)
def consider_int_lshift(self, op):
if isinstance(op.getarg(1), Const):
loc2 = self.rm.convert_to_imm(op.getarg(1))
else:
loc2 = self.rm.make_sure_var_in_reg(op.getarg(1), selected_reg=ecx)
args = op.getarglist()
loc1 = self.rm.force_result_in_reg(op, op.getarg(0), args)
self.perform(op, [loc1, loc2], loc1)
consider_int_rshift = consider_int_lshift
consider_uint_rshift = consider_int_lshift
def _consider_compop(self, op):
vx = op.getarg(0)
vy = op.getarg(1)
arglocs = [self.loc(vx), self.loc(vy)]
if (vx in self.rm.reg_bindings or vy in self.rm.reg_bindings or
isinstance(vx, Const) or isinstance(vy, Const)):
pass
else:
arglocs[0] = self.rm.make_sure_var_in_reg(vx)
loc = self.force_allocate_reg_or_cc(op)
self.perform(op, arglocs, loc)
consider_int_lt = _consider_compop
consider_int_gt = _consider_compop
consider_int_ge = _consider_compop
consider_int_le = _consider_compop
consider_int_ne = _consider_compop
consider_int_eq = _consider_compop
consider_uint_gt = _consider_compop
consider_uint_lt = _consider_compop
consider_uint_le = _consider_compop
consider_uint_ge = _consider_compop
consider_ptr_eq = consider_instance_ptr_eq = _consider_compop
consider_ptr_ne = consider_instance_ptr_ne = _consider_compop
def _consider_float_op(self, op):
loc1 = self.xrm.loc(op.getarg(1))
args = op.getarglist()
loc0 = self.xrm.force_result_in_reg(op, op.getarg(0), args)
self.perform(op, [loc0, loc1], loc0)
consider_float_add = _consider_float_op # xxx could be _symm
consider_float_sub = _consider_float_op
consider_float_mul = _consider_float_op # xxx could be _symm
consider_float_truediv = _consider_float_op
def _consider_float_cmp(self, op):
vx = op.getarg(0)
vy = op.getarg(1)
arglocs = [self.loc(vx), self.loc(vy)]
if not (isinstance(arglocs[0], RegLoc) or
isinstance(arglocs[1], RegLoc)):
if isinstance(vx, Const):
arglocs[1] = self.xrm.make_sure_var_in_reg(vy)
else:
arglocs[0] = self.xrm.make_sure_var_in_reg(vx)
loc = self.force_allocate_reg_or_cc(op)
self.perform(op, arglocs, loc)
consider_float_lt = _consider_float_cmp
consider_float_le = _consider_float_cmp
consider_float_eq = _consider_float_cmp
consider_float_ne = _consider_float_cmp
consider_float_gt = _consider_float_cmp
consider_float_ge = _consider_float_cmp
def _consider_float_unary_op(self, op):
loc0 = self.xrm.force_result_in_reg(op, op.getarg(0))
self.perform(op, [loc0], loc0)
consider_float_neg = _consider_float_unary_op
consider_float_abs = _consider_float_unary_op
def consider_cast_float_to_int(self, op):
loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0))
loc1 = self.rm.force_allocate_reg(op)
self.perform(op, [loc0], loc1)
def consider_cast_int_to_float(self, op):
loc0 = self.rm.make_sure_var_in_reg(op.getarg(0))
loc1 = self.xrm.force_allocate_reg(op)
self.perform(op, [loc0], loc1)
def consider_cast_float_to_singlefloat(self, op):
loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0))
loc1 = self.rm.force_allocate_reg(op)
tmpxvar = TempVar()
loctmp = self.xrm.force_allocate_reg(tmpxvar) # may be equal to loc0
self.xrm.possibly_free_var(tmpxvar)
self.perform(op, [loc0, loctmp], loc1)
consider_cast_singlefloat_to_float = consider_cast_int_to_float
def consider_convert_float_bytes_to_longlong(self, op):
if longlong.is_64_bit:
loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0))
loc1 = self.rm.force_allocate_reg(op)
self.perform(op, [loc0], loc1)
else:
arg0 = op.getarg(0)
loc0 = self.xrm.loc(arg0)
loc1 = self.xrm.force_allocate_reg(op, forbidden_vars=[arg0])
self.perform(op, [loc0], loc1)
def consider_convert_longlong_bytes_to_float(self, op):
if longlong.is_64_bit:
loc0 = self.rm.make_sure_var_in_reg(op.getarg(0))
loc1 = self.xrm.force_allocate_reg(op)
self.perform(op, [loc0], loc1)
else:
arg0 = op.getarg(0)
loc0 = self.xrm.make_sure_var_in_reg(arg0)
loc1 = self.xrm.force_allocate_reg(op, forbidden_vars=[arg0])
self.perform(op, [loc0], loc1)
def _consider_llong_binop_xx(self, op):
# must force both arguments into xmm registers, because we don't
# know if they will be suitably aligned. Exception: if the second
# argument is a constant, we can ask it to be aligned to 16 bytes.
# xxx some of these operations could be '_symm'.
args = [op.getarg(1), op.getarg(2)]
loc1 = self.load_xmm_aligned_16_bytes(args[1])
loc0 = self.xrm.force_result_in_reg(op, args[0], args)
self.perform_llong(op, [loc0, loc1], loc0)
def _consider_llong_eq_ne_xx(self, op):
# must force both arguments into xmm registers, because we don't
# know if they will be suitably aligned. Exception: if they are
# constants, we can ask them to be aligned to 16 bytes.
args = [op.getarg(1), op.getarg(2)]
loc1 = self.load_xmm_aligned_16_bytes(args[0])
loc2 = self.load_xmm_aligned_16_bytes(args[1], args)
tmpxvar = TempVar()
loc3 = self.xrm.force_allocate_reg(tmpxvar, args)
self.xrm.possibly_free_var(tmpxvar)
loc0 = self.rm.force_allocate_reg(op, need_lower_byte=True)
self.perform_llong(op, [loc1, loc2, loc3], loc0)
def _maybe_consider_llong_lt(self, op):
# XXX just a special case for now
box = op.getarg(2)
if not isinstance(box, ConstFloat):
return False
if box.getfloat() != 0.0: # NaNs are also != 0.0
return False
# "x < 0.0" or maybe "x < -0.0" which is the same
box = op.getarg(1)
assert box.type == FLOAT
loc1 = self.xrm.make_sure_var_in_reg(box)
loc0 = self.rm.force_allocate_reg(op)
self.perform_llong(op, [loc1], loc0)
return True
def _consider_llong_to_int(self, op):
# accept an argument in a xmm register or in the stack
loc1 = self.xrm.loc(op.getarg(1))
loc0 = self.rm.force_allocate_reg(op)
self.perform_llong(op, [loc1], loc0)
def _loc_of_const_longlong(self, value64):
c = ConstFloat(value64)
return self.xrm.convert_to_imm(c)
def _consider_llong_from_int(self, op):
assert IS_X86_32
loc0 = self.xrm.force_allocate_reg(op)
box = op.getarg(1)
if isinstance(box, ConstInt):
loc1 = self._loc_of_const_longlong(r_longlong(box.value))
loc2 = None # unused
else:
loc1 = self.rm.make_sure_var_in_reg(box)
tmpxvar = TempVar()
loc2 = self.xrm.force_allocate_reg(tmpxvar, [op])
self.xrm.possibly_free_var(tmpxvar)
self.perform_llong(op, [loc1, loc2], loc0)
def _consider_llong_from_uint(self, op):
assert IS_X86_32
loc0 = self.xrm.force_allocate_reg(op)
loc1 = self.rm.make_sure_var_in_reg(op.getarg(1))
self.perform_llong(op, [loc1], loc0)
def _consider_math_sqrt(self, op):
loc0 = self.xrm.force_result_in_reg(op, op.getarg(1))
self.perform_math(op, [loc0], loc0)
def _consider_threadlocalref_get(self, op):
if self.translate_support_code:
offset = op.getarg(1).getint() # getarg(0) == 'threadlocalref_get'
calldescr = op.getdescr()
size = calldescr.get_result_size()
sign = calldescr.is_result_signed()
resloc = self.force_allocate_reg(op)
self.assembler.threadlocalref_get(offset, resloc, size, sign)
else:
self._consider_call(op)
def _call(self, op, arglocs, gc_level):
# we need to save registers on the stack:
#
# - at least the non-callee-saved registers
# (gc_level == SAVE_DEFAULT_REGS)
#
# - if gc_level == SAVE_GCREF_REGS we save also the callee-saved
# registers that contain GC pointers
#
# - gc_level == SAVE_ALL_REGS for CALL_MAY_FORCE or CALL_ASSEMBLER. We
# have to save all regs anyway, in case we need to do
# cpu.force(). The issue is that grab_frame_values() would
# not be able to locate values in callee-saved registers.
#
if gc_level == SAVE_ALL_REGS:
save_all_regs = SAVE_ALL_REGS
else:
save_all_regs = SAVE_DEFAULT_REGS
self.xrm.before_call(save_all_regs=save_all_regs)
if gc_level == SAVE_GCREF_REGS:
gcrootmap = self.assembler.cpu.gc_ll_descr.gcrootmap
# we save all the GCREF registers for shadowstack
if gcrootmap: # and gcrootmap.is_shadow_stack:
save_all_regs = SAVE_GCREF_REGS
self.rm.before_call(save_all_regs=save_all_regs)
if op.type != 'v':
if op.type == FLOAT:
resloc = self.xrm.after_call(op)
else:
resloc = self.rm.after_call(op)
else:
resloc = None
self.perform(op, arglocs, resloc)
def _consider_call(self, op, guard_not_forced=False, first_arg_index=1):
calldescr = op.getdescr()
assert isinstance(calldescr, CallDescr)
assert len(calldescr.arg_classes) == op.numargs() - first_arg_index
size = calldescr.get_result_size()
sign = calldescr.is_result_signed()
if sign:
sign_loc = imm1
else:
sign_loc = imm0
gc_level = compute_gc_level(calldescr, guard_not_forced)
#
self._call(op, [imm(size), sign_loc] +
[self.loc(op.getarg(i)) for i in range(op.numargs())],
gc_level=gc_level)
def _consider_real_call(self, op):
effectinfo = op.getdescr().get_extra_info()
assert effectinfo is not None
oopspecindex = effectinfo.oopspecindex
if oopspecindex != EffectInfo.OS_NONE:
if IS_X86_32:
# support for some of the llong operations,
# which only exist on x86-32
if oopspecindex in (EffectInfo.OS_LLONG_ADD,
EffectInfo.OS_LLONG_SUB,
EffectInfo.OS_LLONG_AND,
EffectInfo.OS_LLONG_OR,
EffectInfo.OS_LLONG_XOR):
return self._consider_llong_binop_xx(op)
if oopspecindex == EffectInfo.OS_LLONG_TO_INT:
return self._consider_llong_to_int(op)
if oopspecindex == EffectInfo.OS_LLONG_FROM_INT:
return self._consider_llong_from_int(op)
if oopspecindex == EffectInfo.OS_LLONG_FROM_UINT:
return self._consider_llong_from_uint(op)
if (oopspecindex == EffectInfo.OS_LLONG_EQ or
oopspecindex == EffectInfo.OS_LLONG_NE):
return self._consider_llong_eq_ne_xx(op)
if oopspecindex == EffectInfo.OS_LLONG_LT:
if self._maybe_consider_llong_lt(op):
return
if oopspecindex == EffectInfo.OS_MATH_SQRT:
return self._consider_math_sqrt(op)
if oopspecindex == EffectInfo.OS_THREADLOCALREF_GET:
return self._consider_threadlocalref_get(op)
if oopspecindex == EffectInfo.OS_MATH_READ_TIMESTAMP:
return self._consider_math_read_timestamp(op)
self._consider_call(op)
consider_call_i = _consider_real_call
consider_call_r = _consider_real_call
consider_call_f = _consider_real_call
consider_call_n = _consider_real_call
def _consider_call_may_force(self, op):
self._consider_call(op, guard_not_forced=True)
consider_call_may_force_i = _consider_call_may_force
consider_call_may_force_r = _consider_call_may_force
consider_call_may_force_f = _consider_call_may_force
consider_call_may_force_n = _consider_call_may_force
def _consider_call_release_gil(self, op):
# [Const(save_err), func_addr, args...]
self._consider_call(op, guard_not_forced=True, first_arg_index=2)
consider_call_release_gil_i = _consider_call_release_gil
consider_call_release_gil_f = _consider_call_release_gil
consider_call_release_gil_n = _consider_call_release_gil
def consider_check_memory_error(self, op):
x = self.rm.make_sure_var_in_reg(op.getarg(0))
self.perform_discard(op, [x])
def _consider_call_assembler(self, op):
locs = self.locs_for_call_assembler(op)
self._call(op, locs, gc_level=SAVE_ALL_REGS)
consider_call_assembler_i = _consider_call_assembler
consider_call_assembler_r = _consider_call_assembler
consider_call_assembler_f = _consider_call_assembler
consider_call_assembler_n = _consider_call_assembler
def consider_cond_call_gc_wb(self, op):
assert op.type == 'v'
args = op.getarglist()
N = len(args)
# we force all arguments in a reg (unless they are Consts),
# because it will be needed anyway by the following gc_load
# It avoids loading it twice from the memory.
arglocs = [self.rm.make_sure_var_in_reg(op.getarg(i), args)
for i in range(N)]
self.perform_discard(op, arglocs)
consider_cond_call_gc_wb_array = consider_cond_call_gc_wb
def consider_cond_call(self, op):
args = op.getarglist()
assert 2 <= len(args) <= 4 + 2 # maximum 4 arguments
v_func = args[1]
assert isinstance(v_func, Const)
imm_func = self.rm.convert_to_imm(v_func)
# Delicate ordering here. First get the argument's locations.
# If this also contains args[0], this returns the current
# location too.
arglocs = [self.loc(args[i]) for i in range(2, len(args))]
if op.type == 'v':
# a plain COND_CALL. Calls the function when args[0] is
# true. Often used just after a comparison operation.
gcmap = self.get_gcmap()
self.load_condition_into_cc(op.getarg(0))
resloc = None
else:
# COND_CALL_VALUE_I/R. Calls the function when args[0]
# is equal to 0 or NULL. Returns the result from the
# function call if done, or args[0] if it was not 0/NULL.
# Implemented by forcing the result to live in the same
# register as args[0], and overwriting it if we really do
# the call.
# Load the register for the result. Possibly reuse 'args[0]'.
# But the old value of args[0], if it survives, is first
# spilled away. We can't overwrite any of op.args[2:] here.
# YYY args[0] is maybe not spilled here!!!
resloc = self.rm.force_result_in_reg(op, args[0],
forbidden_vars=args[2:])
# Get the gcmap here, possibly including the spilled
# location, and always excluding the 'resloc' register.
# Some more details: the only interesting case is the case
# where we're doing the call (if we are not, the gcmap is
# not used); and in this case, the gcmap must include the
# spilled location (it contains a valid GC pointer to fix
# during the call if a GC occurs), and never 'resloc'
# (it will be overwritten with the result of the call, which
# is not computed yet if a GC occurs).
#
            # (Note that the spilled value is always NULL at the moment
            # if the call really occurs, but it is not worth the effort of
            # omitting it from the gcmap and risking crashes if we tweak
            # COND_CALL_VALUE_R in the future.)
gcmap = self.get_gcmap([resloc])
# Test the register for the result.
self.assembler.test_location(resloc)
self.assembler.guard_success_cc = rx86.Conditions['Z']
if not we_are_translated():
self.assembler.dump('%s <- %s(%s)' % (resloc, op, arglocs))
self.assembler.cond_call(gcmap, imm_func, arglocs, resloc)
consider_cond_call_value_i = consider_cond_call
consider_cond_call_value_r = consider_cond_call
def consider_call_malloc_nursery(self, op):
# YYY what's the reason for using a fixed register for the result?
size_box = op.getarg(0)
assert isinstance(size_box, ConstInt)
size = size_box.getint()
# hint: try to move unrelated registers away from ecx and edx now
self.rm.spill_or_move_registers_before_call([ecx, edx])
# the result will be in ecx
self.rm.force_allocate_reg(op, selected_reg=ecx)
#
        # We need edx as a temporary, but otherwise don't save any more
        # registers.  See comments in _build_malloc_slowpath().
tmp_box = TempVar()
self.rm.force_allocate_reg(tmp_box, selected_reg=edx)
gcmap = self.get_gcmap([ecx, edx]) # allocate the gcmap *before*
self.rm.possibly_free_var(tmp_box)
#
gc_ll_descr = self.assembler.cpu.gc_ll_descr
self.assembler.malloc_cond(
gc_ll_descr.get_nursery_free_addr(),
gc_ll_descr.get_nursery_top_addr(),
size, gcmap)
def consider_call_malloc_nursery_varsize_frame(self, op):
size_box = op.getarg(0)
assert not isinstance(size_box, Const) # we cannot have a const here!
# sizeloc must be in a register, but we can free it now
# (we take care explicitly of conflicts with ecx or edx)
sizeloc = self.rm.make_sure_var_in_reg(size_box)
self.rm.spill_or_move_registers_before_call([ecx, edx]) # sizeloc safe
self.rm.possibly_free_var(size_box)
# the result will be in ecx
self.rm.force_allocate_reg(op, selected_reg=ecx)
# we need edx as a temporary
tmp_box = TempVar()
self.rm.force_allocate_reg(tmp_box, selected_reg=edx)
gcmap = self.get_gcmap([ecx, edx]) # allocate the gcmap *before*
self.rm.possibly_free_var(tmp_box)
#
gc_ll_descr = self.assembler.cpu.gc_ll_descr
self.assembler.malloc_cond_varsize_frame(
gc_ll_descr.get_nursery_free_addr(),
gc_ll_descr.get_nursery_top_addr(),
sizeloc, gcmap)
def consider_call_malloc_nursery_varsize(self, op):
gc_ll_descr = self.assembler.cpu.gc_ll_descr
if not hasattr(gc_ll_descr, 'max_size_of_young_obj'):
            # for boehm, this function should never be called
            raise Exception("unreachable code")
arraydescr = op.getdescr()
length_box = op.getarg(2)
assert not isinstance(length_box, Const) # we cannot have a const here!
# can only use spill_or_move_registers_before_call() as a hint if
# we are sure that length_box stays alive and won't be freed now
# (it should always be the case, see below, but better safe than sorry)
if self.rm.stays_alive(length_box):
self.rm.spill_or_move_registers_before_call([ecx, edx])
# the result will be in ecx
self.rm.force_allocate_reg(op, selected_reg=ecx)
# we need edx as a temporary
tmp_box = TempVar()
self.rm.force_allocate_reg(tmp_box, selected_reg=edx)
gcmap = self.get_gcmap([ecx, edx]) # allocate the gcmap *before*
self.rm.possibly_free_var(tmp_box)
# length_box always survives: it's typically also present in the
# next operation that will copy it inside the new array. It's
# fine to load it from the stack too, as long as it is != ecx, edx.
lengthloc = self.rm.loc(length_box)
self.rm.possibly_free_var(length_box)
#
itemsize = op.getarg(1).getint()
maxlength = (gc_ll_descr.max_size_of_young_obj - WORD * 2)
self.assembler.malloc_cond_varsize(
op.getarg(0).getint(),
gc_ll_descr.get_nursery_free_addr(),
gc_ll_descr.get_nursery_top_addr(),
lengthloc, itemsize, maxlength, gcmap, arraydescr)
def get_gcmap(self, forbidden_regs=[], noregs=False):
frame_depth = self.fm.get_frame_depth()
gcmap = allocate_gcmap(self.assembler, frame_depth, JITFRAME_FIXED_SIZE)
for box, loc in self.rm.reg_bindings.iteritems():
if loc in forbidden_regs:
continue
if box.type == REF and self.rm.is_still_alive(box):
assert not noregs
assert isinstance(loc, RegLoc)
val = gpr_reg_mgr_cls.all_reg_indexes[loc.value]
gcmap[val // WORD // 8] |= r_uint(1) << (val % (WORD * 8))
for box, loc in self.fm.bindings.iteritems():
if box.type == REF and self.rm.is_still_alive(box):
assert isinstance(loc, FrameLoc)
val = loc.position + JITFRAME_FIXED_SIZE
gcmap[val // WORD // 8] |= r_uint(1) << (val % (WORD * 8))
return gcmap
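    # Illustration of the gcmap bit layout (numbers assumed, with WORD == 8 on
    # 64-bit): a live GC pointer whose frame/register index 'val' is 70 sets bit
    # 70 % 64 == 6 in gcmap word 70 // 64 == 1, which lets the GC find it later.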
def consider_gc_store(self, op):
args = op.getarglist()
base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
size_box = op.getarg(3)
assert isinstance(size_box, ConstInt)
size = size_box.value
assert size >= 1
if size == 1:
need_lower_byte = True
else:
need_lower_byte = False
value_loc = self.make_sure_var_in_reg(op.getarg(2), args,
need_lower_byte=need_lower_byte)
ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args)
self.perform_discard(op, [base_loc, ofs_loc, value_loc,
imm(size)])
def consider_gc_store_indexed(self, op):
args = op.getarglist()
base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
scale_box = op.getarg(3)
offset_box = op.getarg(4)
size_box = op.getarg(5)
assert isinstance(scale_box, ConstInt)
assert isinstance(offset_box, ConstInt)
assert isinstance(size_box, ConstInt)
factor = scale_box.value
offset = offset_box.value
size = size_box.value
assert size >= 1
if size == 1:
need_lower_byte = True
else:
need_lower_byte = False
value_loc = self.make_sure_var_in_reg(op.getarg(2), args,
need_lower_byte=need_lower_byte)
ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args)
self.perform_discard(op, [base_loc, ofs_loc, value_loc,
imm(factor), imm(offset), imm(size)])
def consider_increment_debug_counter(self, op):
base_loc = self.loc(op.getarg(0))
self.perform_discard(op, [base_loc])
def _consider_gc_load(self, op):
args = op.getarglist()
base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args)
result_loc = self.force_allocate_reg(op)
size_box = op.getarg(2)
assert isinstance(size_box, ConstInt)
nsize = size_box.value # negative for "signed"
size_loc = imm(abs(nsize))
if nsize < 0:
sign_loc = imm1
else:
sign_loc = imm0
self.perform(op, [base_loc, ofs_loc, size_loc, sign_loc], result_loc)
consider_gc_load_i = _consider_gc_load
consider_gc_load_r = _consider_gc_load
consider_gc_load_f = _consider_gc_load
def _consider_gc_load_indexed(self, op):
args = op.getarglist()
base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args)
result_loc = self.force_allocate_reg(op)
scale_box = op.getarg(2)
offset_box = op.getarg(3)
size_box = op.getarg(4)
assert isinstance(scale_box, ConstInt)
assert isinstance(offset_box, ConstInt)
assert isinstance(size_box, ConstInt)
scale = scale_box.value
offset = offset_box.value
nsize = size_box.value # negative for "signed"
size_loc = imm(abs(nsize))
if nsize < 0:
sign_loc = imm1
else:
sign_loc = imm0
locs = [base_loc, ofs_loc, imm(scale), imm(offset), size_loc, sign_loc]
self.perform(op, locs, result_loc)
consider_gc_load_indexed_i = _consider_gc_load_indexed
consider_gc_load_indexed_r = _consider_gc_load_indexed
consider_gc_load_indexed_f = _consider_gc_load_indexed
def consider_int_is_true(self, op):
# doesn't need arg to be in a register
argloc = self.loc(op.getarg(0))
resloc = self.force_allocate_reg_or_cc(op)
self.perform(op, [argloc], resloc)
consider_int_is_zero = consider_int_is_true
def _consider_same_as(self, op):
argloc = self.loc(op.getarg(0))
resloc = self.force_allocate_reg(op)
self.perform(op, [argloc], resloc)
consider_cast_ptr_to_int = _consider_same_as
consider_cast_int_to_ptr = _consider_same_as
consider_same_as_i = _consider_same_as
consider_same_as_r = _consider_same_as
consider_same_as_f = _consider_same_as
def consider_load_from_gc_table(self, op):
resloc = self.rm.force_allocate_reg(op)
self.perform(op, [], resloc)
def consider_int_force_ge_zero(self, op):
argloc = self.make_sure_var_in_reg(op.getarg(0))
resloc = self.force_allocate_reg(op, [op.getarg(0)])
self.perform(op, [argloc], resloc)
def consider_load_effective_address(self, op):
p0 = op.getarg(0)
i0 = op.getarg(1)
ploc = self.make_sure_var_in_reg(p0, [i0])
iloc = self.make_sure_var_in_reg(i0, [p0])
res = self.rm.force_allocate_reg(op, [p0, i0])
assert isinstance(op.getarg(2), ConstInt)
assert isinstance(op.getarg(3), ConstInt)
self.assembler.load_effective_addr(iloc, op.getarg(2).getint(),
op.getarg(3).getint(), res, ploc)
def _consider_math_read_timestamp(self, op):
# hint: try to move unrelated registers away from eax and edx now
self.rm.spill_or_move_registers_before_call([eax, edx])
tmpbox_high = TempVar()
self.rm.force_allocate_reg(tmpbox_high, selected_reg=eax)
if longlong.is_64_bit:
            # on 64-bit, use rax as a temporary register and return the
            # result in rdx
result_loc = self.rm.force_allocate_reg(op,
selected_reg=edx)
self.perform_math(op, [], result_loc)
else:
            # on 32-bit, use both eax and edx as temporary registers,
            # use a temporary xmm register, and return the result in
            # another xmm register.
tmpbox_low = TempVar()
self.rm.force_allocate_reg(tmpbox_low, selected_reg=edx)
xmmtmpbox = TempVar()
xmmtmploc = self.xrm.force_allocate_reg(xmmtmpbox)
result_loc = self.xrm.force_allocate_reg(op)
self.perform_math(op, [xmmtmploc], result_loc)
self.xrm.possibly_free_var(xmmtmpbox)
self.rm.possibly_free_var(tmpbox_low)
self.rm.possibly_free_var(tmpbox_high)
def compute_hint_frame_locations(self, operations):
# optimization only: fill in the 'hint_frame_pos' dictionary
# of 'fm' based on the JUMP at the end of the loop, by looking
# at where we would like the boxes to be after the jump.
op = operations[-1]
if op.getopnum() != rop.JUMP:
return
self.final_jump_op = op
self.final_jump_op_position = len(operations) - 1
descr = op.getdescr()
assert isinstance(descr, TargetToken)
if descr._ll_loop_code != 0:
# if the target LABEL was already compiled, i.e. if it belongs
# to some already-compiled piece of code
self._compute_hint_locations_from_descr(descr)
#else:
# The loop ends in a JUMP going back to a LABEL in the same loop.
# We cannot fill 'hint_frame_pos' immediately, but we can
        # wait until the corresponding consider_label() to know where
        # we would like the boxes to be after the jump.
# YYY can we do coalescing hints in the new register allocation model?
def _compute_hint_locations_from_descr(self, descr):
arglocs = descr._x86_arglocs
jump_op = self.final_jump_op
assert len(arglocs) == jump_op.numargs()
hinted = []
for i in range(jump_op.numargs()):
box = jump_op.getarg(i)
if not isinstance(box, Const):
loc = arglocs[i]
if isinstance(loc, FrameLoc):
self.fm.hint_frame_pos[box] = self.fm.get_loc_index(loc)
else:
if box not in hinted:
hinted.append(box)
assert isinstance(loc, RegLoc)
self.longevity.fixed_register(
self.final_jump_op_position,
loc, box)
def consider_jump(self, op):
assembler = self.assembler
assert self.jump_target_descr is None
descr = op.getdescr()
assert isinstance(descr, TargetToken)
arglocs = descr._x86_arglocs
self.jump_target_descr = descr
# Part about non-floats
src_locations1 = []
dst_locations1 = []
# Part about floats
src_locations2 = []
dst_locations2 = []
# Build the four lists
for i in range(op.numargs()):
box = op.getarg(i)
src_loc = self.loc(box)
dst_loc = arglocs[i]
if box.type != FLOAT and not box.is_vector():
src_locations1.append(src_loc)
dst_locations1.append(dst_loc)
else:
src_locations2.append(src_loc)
dst_locations2.append(dst_loc)
# Do we have a temp var?
if IS_X86_64:
tmpreg = X86_64_SCRATCH_REG
xmmtmp = X86_64_XMM_SCRATCH_REG
else:
tmpreg = None
xmmtmp = None
# Do the remapping
num_moves = remap_frame_layout_mixed(assembler,
src_locations1, dst_locations1, tmpreg,
src_locations2, dst_locations2, xmmtmp)
self.possibly_free_vars_for_op(op)
assembler.closing_jump(self.jump_target_descr)
assembler.num_moves_jump += num_moves
def consider_enter_portal_frame(self, op):
self.assembler.enter_portal_frame(op)
def consider_leave_portal_frame(self, op):
self.assembler.leave_portal_frame(op)
def consider_jit_debug(self, op):
pass
def _consider_force_spill(self, op):
# This operation is used only for testing
self.force_spill_var(op.getarg(0))
def consider_force_token(self, op):
# XXX for now we return a regular reg
#self.rm.force_allocate_frame_reg(op)
self.assembler.force_token(self.rm.force_allocate_reg(op))
def consider_label(self, op):
descr = op.getdescr()
assert isinstance(descr, TargetToken)
inputargs = op.getarglist()
arglocs = [None] * len(inputargs)
#
        # we use force_spill() on the boxes that will not really be used
        # any more in the loop, but that are kept alive anyway because they
        # appear in a later LABEL's or JUMP's arguments, or in the fail_args
        # of some guard
position = self.rm.position
for arg in inputargs:
assert not isinstance(arg, Const)
if self.longevity[arg].is_last_real_use_before(position):
self.force_spill_var(arg)
#
# we need to make sure that no variable is stored in ebp
for arg in inputargs:
if self.loc(arg) is ebp:
loc2 = self.fm.loc(arg)
self.assembler.mc.MOV(loc2, ebp)
self.rm.bindings_to_frame_reg.clear()
#
for i in range(len(inputargs)):
arg = inputargs[i]
assert not isinstance(arg, Const)
loc = self.loc(arg)
assert loc is not ebp
arglocs[i] = loc
if isinstance(loc, RegLoc):
self.fm.mark_as_free(arg)
#
# if we are too close to the start of the loop, the label's target may
# get overridden by redirect_call_assembler(). (rare case)
self.flush_loop()
#
descr._x86_arglocs = arglocs
descr._ll_loop_code = self.assembler.mc.get_relative_pos()
descr._x86_clt = self.assembler.current_clt
self.assembler.target_tokens_currently_compiling[descr] = None
self.possibly_free_vars_for_op(op)
self.assembler.label()
#
# if the LABEL's descr is precisely the target of the JUMP at the
# end of the same loop, i.e. if what we are compiling is a single
# loop that ends up jumping to this LABEL, then we can now provide
# the hints about the expected position of the spilled variables.
jump_op = self.final_jump_op
if jump_op is not None and jump_op.getdescr() is descr:
self._compute_hint_locations_from_descr(descr)
def consider_guard_not_forced_2(self, op):
self.rm.before_call(op.getfailargs(), save_all_regs=True)
self.xrm.before_call(op.getfailargs(), save_all_regs=True)
fail_locs = [self.loc(v) for v in op.getfailargs()]
self.assembler.store_force_descr(op, fail_locs,
self.fm.get_frame_depth())
self.possibly_free_vars(op.getfailargs())
def consider_keepalive(self, op):
pass
def _scaled_addr(self, index_loc, itemsize_loc,
base_loc, ofs_loc):
assert isinstance(itemsize_loc, ImmedLoc)
itemsize = itemsize_loc.value
if isinstance(index_loc, ImmedLoc):
temp_loc = imm(index_loc.value * itemsize)
shift = 0
else:
assert valid_addressing_size(itemsize), "rewrite did not correctly handle shift/mul!"
temp_loc = index_loc
shift = get_scale(itemsize)
assert isinstance(ofs_loc, ImmedLoc)
return AddressLoc(base_loc, temp_loc, shift, ofs_loc.value)
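    # Sketch of the addressing modes produced above (itemsize assumed to be 8):
    # with a register index, shift == 3 and the address is base + (index << 3) +
    # offset; with an immediate index, index * 8 is folded into the displacement
    # and shift stays 0.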
def consider_zero_array(self, op):
_, baseofs, _ = unpack_arraydescr(op.getdescr())
length_box = op.getarg(2)
scale_box = op.getarg(3)
assert isinstance(scale_box, ConstInt)
start_itemsize = scale_box.value
len_scale_box = op.getarg(4)
assert isinstance(len_scale_box, ConstInt)
len_itemsize = len_scale_box.value
# rewrite handles the mul of a constant length box
constbytes = -1
if isinstance(length_box, ConstInt):
constbytes = length_box.getint()
args = op.getarglist()
base_loc = self.rm.make_sure_var_in_reg(args[0], args)
startindex_loc = self.rm.make_sure_var_in_reg(args[1], args)
if 0 <= constbytes <= 16 * 8:
if IS_X86_64:
null_loc = X86_64_XMM_SCRATCH_REG
else:
null_box = TempVar()
null_loc = self.xrm.force_allocate_reg(null_box)
self.xrm.possibly_free_var(null_box)
self.perform_discard(op, [base_loc, startindex_loc,
imm(constbytes), imm(start_itemsize),
imm(baseofs), null_loc])
else:
# base_loc and startindex_loc are in two regs here (or they are
# immediates). Compute the dstaddr_loc, which is the raw
# address that we will pass as first argument to memset().
# It can be in the same register as either one, but not in
            # args[2], because we still need the latter.
dstaddr_box = TempVar()
dstaddr_loc = self.rm.force_allocate_reg(dstaddr_box, [args[2]])
itemsize_loc = imm(start_itemsize)
dst_addr = self._scaled_addr(startindex_loc, itemsize_loc,
base_loc, imm(baseofs))
self.assembler.mc.LEA(dstaddr_loc, dst_addr)
#
if constbytes >= 0:
length_loc = imm(constbytes)
else:
# load length_loc in a register different than dstaddr_loc
length_loc = self.rm.make_sure_var_in_reg(length_box,
[dstaddr_box])
if len_itemsize > 1:
# we need a register that is different from dstaddr_loc,
# but which can be identical to length_loc (as usual,
# only if the length_box is not used by future operations)
bytes_box = TempVar()
bytes_loc = self.rm.force_allocate_reg(bytes_box,
[dstaddr_box])
len_itemsize_loc = imm(len_itemsize)
b_adr = self._scaled_addr(length_loc, len_itemsize_loc, imm0, imm0)
self.assembler.mc.LEA(bytes_loc, b_adr)
length_box = bytes_box
length_loc = bytes_loc
#
# call memset()
self.rm.before_call()
self.xrm.before_call()
self.assembler.simple_call_no_collect(
imm(self.assembler.memset_addr),
[dstaddr_loc, imm0, length_loc])
self.rm.possibly_free_var(length_box)
self.rm.possibly_free_var(dstaddr_box)
def not_implemented_op(self, op):
not_implemented("not implemented operation: %s" % op.getopname())
oplist = [RegAlloc.not_implemented_op] * rop._LAST
import itertools
iterate = itertools.chain(RegAlloc.__dict__.iteritems(),
VectorRegallocMixin.__dict__.iteritems())
for name, value in iterate:
if name.startswith('consider_'):
name = name[len('consider_'):]
num = getattr(rop, name.upper())
oplist[num] = value
def get_ebp_ofs(base_ofs, position):
# Argument is a frame position (0, 1, 2...).
# Returns (ebp+20), (ebp+24), (ebp+28)...
# i.e. the n'th word beyond the fixed frame size.
return base_ofs + WORD * (position + JITFRAME_FIXED_SIZE)
def not_implemented(msg):
msg = '[x86/regalloc] %s\n' % msg
if we_are_translated():
llop.debug_print(lltype.Void, msg)
raise NotImplementedError(msg)
# web/migrations/versions/dae74ff90530_add_task_info_to_analysis.py
"""Add task info to Analysis
Revision ID: dae74ff90530
Revises: 547fa0a6608b
Create Date: 2018-08-01 22:32:41.531901
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'dae74ff90530'
down_revision = '547fa0a6608b'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('analysis', sa.Column('finished', sa.Boolean(), nullable=True))
op.add_column('analysis', sa.Column('task_id', sa.String(length=120), nullable=True))
op.create_index(op.f('ix_analysis_task_id'), 'analysis', ['task_id'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_analysis_task_id'), table_name='analysis')
op.drop_column('analysis', 'task_id')
op.drop_column('analysis', 'finished')
# ### end Alembic commands ###
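# A minimal usage sketch (assumption: the project's alembic.ini is configured):
#   alembic upgrade dae74ff90530     # apply this revision
#   alembic downgrade 547fa0a6608b   # roll back to the previous revision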
from functools import partial, wraps
from mimetypes import guess_type
from os import path
from re import sub
from time import gmtime, strftime
from urllib.parse import unquote
from sanic.compat import stat_async
from sanic.exceptions import (
ContentRangeError,
FileNotFound,
HeaderNotFound,
InvalidUsage,
)
from sanic.handlers import ContentRangeHandler
from sanic.log import error_logger
from sanic.response import HTTPResponse, file, file_stream
async def _static_request_handler(
file_or_directory,
use_modified_since,
use_content_range,
stream_large_files,
request,
content_type=None,
file_uri=None,
):
# Using this to determine if the URL is trying to break out of the path
# served. os.path.realpath seems to be very slow
if file_uri and "../" in file_uri:
raise InvalidUsage("Invalid URL")
# Merge served directory and requested file if provided
    # Strip all / at the beginning of the URL to help prevent python
    # from herping a derp and treating the uri as an absolute path
root_path = file_path = file_or_directory
if file_uri:
file_path = path.join(file_or_directory, sub("^[/]*", "", file_uri))
# URL decode the path sent by the browser otherwise we won't be able to
# match filenames which got encoded (filenames with spaces etc)
file_path = path.abspath(unquote(file_path))
if not file_path.startswith(path.abspath(unquote(root_path))):
error_logger.exception(
f"File not found: path={file_or_directory}, "
f"relative_url={file_uri}"
)
raise FileNotFound(
"File not found", path=file_or_directory, relative_url=file_uri
)
try:
headers = {}
# Check if the client has been sent this file before
# and it has not been modified since
stats = None
if use_modified_since:
stats = await stat_async(file_path)
modified_since = strftime(
"%a, %d %b %Y %H:%M:%S GMT", gmtime(stats.st_mtime)
)
if request.headers.get("If-Modified-Since") == modified_since:
return HTTPResponse(status=304)
headers["Last-Modified"] = modified_since
_range = None
if use_content_range:
_range = None
if not stats:
stats = await stat_async(file_path)
headers["Accept-Ranges"] = "bytes"
headers["Content-Length"] = str(stats.st_size)
if request.method != "HEAD":
try:
_range = ContentRangeHandler(request, stats)
except HeaderNotFound:
pass
else:
del headers["Content-Length"]
for key, value in _range.headers.items():
headers[key] = value
headers["Content-Type"] = (
content_type or guess_type(file_path)[0] or "text/plain"
)
if request.method == "HEAD":
return HTTPResponse(headers=headers)
else:
if stream_large_files:
if type(stream_large_files) == int:
threshold = stream_large_files
else:
threshold = 1024 * 1024
if not stats:
stats = await stat_async(file_path)
if stats.st_size >= threshold:
return await file_stream(
file_path, headers=headers, _range=_range
)
return await file(file_path, headers=headers, _range=_range)
except ContentRangeError:
raise
except Exception:
error_logger.exception(
f"File not found: path={file_or_directory}, "
f"relative_url={file_uri}"
)
raise FileNotFound(
"File not found", path=file_or_directory, relative_url=file_uri
)
def register(
app,
uri,
file_or_directory,
pattern,
use_modified_since,
use_content_range,
stream_large_files,
name="static",
host=None,
strict_slashes=None,
content_type=None,
):
# TODO: Though sanic is not a file server, I feel like we should at least
# make a good effort here. Modified-since is nice, but we could
# also look into etags, expires, and caching
"""
Register a static directory handler with Sanic by adding a route to the
router and registering a handler.
:param app: Sanic
:param file_or_directory: File or directory path to serve from
:param uri: URL to serve from
:param pattern: regular expression used to match files in the URL
:param use_modified_since: If true, send file modified time, and return
not modified if the browser's matches the
server's
:param use_content_range: If true, process header for range requests
and sends the file part that is requested
:param stream_large_files: If true, use the file_stream() handler rather
than the file() handler to send the file
If this is an integer, this represents the
threshold size to switch to file_stream()
:param name: user defined name used for url_for
:param content_type: user defined content type for header
:return: registered static routes
:rtype: List[sanic.router.Route]
"""
# If we're not trying to match a file directly,
# serve from the folder
if not path.isfile(file_or_directory):
uri += "<file_uri:" + pattern + ">"
# special prefix for static files
if not name.startswith("_static_"):
name = f"_static_{name}"
_handler = wraps(_static_request_handler)(
partial(
_static_request_handler,
file_or_directory,
use_modified_since,
use_content_range,
stream_large_files,
content_type=content_type,
)
)
_routes, _ = app.route(
uri,
methods=["GET", "HEAD"],
name=name,
host=host,
strict_slashes=strict_slashes,
)(_handler)
return _routes
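# A minimal usage sketch (assumption: a Sanic `app` instance exists and the pattern
# shown here matches what the caller wants to serve):
#
#   register(app, "/static", "./static", pattern=r"/?.+",
#            use_modified_since=True, use_content_range=False,
#            stream_large_files=False)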
import pandas as pd
import numpy as np
import umap
import sklearn.cluster as cluster
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
import spacy
import unicodedata
import matplotlib.pyplot as plt
import logging
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
logging.getLogger().setLevel(logging.INFO)
JULIA_VARIABLE_CSV_PATH = "ExperimentData/JuliaVariableData.csv"
CLUSTER_LABEL_CSV_PATH = "clusteringLabels.csv"
KMEANS_CLUSTER_LABEL_CSV_PATH = "ExperimentData/KmeansCluster.csv"
KMEANS_CLUSTER_TRUTH_CSV_PATH = "ExperimentData/KmeanClusterTruths.csv"
KMEANS_PREDICTED_CSV_PATH = "ExperimentData/KmeansPredicted.csv"
PREDICTED_UMAP_CSV_PATH = "ExperimentData/simPredictedUmapClusters.csv"
def createWord2Vec(data):
nlp = spacy.load('en_core_web_md')
tokenList = []
for phrase in data:
token = nlp(phrase)
tokenList.append(token.vector)
return np.asarray(tokenList)
def useUMAP(tokenList):
db = DBSCAN(eps=0.3, min_samples=2).fit(np.asarray(tokenList))
umapModel = umap.UMAP(random_state=42).fit(np.asarray(tokenList))
standardEmbedding = umapModel.transform(tokenList)
db_umap = DBSCAN(eps=0.3, min_samples=2).fit(standardEmbedding)
return np.asarray(db.labels_), np.asarray(db_umap.labels_)
def writeUMAP_DBSCAN_CSV(subj_array, labels, umapLabels, labelsSimArray, \
uMapLabelsSimArray, OutSampleLabelsSimArray, OutSampleUMAPSimArray):
logging.info("Writing CSV")
outputString = "node,labels,umapLabels,dbscanSim,UMAPsim,out_sampleDBSCAN,out_sampleUMAP\n"
for i in range(len(labels)):
outputString += str(subj_array[i]) + ","\
+ str(labels[i]) + ","\
+str(umapLabels[i]) + ","\
+ str(labelsSimArray[i]) + ","\
+ str(uMapLabelsSimArray[i])+ ","\
+ str(OutSampleLabelsSimArray[i]) + ","\
+ str(OutSampleUMAPSimArray[i]) + "\n"
with open(CLUSTER_LABEL_CSV_PATH, 'w') as filetowrite:
filetowrite.write(outputString)
filetowrite.close()
def generatePairs(labels, umapLabels, data):
nlp = spacy.load('en_core_web_md')
labelsSimArray = []
uMapLabelsSimArray = []
OutSampleLabelsSimArray = []
OutSampleUMAPSimArray = []
    labels_sim = 0
    umapLabels_sim = 0
    outsample_labels_sim = 0
    outsample_umap_sim = 0
for i in range(len(data)):
logging.info("Iterating Word " + str(i))
for j in range(len(data)):
if i != j:
token1 = nlp(data[i])
token2 = nlp(data[j])
if(labels[i] == labels[j]):
labels_sim += token1.similarity(token2)
if(umapLabels[i] == umapLabels[j]):
umapLabels_sim += token1.similarity(token2)
                if(labels[i] != labels[j]):
outsample_labels_sim += token1.similarity(token2)
if(umapLabels[i] != umapLabels[j]):
outsample_umap_sim += token1.similarity(token2)
if j == len(data)-1:
labelsSimArray.append(float(labels_sim/(list(labels).count(labels[i])-1)))
uMapLabelsSimArray.append(float(umapLabels_sim/(list(umapLabels).count(umapLabels[i])-1)))
if len(labels)-list(labels).count(labels[i]) == 0:
OutSampleLabelsSimArray.append(1)
else:
OutSampleLabelsSimArray.append(float(outsample_labels_sim/(len(labels)-1-list(labels).count(labels[i]))))
if len(umapLabels)-list(umapLabels).count(umapLabels[i]) == 0:
OutSampleUMAPSimArray.append(1)
else:
OutSampleUMAPSimArray.append(float(outsample_umap_sim/(len(umapLabels)-1-list(umapLabels).count(umapLabels[i]))))
labels_sim = 0;
umapLabels_sim = 0;
outsample_labels_sim = 0;
outsample_umap_sim = 0;
return labelsSimArray, uMapLabelsSimArray, OutSampleLabelsSimArray, OutSampleUMAPSimArray
def createCluster(svoFile):
SVOdata = pd.read_csv(svoFile)
subj_array = list(SVOdata["subject"])
obj_array = list(SVOdata["object"])
totalNodes = subj_array + obj_array
tokenList = createWord2Vec(totalNodes)
#Use UMAP Clustering
labels,umapLabels = useUMAP(tokenList)
#Retrieves Labels for Similarity
labelsSimArray, uMapLabelsSimArray, OutSampleLabelsSimArray, OutSampleUMAPSimArray = \
generatePairs(labels, umapLabels, totalNodes)
#Writes CSV for UMAP vs DBScan Labels
writeUMAP_DBSCAN_CSV(totalNodes, labels, umapLabels, labelsSimArray, \
uMapLabelsSimArray, OutSampleLabelsSimArray, OutSampleUMAPSimArray )
def cleanVariables(variableArray):
for i in range(len(variableArray)):
variableArray[i] = str(variableArray[i]).replace(",", " ")
variableArray[i] = str(variableArray[i]).replace("_", " ")
variableArray[i] = containsGreek(variableArray[i])
return variableArray
def containsGreek(inputString):
greekLetters = []
for s in inputString:
name = unicodedata.name(chr(ord(s)))
if "GREEK" in name:
greekLetters.append(s)
for letter in greekLetters:
name = unicodedata.name(chr(ord(letter))).split(" ")[3]
name = name.lower().capitalize()
inputString = inputString.replace(letter, str(name) + str(" "))
return inputString
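# Illustration (input assumed): containsGreek("Δt") returns "Delta t" because the
# Unicode name "GREEK CAPITAL LETTER DELTA" is split on spaces, its fourth word is
# capitalized, and the letter is replaced by that word plus a trailing space.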
def useKmeans(trainTokenList, K_size, variableTokenList):
print(type(trainTokenList), type(K_size), type(variableTokenList))
umapModel = umap.UMAP(random_state=42).fit(np.asarray(trainTokenList))
trainEmbedding = umapModel.transform(trainTokenList)
predictEmbedding = umapModel.transform(variableTokenList)
kmeans = KMeans(n_clusters=K_size, random_state = 0).fit(trainEmbedding)
return kmeans.labels_, kmeans.predict(predictEmbedding)
def writeCSV(variable_array, predictedLabels, fileName):
logging.info("generating CSV " + fileName)
outputString = "variable,cluster\n"
for i in range(len(variable_array)):
outputString += str(variable_array[i].replace(",", " ")) + "," + str(predictedLabels[i]) + "\n"
with open(fileName, 'w') as filetowrite:
filetowrite.write(outputString)
filetowrite.close()
def groupNodesByCluster(umapData):
maxNoClusters = max(list(umapData["umapLabels"]))
clusteredNodes = []
for i in range(maxNoClusters + 1):
temp_bin = []
for j in range(len(list(umapData["umapLabels"]))):
if list(umapData["umapLabels"])[j] == i:
temp_bin.append(list(umapData["node"])[j])
clusteredNodes.append(temp_bin)
return clusteredNodes
def groupNodesByKMeansCluster(kMeansData):
maxNoClusters = max(list(kMeansData["cluster"]))
clusteredNodes = []
for i in range(maxNoClusters + 1):
temp_bin = []
for j in range(len(list(kMeansData["cluster"]))):
if list(kMeansData["cluster"])[j] == i:
temp_bin.append(list(kMeansData["variable"])[j])
clusteredNodes.append(temp_bin)
return clusteredNodes
def getSimilarityLabels(clusteredNodes, variable_array):
labels = []
nlp = spacy.load('en_core_web_md')
count = 0
for variable in variable_array:
logging.info("Comparing Variable No: " + str(count))
count += 1
variableToken = nlp(variable)
highest_average = -9000
label = 0
for clusterNo in range(len(clusteredNodes)):
average = 0
for node in clusteredNodes[clusterNo]:
nodeToken = nlp(node)
average += variableToken.similarity(nodeToken)
average /= len(clusteredNodes[clusterNo])
if average > highest_average:
highest_average = average
label = clusterNo
labels.append(label)
return labels
def calculateKMeansAccuracy():
labeledData = pd.read_csv(JULIA_VARIABLE_CSV_PATH)
predictedData = pd.read_csv(KMEANS_PREDICTED_CSV_PATH)
labeled = list(labeledData["KMeansLabels"])
predicted = list(predictedData["cluster"])
count = 0
for i in range(len(predicted)):
if labeled[i] == predicted[i]:
count += 1
logging.info("KMeans Accuracy is : " + str(float(count/len(predicted))))
def calculateSimAccuracy():
labeledData = pd.read_csv(JULIA_VARIABLE_CSV_PATH)
predictedData = pd.read_csv(PREDICTED_UMAP_CSV_PATH)
labeled = list(labeledData["DBSCANLabels"])
predicted = list(predictedData["cluster"])
count = 0
for i in range(len(predicted)):
if labeled[i] == predicted[i]:
count += 1
logging.info("Similar Cluster Assignment Accuracy is : " + str(float(count/len(predicted))))
def runKMeansExp():
variableData = pd.read_csv(JULIA_VARIABLE_CSV_PATH)
umapData = pd.read_csv(CLUSTER_LABEL_CSV_PATH)
umapData = umapData[umapData.umapLabels != -1]
kmeansTrainData = list(umapData["node"])
variable_array = list(variableData["variable"])
variable_array = cleanVariables(variable_array)
variableTokenList = createWord2Vec(variable_array)
trainTokenList = createWord2Vec(kmeansTrainData)
print(len(trainTokenList))
K_size = max(list(umapData["umapLabels"]))
trainLabels, predictedLabels = useKmeans(trainTokenList, K_size, variableTokenList)
writeCSV(kmeansTrainData, trainLabels, KMEANS_CLUSTER_LABEL_CSV_PATH)
writeCSV(variable_array, predictedLabels, KMEANS_PREDICTED_CSV_PATH)
calculateKMeansAccuracy()
def runUMapSimilarityExp():
variableData = pd.read_csv(JULIA_VARIABLE_CSV_PATH)
umapData = pd.read_csv(CLUSTER_LABEL_CSV_PATH)
umapData = umapData[umapData.umapLabels != -1]
variable_array = list(variableData["variable"])
variable_array = cleanVariables(variable_array)
clusteredNodes = groupNodesByCluster(umapData)
labels = getSimilarityLabels(clusteredNodes, variable_array)
writeCSV(variable_array, labels, PREDICTED_UMAP_CSV_PATH)
calculateSimAccuracy()
def getAverageSimilarity(variable_array, clusteredNodes, predictedLabels):
nlp = spacy.load('en_core_web_md')
averageSimArray = []
for i in range(len(variable_array)):
averageSim = 0
for word in clusteredNodes[predictedLabels[i]]:
token1 = nlp(word)
token2 = nlp(variable_array[i])
averageSim += token1.similarity(token2)
averageSimArray.append(float(averageSim/ len(clusteredNodes[predictedLabels[i]])))
return averageSimArray
def runCombinationExp():
variableData = pd.read_csv(JULIA_VARIABLE_CSV_PATH)
umapData = pd.read_csv(CLUSTER_LABEL_CSV_PATH)
umapData = umapData[umapData.umapLabels != -1]
kmeansTrainData = list(umapData["node"])
variable_array = list(variableData["variable"])
variable_array = cleanVariables(variable_array)
variableTokenList = createWord2Vec(variable_array)
trainTokenList = createWord2Vec(kmeansTrainData)
K_size = max(list(umapData["umapLabels"]))
trainLabels, predictedLabels = useKmeans(trainTokenList, K_size, variableTokenList)
writeCSV(kmeansTrainData, trainLabels, KMEANS_CLUSTER_LABEL_CSV_PATH)
clusteredNodes = groupNodesByKMeansCluster(pd.read_csv(KMEANS_CLUSTER_LABEL_CSV_PATH))
averageSimArray = getAverageSimilarity(variable_array, clusteredNodes, predictedLabels)
writeCSV(variable_array, predictedLabels, KMEANS_PREDICTED_CSV_PATH)
graphCombinationExp(averageSimArray)
return averageSimArray
def graphCombinationExp(averageSimArray):
labeledData = pd.read_csv(JULIA_VARIABLE_CSV_PATH)
predictedData = pd.read_csv(KMEANS_CLUSTER_TRUTH_CSV_PATH)
labeled = list(labeledData["KMeansLabels"])
predicted = list(predictedData["cluster"])
thresholdArray = []
accuracy = []
numberOfAssignments = []
threshold = .01
while threshold < .95:
assignmentCount = 0
denominatorCount = 0
for i in range(len(predicted)):
if averageSimArray[i] > threshold:
denominatorCount += 1
if labeled[i] == predicted[i] and averageSimArray[i] > threshold:
assignmentCount += 1
if denominatorCount != 0:
accuracy.append(float(assignmentCount/denominatorCount))
else:
accuracy.append(1.0)
numberOfAssignments.append(float(assignmentCount/len(predicted)))
thresholdArray.append(threshold)
threshold += .02
numberOfAssignments = np.divide(np.asarray(numberOfAssignments), numberOfAssignments[0])
plt.figure(0)
plt.title("Accuracy vs Normalized True Assignments")
plt.plot(thresholdArray, accuracy, color="blue", label="Accuracy")
plt.plot(thresholdArray, numberOfAssignments, color="orange", label="Normalized True Assigns" )
plt.legend(loc="upper right")
plt.xticks(np.arange(0, 1, step=0.1))
plt.xlabel("Similarity Threshold")
plt.ylabel("Normalized Values")
idx = np.argwhere(np.diff(np.sign(numberOfAssignments - accuracy))).flatten()
plt.plot(thresholdArray[int(idx)], numberOfAssignments[int(idx)], 'ro')
logging.info("Intersection Threshold is: " + str(thresholdArray[int(idx)]))
# date: 2019.04.09
# https://stackoverflow.com/questions/55592626/how-would-i-make-this-button-so-that-it-only-registers-one-click
import pygame
# --- constants ---
WIDTH = 640
HEIGHT = 480
FPS = 5
# --- functions ---
def action_button_click(x, y, w, h, action=None):
mouse = pygame.mouse.get_pos()
click = pygame.mouse.get_pressed()
if x+w > mouse[0] > x and y+h > mouse[1] > y:
if click[0] == 1 and action != None:
action()
def action_button_draw(x, y, w, h, ic, ac, text, text_colour):
mouse = pygame.mouse.get_pos()
click = pygame.mouse.get_pressed()
if x+w > mouse[0] > x and y+h > mouse[1] > y:
pygame.draw.rect(screen, ac,(x,y,w,h))
else:
pygame.draw.rect(screen, ic,(x,y,w,h))
font = pygame.font.SysFont("arial black",20)
text = font.render(text,True,(text_colour))
screen.blit(text,[x+w/2-(text.get_rect().w/2),y+h/2-(text.get_rect().h/2)])
def test_action():
print("clicked")
# --- main ---
# - init -
pygame.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
screen_rect = screen.get_rect()
# - mainloop -
clock = pygame.time.Clock()
running = True
while running:
# - events -
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
running = False
# MOUSEBUTTONDOWN is created only once,
# when button changes state from "not-pressed" to "pressed"
# so it is better for this job than "pygame.mouse.get_pressed()"
elif event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
action_button_click(100, 100, 100, 50, test_action)
# --- draws ----
screen.fill([0,0,0]) # clear screen
action_button_draw(100, 100, 100, 50, [255,0,0], [0,255,0], "Hello", [0,0,0])
pygame.display.flip()
# - FPS -
clock.tick(FPS)
# - end -
pygame.quit()
# hms_tz/nhif/api/sales_invoice.py
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Aakvatech and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from hms_tz.nhif.api.healthcare_utils import update_dimensions, create_individual_lab_test, create_individual_radiology_examination, create_individual_procedure_prescription
def validate(doc, method):
for item in doc.items:
if not item.is_free_item and item.amount == 0:
frappe.throw(_("Amount of the healthcare service <b>'{0}'</b> cannot be ZERO. Please do not select this item and request Pricing team to resolve this.").format(item.item_name))
update_dimensions(doc)
@frappe.whitelist()
def create_pending_healthcare_docs(doc_name):
doc = frappe.get_doc("Sales Invoice", doc_name)
create_healthcare_docs(doc, "From Front End")
def before_submit(doc, method):
if doc.is_pos and doc.outstanding_amount != 0:
frappe.throw(_("Sales invoice not paid in full. Make sure that full paid amount is entered in Mode of Payments table."))
def create_healthcare_docs(doc, method):
for item in doc.items:
if item.reference_dt:
if item.reference_dt == "Healthcare Service Order":
hso_doc = frappe.get_doc(
"Healthcare Service Order", item.reference_dn)
if hso_doc.insurance_subscription and not hso_doc.prescribed:
return
if not hso_doc.order:
frappe.msgprint(_("HSO order not found..."), alert = True)
return
child = frappe.get_doc(hso_doc.order_reference_doctype,
hso_doc.order_reference_name)
if hso_doc.order_doctype == "Lab Test Template":
create_individual_lab_test(hso_doc, child)
elif hso_doc.order_doctype == "Radiology Examination Template":
create_individual_radiology_examination(hso_doc, child)
elif hso_doc.order_doctype == "Clinical Procedure Template":
create_individual_procedure_prescription(hso_doc, child)
frappe.enqueue(method="hms_tz.nhif.api.patient_encounter.enqueue_on_update_after_submit", queue='short',
timeout=10000, is_async=True, kwargs=doc.name)
import math
from typing import Optional, Union, Tuple, List
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
from torch_geometric.utils.num_nodes import maybe_num_nodes
from torch_scatter import scatter, segment_csr, gather_csr
from torch_scatter.utils import broadcast
import tsl
__all__ = [
'expand_then_cat',
'gated_tanh',
'reverse_tensor',
'sparse_softmax',
'sparse_multi_head_attention'
]
def expand_then_cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]],
dim=-1) -> Tensor:
r"""
Match the dimensions of tensors in the input list and then concatenate.
Args:
tensors: Tensors to concatenate.
dim (int): Dimension along which to concatenate.
"""
shapes = [t.shape for t in tensors]
expand_dims = list(np.max(shapes, 0))
expand_dims[dim] = -1
tensors = [t.expand(*expand_dims) for t in tensors]
return torch.cat(tensors, dim=dim)
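# Example (shapes assumed): concatenating tensors of shapes (3, 1, 8) and (1, 5, 4)
# along dim=-1 first expands both to a common (3, 5, *) shape and yields (3, 5, 12).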
@torch.jit.script
def gated_tanh(input: Tensor, dim: int = -1) -> Tensor:
r"""The gated tanh unite. Computes:
.. math ::
\text{GatedTanH}(a, b) = \text{TanH}(a) \otimes \sigma(b)
where `input` is split in half along `dim` to form `a` and `b`, :math:`\text{TanH}` is the hyperbolic tangent
function, :math:`\sigma` is the sigmoid function and :math:`\otimes` is the element-wise product between matrices.
Args:
input (Tensor): Input tensor.
dim (int, optional): Dimension on which to split the input.
(default: -1)
"""
out, gate = torch.tensor_split(input, 2, dim=dim)
return torch.tanh(out) * torch.sigmoid(gate)
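# Example (shape assumed): for an input of shape (8, 32), the last dimension is
# split into two halves of 16, so gated_tanh(x, dim=-1) returns an (8, 16) tensor.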
@torch.jit.script
def reverse_tensor(tensor: Tensor, dim: int) -> Tensor:
"""Reverse tensor along specific dimension.
Args:
tensor (Tensor): Input tensor.
dim (int): Dimension along which to reverse sequence.
"""
indices = torch.arange(tensor.size(dim) - 1, -1, -1, device=tensor.device)
return tensor.index_select(dim, indices)
@torch.jit.script
def sparse_softmax(src: Tensor, index: Optional[Tensor] = None,
ptr: Optional[Tensor] = None,
num_nodes: Optional[int] = None,
dim: int = -2) -> Tensor:
r"""Extension of ~torch_geometric.softmax with index broadcasting to compute
a sparsely evaluated softmax over multiple broadcast dimensions.
Given a value tensor :attr:`src`, this function first groups the values
along the first dimension based on the indices specified in :attr:`index`,
and then proceeds to compute the softmax individually for each group.
Args:
src (Tensor): The source tensor.
index (Tensor, optional): The indices of elements for applying the softmax.
ptr (LongTensor, optional): If given, computes the softmax based on
sorted inputs in CSR representation. (default: :obj:`None`)
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`index`. (default: :obj:`None`)
dim (int, optional): The dimension in which to normalize, i.e., the edge
dimension. (default: :obj:`-2`)
"""
if ptr is not None:
dim = dim + src.dim() if dim < 0 else dim
size = ([1] * dim) + [-1]
ptr = ptr.view(size)
src_max = gather_csr(segment_csr(src, ptr, reduce='max'), ptr)
out = (src - src_max).exp()
out_sum = gather_csr(segment_csr(out, ptr, reduce='sum'), ptr)
elif index is not None:
N = maybe_num_nodes(index, num_nodes)
expanded_index = broadcast(index, src, dim)
src_max = scatter(src, expanded_index, dim, dim_size=N, reduce='max')
src_max = src_max.index_select(dim, index)
out = (src - src_max).exp()
out_sum = scatter(out, expanded_index, dim, dim_size=N, reduce='sum')
out_sum = out_sum.index_select(dim, index)
else:
raise NotImplementedError
return out / (out_sum + tsl.epsilon)
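# A minimal usage sketch (1-D case, values assumed):
#   src = torch.tensor([1., 2., 3., 4.])
#   index = torch.tensor([0, 0, 1, 1])
#   sparse_softmax(src, index, dim=0)  # softmax over {1., 2.} and {3., 4.} separately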
@torch.jit.script
def sparse_multi_head_attention(q: Tensor, k: Tensor, v: Tensor, index: Tensor,
dim_size: Optional[int] = None,
dropout_p: float = 0.0):
r"""Computes multi-head, scaled, dot product attention on query, key and
value tensors, applying dropout if a probability greater than 0.0 is
    specified. `index` specifies, for each query in q, the sequence it belongs
    to in the original batched, dense tensor.
Returns a tensor pair containing attended values and attention weights.
Args:
q (Tensor): Query tensor. See Shape section for shape details.
k (Tensor): Key tensor. See Shape section for shape details.
v (Tensor): Value tensor. See Shape section for shape details.
        index (Tensor): Tensor containing, for each entry along the sparse
            dimension, the index of the sequence it belongs to. See Shape
            section for shape details.
dim_size (int, optional): The batched target length sequence, i.e.
:obj:`max_val + 1` of :attr:`index`. (default: :obj:`None`)
dropout_p: dropout probability. If greater than 0.0, dropout is applied.
Shape:
        - q: :math:`(S, H, E)` where S is the sparse dimension, H is the number of
          heads, and E is the embedding dimension.
        - k: :math:`(S, H, E)` where S is the sparse dimension, H is the number of
          heads, and E is the embedding dimension.
        - v: :math:`(S, H, O)` where S is the sparse dimension, H is the number of
          heads, and O is the output dimension.
        - index: :math:`(S)` where S is the sparse dimension.
- dim_size: must be :math:`(B \times Nt)`
- Output: attention values have shape :math:`(B, Nt, E)`; attention
weights have shape :math:`(S, H)`
"""
dim = 0
B, H, E = q.shape
N = maybe_num_nodes(index, dim_size)
# scores
alpha = (q * k).sum(dim=-1) / math.sqrt(E)
alpha = sparse_softmax(alpha, index, num_nodes=N, dim=dim)
if dropout_p > 0.0:
alpha = F.dropout(alpha, p=dropout_p)
v *= alpha.view(-1, H, 1)
# out
out = torch.zeros((N, H, v.size(2)), dtype=v.dtype, device=v.device)
add_index = broadcast(index, v, dim)
out.scatter_add_(dim, add_index, v)
return out, alpha
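# Shape illustration (numbers assumed): with q and k of shape (S, H, E) = (10, 4, 16),
# v of shape (10, 4, 32), index of shape (10,) and dim_size = 3, the returned attended
# values have shape (3, 4, 32) and the attention weights have shape (10, 4).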
#!/usr/bin/python
"""
(C) Copyright 2018-2020 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Government's rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
"""
from __future__ import print_function
from getpass import getuser
from grp import getgrgid
from pwd import getpwuid
import re
from command_utils_base import CommandFailure
from dmg_utils_base import DmgCommandBase
class DmgCommand(DmgCommandBase):
# pylint: disable=too-many-ancestors,too-many-public-methods
"""Defines a object representing a dmg command with helper methods."""
    # As the handling of these regular expressions is moved inside their
    # respective methods, they should be removed from this definition.
METHOD_REGEX = {
"run":
r"(.*)",
"network_scan":
r"[-]+(?:\n|\n\r)([a-z0-9-]+)(?:\n|\n\r)[-]+|NUMA\s+"
r"Socket\s+(\d+)|(ofi\+[a-z0-9;_]+)\s+([a-z0-9, ]+)",
"pool_list":
r"(?:([0-9a-fA-F-]+) +([0-9,]+))",
"pool_query":
r"(?:Pool\s+([0-9a-fA-F-]+),\s+ntarget=(\d+),\s+disabled=(\d+),"
r"\s+leader=(\d+),\s+version=(\d+)|Target\(VOS\)\s+count:"
r"\s*(\d+)|(?:(?:SCM:|NVMe:)\s+Total\s+size:\s+([0-9.]+\s+[A-Z]+)"
r"\s+Free:\s+([0-9.]+\s+[A-Z]+),\smin:([0-9.]+\s+[A-Z]+),"
r"\s+max:([0-9.]+\s+[A-Z]+),\s+mean:([0-9.]+\s+[A-Z]+))"
r"|Rebuild\s+\w+,\s+([0-9]+)\s+objs,\s+([0-9]+)\s+recs)",
"storage_query_list_pools":
r"[-]+\s+([a-z0-9-]+)\s+[-]+|(?:UUID:([a-z0-9-]+)\s+Rank:([0-9]+)"
r"\s+Targets:\[([0-9 ]+)\])(?:\s+Blobs:\[([0-9 ]+)\]\s+?$)",
"storage_query_list_devices":
r"[-]+\s+([a-z0-9-]+)\s+[-]+\s+.*\s+|(?:UUID:([a-z0-9-]+)\s+"
r"Targets:\[([0-9 ]+)\]\s+Rank:([0-9]+)\s+State:([A-Z]+))",
"storage_query_device_health":
r"[-]+\s+([a-z0-9-]+)\s+[-]+\s+.*\s+UUID:([a-z0-9-]+)\s+Targets:"
r"\[([0-9 ]+)\]\s+Rank:([0-9]+)\s+State:(\w+)\s+.*\s+|(?:Temp.*|"
r"Cont.*Busy Time|Pow.*Cycles|Pow.*Duration|Unsafe.*|Media.*|"
r"Read.*|Write.*|Unmap.*|Checksum.*|Err.*Entries|Avail.*|"
r"Dev.*Reli.*|Vola.*):\s*([A-Za-z0-9]+)",
"storage_query_target_health":
r"[-]+\s+([a-z0-9-]+)\s+[-]+\s+|Devices\s+|UUID:([a-z0-9-]+)\s+"
r"Targets:\[([0-9 ]+)\]\s+Rank:(\d+)\s+State:(\w+)|"
r"(?:Read\s+Errors|Write\s+Errors|Unmap\s+Errors|Checksum\s+Errors|"
r"Error\s+Log\s+Entries|Media\s+Errors|Temperature|"
r"Available\s+Spare|Device\s+Reliability|Read\s+Only|"
r"Volatile\s+Memory\s+Backup):\s?([A-Za-z0-9- ]+)",
"storage_set_faulty":
r"[-]+\s+([a-z0-9-]+)\s+[-]+\s+|Devices\s+|(?:UUID:[a-z0-9-]+\s+"
r"Targets:\[[0-9 ]+\]\s+Rank:\d+\s+State:(\w+))",
"system_query":
r"(\d\s+([0-9a-fA-F-]+)\s+([0-9.]+)\s+[A-Za-z]+)",
"system_start":
r"(\d+|\[[0-9-,]+\])\s+([A-Za-z]+)\s+([A-Za-z]+)",
"system_stop":
r"(\d+|\[[0-9-,]+\])\s+([A-Za-z]+)\s+([A-Za-z]+)",
}
def network_scan(self, provider=None, all_devs=False):
"""Get the result of the dmg network scan command.
Args:
provider (str): name of network provider tied to the device
all_devs (bool, optional): Show all device info. Defaults to False.
Returns:
CmdResult: an avocado CmdResult object containing the dmg command
information, e.g. exit status, stdout, stderr, etc.
Raises:
            CommandFailure: if the dmg network scan command fails.
"""
return self._get_result(
("network", "scan"), provider=provider, all=all_devs)
def storage_scan(self, verbose=False):
# pylint: disable=pointless-string-statement
"""Get the result of the dmg storage scan command.
Args:
verbose (bool, optional): create verbose output. Defaults to False.
Returns:
            dict: Values obtained from stdout as a dictionary. Most of the
                values are lists.
Raises:
CommandFailure: if the dmg storage scan command fails.
"""
self.result = self._get_result(("storage", "scan"), verbose=verbose)
# Sample dmg storage scan verbose output. Don't delete this sample
# because it helps to develop and debug the regex.
"""
--------
wolf-130
--------
SCM Namespace Socket ID Capacity
------------- --------- --------
pmem0 0 3.2 TB
pmem1 0 3.2 TB
NVMe PCI Model FW Revision Socket ID Capacity
-------- ----- ----------- --------- --------
0000:5e:00.0 INTEL SSDPE2KE016T8 VDV10170 0 1.6 TB
0000:5f:00.0 INTEL SSDPE2KE016T8 VDV10170 0 1.6 TB
0000:81:00.0 INTEL SSDPED1K750GA E2010475 1 750 GB
0000:da:00.0 INTEL SSDPED1K750GA E2010475 1 750 GB
"""
# Sample dmg storage scan output. Don't delete this sample because it
# helps to develop and debug the regex.
"""
Hosts SCM Total NVMe Total
----- --------- ----------
wolf-130 6.4 TB (2 namespaces) 4.7 TB (4 controllers)
"""
data = {}
if verbose:
vals = re.findall(
r"--------\n([a-z0-9-]+)\n--------|"
r"\n([a-z0-9_]+)[ ]+([\d]+)[ ]+([\d.]+) ([A-Z]+)|"
r"([a-f0-9]+:[a-f0-9]+:[a-f0-9]+.[a-f0-9]+)[ ]+"
r"(\S+)[ ]+(\S+)[ ]+(\S+)[ ]+(\d+)[ ]+([\d.]+)"
r"[ ]+([A-Z]+)[ ]*\n", self.result.stdout)
data = {}
host = vals[0][0]
data[host] = {}
data[host]["scm"] = {}
i = 1
while i < len(vals):
if vals[i][1] == "":
break
pmem_name = vals[i][1]
socket_id = vals[i][2]
capacity = "{} {}".format(vals[i][3], vals[i][4])
data[host]["scm"][pmem_name] = {}
data[host]["scm"][pmem_name]["socket"] = socket_id
data[host]["scm"][pmem_name]["capacity"] = capacity
i += 1
data[host]["nvme"] = {}
while i < len(vals):
pci_addr = vals[i][5]
model = "{} {}".format(vals[i][6], vals[i][7])
fw_revision = vals[i][8]
socket_id = vals[i][9]
capacity = "{} {}".format(vals[i][10], vals[i][11])
data[host]["nvme"][pci_addr] = {}
data[host]["nvme"][pci_addr]["model"] = model
data[host]["nvme"][pci_addr]["fw_revision"] = fw_revision
data[host]["nvme"][pci_addr]["socket"] = socket_id
data[host]["nvme"][pci_addr]["capacity"] = capacity
i += 1
else:
vals = re.findall(
r"([a-z0-9-\[\]]+)\s+([\d.]+)\s+([A-Z]+)\s+"
r"\(([\w\s]+)\)\s+([\d.]+)\s+([A-Z]+)\s+\(([\w\s]+)",
self.result.stdout)
self.log.info("--- Non-verbose output parse result ---")
self.log.info(vals)
data = {}
for row in vals:
host = row[0]
data[host] = {
"scm": {"capacity": None, "details": None},
"nvme": {"capacity": None, "details": None}}
data[host]["scm"]["capacity"] = " ".join(row[1:3])
data[host]["scm"]["details"] = row[3]
data[host]["nvme"]["capacity"] = " ".join(row[4:6])
data[host]["nvme"]["details"] = row[6]
return data
def storage_format(self, reformat=False):
"""Get the result of the dmg storage format command.
Args:
reformat (bool): always reformat storage, could be destructive.
This will create control-plane related metadata i.e. superblock
file and reformat if the storage media is available and
formattable.
Returns:
CmdResult: an avocado CmdResult object containing the dmg command
information, e.g. exit status, stdout, stderr, etc.
Raises:
CommandFailure: if the dmg storage format command fails.
"""
return self._get_result(("storage", "format"), reformat=reformat)
def storage_prepare(self, user=None, hugepages="4096", nvme=False,
scm=False, reset=False, force=True):
"""Get the result of the dmg storage format command.
Returns:
CmdResult: an avocado CmdResult object containing the dmg command
information, e.g. exit status, stdout, stderr, etc.
Raises:
CommandFailure: if the dmg storage prepare command fails.
"""
kwargs = {
"nvme_only": nvme,
"scm_only": scm,
"target_user": getuser() if user is None else user,
"hugepages": hugepages,
"reset": reset,
"force": force
}
return self._get_result(("storage", "prepare"), **kwargs)
def storage_set_faulty(self, uuid, force=True):
"""Get the result of the 'dmg storage set nvme-faulty' command.
Args:
uuid (str): Device UUID to query.
force (bool, optional): Force setting device state to FAULTY.
Defaults to True.
"""
return self._get_result(
("storage", "set", "nvme-faulty"), uuid=uuid, force=force)
def storage_query_list_devices(self, rank=None, health=False):
"""Get the result of the 'dmg storage query list-devices' command.
Args:
rank (int, optional): Limit response to devices on this rank.
Defaults to None.
health (bool, optional): Include device health in response.
Defaults to false.
Returns:
CmdResult: an avocado CmdResult object containing the dmg command
information, e.g. exit status, stdout, stderr, etc.
Raises:
            CommandFailure: if the dmg storage query list-devices command fails.
"""
return self._get_result(
("storage", "query", "list-devices"), rank=rank, health=health)
def storage_query_list_pools(self, uuid=None, rank=None, verbose=False):
"""Get the result of the 'dmg storage query list-pools' command.
Args:
uuid (str): Device UUID to query. Defaults to None.
rank (int, optional): Limit response to pools on this rank.
Defaults to None.
verbose (bool, optional): create verbose output. Defaults to False.
Returns:
CmdResult: an avocado CmdResult object containing the dmg command
information, e.g. exit status, stdout, stderr, etc.
Raises:
            CommandFailure: if the dmg storage query list-pools command fails.
"""
return self._get_result(
("storage", "query", "list-pools"), uuid=uuid, rank=rank,
verbose=verbose)
def storage_query_device_health(self, uuid):
"""Get the result of the 'dmg storage query device-health' command.
Args:
uuid (str): Device UUID to query.
Returns:
CmdResult: an avocado CmdResult object containing the dmg command
information, e.g. exit status, stdout, stderr, etc.
Raises:
            CommandFailure: if the dmg storage query device-health command fails.
"""
return self._get_result(
("storage", "query", "device-health"), uuid=uuid)
def storage_query_target_health(self, rank, tgtid):
"""Get the result of the 'dmg storage query target-health' command.
Args:
rank (int): Rank hosting target.
tgtid (int): Target index to query.
Returns:
CmdResult: an avocado CmdResult object containing the dmg command
information, e.g. exit status, stdout, stderr, etc.
Raises:
            CommandFailure: if the dmg storage query target-health command fails.
"""
return self._get_result(
("storage", "query", "target-health"), rank=rank, tgtid=tgtid)
def storage_query_nvme_health(self):
"""Get the result of the 'dmg storage query nvme-health' command.
Returns:
CmdResult: an avocado CmdResult object containing the dmg command
information, e.g. exit status, stdout, stderr, etc.
Raises:
            CommandFailure: if the dmg storage query nvme-health command fails.
"""
return self._get_result(("storage", "query", "nvme-health"))
def pool_create(self, scm_size, uid=None, gid=None, nvme_size=None,
target_list=None, svcn=None, group=None, acl_file=None):
"""Create a pool with the dmg command.
The uid and gid method arguments can be specified as either an integer
or a string. If an integer value is specified it will be converted into
the corresponding user/group name string.
Args:
scm_size (int): SCM pool size to create.
uid (object, optional): User ID with privileges. Defaults to None.
gid (object, optional): Group ID with privileges. Defaults to None.
nvme_size (str, optional): NVMe size. Defaults to None.
target_list (list, optional): a list of storage server unique
identifiers (ranks) for the DAOS pool
svcn (str, optional): Number of pool service replicas. Defaults to
                None, in which case the dmg binary defaults to 1.
group (str, optional): DAOS system group name in which to create the
pool. Defaults to None, in which case "daos_server" is used by
default.
acl_file (str, optional): ACL file. Defaults to None.
Raises:
CommandFailure: if the 'dmg pool create' command fails and
self.exit_status_exception is set to True.
Returns:
dict: a dictionary containing the 'uuid' and 'svc' of the new pool
successfully extracted from the dmg command result.
"""
kwargs = {
"user": getpwuid(uid).pw_name if isinstance(uid, int) else uid,
"group": getgrgid(gid).gr_name if isinstance(gid, int) else gid,
"scm_size": scm_size,
"nvme_size": nvme_size,
"nsvc": svcn,
"sys": group,
"acl_file": acl_file
}
if target_list is not None:
kwargs["ranks"] = ",".join([str(target) for target in target_list])
self._get_result(("pool", "create"), **kwargs)
# Extract the new pool UUID and SVC list from the command output
data = {}
match = re.findall(
r"UUID:\s+([A-Za-z0-9-]+),\s+Service replicas:\s+([A-Za-z0-9-]+)",
self.result.stdout)
if match:
data["uuid"] = match[0][0]
data["svc"] = match[0][1]
return data
def pool_query(self, pool):
"""Query a pool with the dmg command.
Args:
pool (str): Pool UUID to query.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the dmg pool query command fails.
"""
return self._get_result(("pool", "query"), pool=pool)
def pool_destroy(self, pool, force=True):
"""Destroy a pool with the dmg command.
Args:
pool (str): Pool UUID to destroy.
force (bool, optional): Force removal of pool. Defaults to True.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the dmg pool destroy command fails.
"""
return self._get_result(("pool", "destroy"), pool=pool, force=force)
def pool_get_acl(self, pool):
"""Get the ACL for a given pool.
Args:
pool (str): Pool for which to get the ACL.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the dmg pool get-acl command fails.
"""
return self._get_result(("pool", "get-acl"), pool=pool)
def pool_update_acl(self, pool, acl_file, entry):
"""Update the acl for a given pool.
Args:
pool (str): Pool for which to update the ACL.
acl_file (str): ACL file to update
entry (str): entry to be updated
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the dmg pool update-acl command fails.
"""
return self._get_result(
("pool", "update-acl"), pool=pool, acl_file=acl_file, entry=entry)
def pool_overwrite_acl(self, pool, acl_file):
"""Overwrite the acl for a given pool.
Args:
pool (str): Pool for which to overwrite the ACL.
acl_file (str): ACL file to update
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the dmg pool overwrite-acl command fails.
"""
return self._get_result(
("pool", "overwrite-acl"), pool=pool, acl_file=acl_file)
def pool_delete_acl(self, pool, principal):
"""Delete the acl for a given pool.
Args:
pool (str): Pool for which to delete the ACL.
principal (str): principal to be deleted
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the dmg pool delete-acl command fails.
"""
return self._get_result(
("pool", "delete-acl"), pool=pool, principal=principal)
def pool_list(self):
"""List pools.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the dmg pool list command fails.
"""
return self._get_result(("pool", "list"))
def pool_set_prop(self, pool, name, value):
"""Set property for a given Pool.
Args:
pool (str): Pool UUID for which the property is to be set.
name (str): Property name to be set
value (str): Property value to be set
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the dmg pool set-prop command fails.
"""
return self._get_result(
("pool", "set-prop"), pool=pool, name=name, value=value)
def pool_exclude(self, pool, rank, tgt_idx=None):
"""Exclude a daos_server from the pool.
Args:
pool (str): Pool uuid.
rank (int): Rank of the daos_server to exclude
tgt_idx (int): target to be excluded from the pool
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the dmg pool exclude command fails.
"""
return self._get_result(
("pool", "exclude"), pool=pool, rank=rank, tgt_idx=tgt_idx)
def pool_drain(self, pool, rank, tgt_idx=None):
"""Drain a daos_server from the pool
Args:
pool (str): Pool uuid.
rank (int): Rank of the daos_server to drain
tgt_idx (int): target to be drained from the pool
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the dmg pool drain command fails.
"""
return self._get_result(
("pool", "drain"), pool=pool, rank=rank, tgt_idx=tgt_idx)
def pool_reintegrate(self, pool, rank, tgt_idx=None):
"""Reintegrate a daos_server to the pool.
Args:
pool (str): Pool uuid.
rank (int): Rank of the daos_server to reintegrate
tgt_idx (int): target to be reintegrated to the pool
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the dmg pool reintegrate command fails.
"""
return self._get_result(
("pool", "reintegrate"), pool=pool, rank=rank, tgt_idx=tgt_idx)
def system_query(self, rank=None, verbose=True):
"""Query system to obtain the status of the servers.
Args:
rank (int, optional): Specific rank whose status to obtain.
Defaults to None, which means report all available ranks.
verbose (bool, optional): Obtain a detailed query report. Defaults to True.
Returns:
CmdResult: an avocado CmdResult object containing the dmg command
information, e.g. exit status, stdout, stderr, etc.
Raises:
CommandFailure: if the dmg system query command fails.
"""
return self._get_result(("system", "query"), rank=rank, verbose=verbose)
def system_start(self):
"""Start the system.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the dmg system start command fails.
"""
return self._get_result(("system", "start"))
def system_stop(self, force=False):
"""Stop the system.
Args:
force (bool, optional): whether to force the stop. Defaults to
False.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the dmg system stop command fails.
"""
return self._get_result(("system", "stop"), force=force)
def check_system_query_status(stdout_str):
"""Check if any server crashed.
Args:
stdout_str (list): list obtained from 'dmg system query -v'
Returns:
bool: True if no server crashed, False otherwise.
"""
check = True
rank_info = []
failed_rank_list = []
# iterate to obtain failed rank list
for i, _ in enumerate(stdout_str):
rank_info.append(stdout_str[i][0])
print("rank_info: \n{}".format(rank_info))
for items in rank_info:
item = items.split()
if item[3] in ["Unknown", "Evicted", "Errored", "Unresponsive"]:
failed_rank_list.append(items)
# if failed rank list is not empty display the failed ranks
# and return False
if failed_rank_list:
for failed_list in failed_rank_list:
print("failed_list: {}\n".format(failed_list))
out = failed_list.split()
print("Rank {} failed with state '{}'".format(out[0], out[3]))
check = False
return check
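# Illustrative sanity check for check_system_query_status() (not part of the
# original module). The row format below is an assumption: each entry mimics a
# parsed 'dmg system query -v' row whose first field is a whitespace-separated
# "<rank> <uuid> <address> <state>" string, which is what the helper indexes into.
def _example_check_system_query_status():
    sample_rows = [
        ["0 9cf5be2d 10.0.0.1:10001 Joined"],
        ["1 846045dc 10.0.0.2:10001 Evicted"],
    ]
    # Rank 1 is in the "Evicted" state, so the check reports a failure (False).
    return check_system_query_status(sample_rows)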
def get_pool_uuid_service_replicas_from_stdout(stdout_str):
"""Get Pool UUID and Service replicas from stdout.
stdout_str is something like:
Active connections: [wolf-3:10001]
Creating DAOS pool with 100MB SCM and 0B NvMe storage (1.000 ratio)
Pool-create command SUCCEEDED: UUID: 9cf5be2d-083d-4f6b-9f3e-38d771ee313f,
Service replicas: 0
This method makes it easy to create a test.
Args:
stdout_str (str): Output of pool create command.
Returns:
Tuple (str, str): Tuple that contains two items; Pool UUID and Service
replicas if found. If not found, the tuple contains None.
"""
# Find the UUID and service replica list with a regex; the parenthesized
# groups capture the values that are returned.
uuid = None
svc = None
match = re.search(r" UUID: (.+), Service replicas: (.+)", stdout_str)
if match:
uuid = match.group(1)
svc = match.group(2)
return uuid, svc
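# Illustrative usage (not part of the original module), built from the sample
# output quoted in the docstring above.
def _example_get_pool_uuid_service_replicas():
    sample_stdout = (
        "Active connections: [wolf-3:10001]\n"
        "Creating DAOS pool with 100MB SCM and 0B NvMe storage (1.000 ratio)\n"
        "Pool-create command SUCCEEDED: "
        "UUID: 9cf5be2d-083d-4f6b-9f3e-38d771ee313f, Service replicas: 0\n"
    )
    uuid, svc = get_pool_uuid_service_replicas_from_stdout(sample_stdout)
    # uuid == "9cf5be2d-083d-4f6b-9f3e-38d771ee313f", svc == "0"
    return uuid, svc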
# ************************************************************************
# *** External usage should be replaced by DmgCommand.storage_format() ***
# ************************************************************************
def storage_format(path, hosts, insecure=True):
"""Execute format command through dmg tool to servers provided.
Args:
path (str): path to tool's binary
hosts (list): list of servers to run format on.
insecure (bool): toggle insecure mode
Returns:
Avocado CmdResult object that contains exit status, stdout, and other information, or None if the command failed.
"""
# Create and setup the command
dmg = DmgCommand(path)
dmg.insecure.value = insecure
dmg.hostlist.value = hosts
try:
result = dmg.storage_format()
except CommandFailure as details:
print("<dmg> command failed: {}".format(details))
return None
return result
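# Illustrative usage of the deprecated module-level helper above (the binary
# path and host names are placeholders, not values from the original code):
# result = storage_format("/usr/bin", ["server-a", "server-b"], insecure=True)
# if result is not None:
#     print(result.stdout)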
|
StarcoderdataPython
|
3315123
|
<reponame>Rafeen/Inventory-Management-and-POS
from rest_framework_json_api import serializers
from sales.models.client_sales_model import ClientSales
class ClientSalesSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = ClientSales
fields = ('sku', 'quantity', 'client', 'warehouse', 'sales_date')
|
StarcoderdataPython
|
138939
|
import copy
import itertools
from collections import deque
from typing import TextIO, Tuple
from aoc2019.intcode import Computer, read_program
def query_position(x: int, y: int, computer: Computer) -> bool:
computer = copy.deepcopy(computer)
computer.send_input(x)
computer.send_input(y)
computer.run()
return computer.get_output() == 1
def find_line(y: int, x_min: int, x_max: int, computer: Computer) -> Tuple[int, int]:
# First find start of the line:
offset = 0
while not query_position(x_min, y, computer):
offset += 1
x_min += 1
x_max += offset
while query_position(x_max, y, computer):
x_max += 1
x_max -= 1
return x_min, x_max
def part1(data: TextIO) -> int:
computer = Computer(read_program(data))
x_min, x_max = (0, 0)
total = 0
for y in range(50):
x_min, x_max = find_line(y, x_min, x_max, computer)
total += min(x_max, 49) - min(x_min, 50) + 1
return total
def part2(data: TextIO) -> int:
computer = Computer(read_program(data))
x_min, x_max = (0, 0)
lines = deque()
for y in itertools.count():
x_min, x_max = find_line(y, x_min, x_max, computer)
lines.append((x_min, x_max))
if len(lines) == 100:
x_top_min, x_top_max = lines.popleft()
if x_top_max - x_min + 1 < 100:
continue
return x_min * 10000 + y - 99
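# Illustrative entry point (not in the original file); the puzzle input
# filename is an assumption.
if __name__ == "__main__":
    with open("day19_input.txt") as handle:
        print("part 1:", part1(handle))
    with open("day19_input.txt") as handle:
        print("part 2:", part2(handle))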
|
StarcoderdataPython
|
4837139
|
<reponame>Express50/CHARTextract
import numpy as np
def create_train_and_valid(ids, data, labels, train_percent=.6, randomizer=None):
"""Splits data in train and valid
Keyword Arguments:
train_percent {float} -- Ratio of train/total (default: {.6})
randomizer {np.random.RandomState} -- RandomState (default: {None})
Returns:
train ids{list} -- List of train ids
valid ids{list} -- List of valid ids
"""
if randomizer is None:
randomizer = np.random.RandomState()
x = randomizer.permutation(len(data))
train = np.sort(x[:int(len(x) * train_percent)])
valid = np.sort(x[int(len(x) * train_percent):])
dataset = {"train": {}, "valid": {}}
dataset["train"]["data"] = data[train]
dataset["train"]["labels"] = labels[train]
dataset["train"]["ids"] = ids[train]
dataset["valid"]["data"] = data[valid]
dataset["valid"]["labels"] = labels[valid]
dataset["valid"]["ids"] = ids[valid]
return dataset["train"], dataset["valid"]
def n_cross_validation_samples(ids, data, labels, n, train_percent=.6, train_num=None, random_seed=None):
if train_num is not None:
if train_num <= len(ids):
train_percent = train_num/len(ids)
else:
train_percent = 1
if random_seed is not None:
randomizer = np.random.RandomState(random_seed)
else:
randomizer = np.random.RandomState()
n_sample_data = []
for i in range(n):
cur_data = {"train": {"ids": [], "data": [], "labels": []}, "valid": {"ids": [], "data": [], "labels": []}}
cur_data["train"], cur_data["valid"]["ids"] = \
create_train_and_valid(ids, data, labels, train_percent, randomizer)
n_sample_data.append(cur_data)
return n_sample_data
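# Illustrative usage producing three reproducible train/valid samples (not part
# of the original module); assumes the corrected assignment above where the
# whole "valid" dict is taken from create_train_and_valid().
def _example_n_cross_validation_samples():
    ids = np.arange(20)
    data = np.arange(20) * 10
    labels = np.arange(20) % 2
    samples = n_cross_validation_samples(
        ids, data, labels, n=3, train_num=15, random_seed=42)
    # Each sample has 15 train items and 5 valid items.
    return samples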
|
StarcoderdataPython
|
3340902
|
import logging
import os
import shutil
import signal
from subprocess import Popen, PIPE, CalledProcessError
import sys
import tempfile
import time
from testify import *
from tron import cmd
# Used for getting the locations of the executables
_test_folder, _ = os.path.split(__file__)
_repo_root, _ = os.path.split(_test_folder)
log = logging.getLogger(__name__)
def wait_for_sandbox_success(func, start_delay=0.1, stop_at=5.0):
"""Call *func* repeatedly until it stops throwing TronSandboxException.
Wait increasing amounts from *start_delay* but wait no more than a total
of *stop_at* seconds
"""
delay = start_delay
total_time = 0.0
last_exception = None
while total_time < stop_at:
time.sleep(delay)
total_time += delay
try:
func()
return
except TronSandboxException as e:
delay *= 2
last_exception = e
raise last_exception
def handle_output(cmd, output, returncode):
"""Log process output before it is parsed. Raise exception if exit code
is nonzero.
"""
stdout, stderr = output
if stdout:
log.info("%s: %r", cmd, stdout)
if stderr:
log.warning("%s: %r", cmd, stderr)
if returncode != 0:
raise CalledProcessError(returncode, "Command '%s' returned non-zero exit status"
" %d" % (cmd, returncode))
class TronSandboxException(Exception):
pass
class MockConfigOptions(object):
def __init__(self, server):
self.server = server
class TronSandbox(object):
def __init__(self):
"""Set up a temp directory and store paths to relevant binaries."""
super(TronSandbox, self).__init__()
# I had a really hard time not calling this function make_sandwich()
self.tmp_dir = tempfile.mkdtemp(prefix='tron-')
self.tron_bin = os.path.join(_repo_root, 'bin')
self.tronctl_bin = os.path.join(self.tron_bin, 'tronctl')
self.trond_bin = os.path.join(self.tron_bin, 'trond')
self.tronfig_bin = os.path.join(self.tron_bin, 'tronfig')
self.tronview_bin = os.path.join(self.tron_bin, 'tronview')
self.log_file = os.path.join(self.tmp_dir, 'tron.log')
self.pid_file = os.path.join(self.tmp_dir, 'tron.pid')
self.config_file = os.path.join(self.tmp_dir, 'tron_config.yaml')
self.port = 8089
self.host = 'localhost'
self.run_time = None
self.trond_debug_args = ['--working-dir=%s' % self.tmp_dir,
'--log-file=%s' % self.log_file,
'--pid-file=%s' % self.pid_file,
'--port=%d' % self.port,
'--host=%s' % self.host]
self.tron_server_address = '%s:%d' % (self.host, self.port)
self.tron_server_uri = 'http://%s' % self.tron_server_address
self.tron_server_arg = '--server=%s' % self.tron_server_address
# mock a config object
self.config_obj = MockConfigOptions(self.tron_server_uri)
cmd.save_config(self.config_obj)
self._last_trond_launch_args = []
def delete(self):
"""Delete the temp directory and its contents"""
if os.path.exists(self.pid_file):
self.stop_trond()
shutil.rmtree(self.tmp_dir)
self.tmp_dir = None
self.tron_bin = None
self.tronctl_bin = None
self.trond_bin = None
self.tronfig_bin = None
self.tronview_bin = None
self.tron_server_uri = None
def save_config(self, config_text):
"""Save a tron configuration to tron_config.yaml. Mainly useful for
setting trond's initial configuration.
"""
with open(self.config_file, 'w') as f:
f.write(config_text)
return config_text
### trond control ###
def start_trond(self, args=None):
"""Start trond"""
args = args or []
self._last_trond_launch_args = args
p = Popen([sys.executable, self.trond_bin] + self.trond_debug_args + args,
stdout=PIPE, stderr=PIPE)
handle_output(self.trond_bin, p.communicate(), p.returncode)
# make sure trond has actually launched
wait_for_sandbox_success(self.list_all)
# (but p.communicate() already waits for the process to exit... -Steve)
return p.wait()
def stop_trond(self):
"""Stop trond based on the tron.pid in the temp directory"""
with open(self.pid_file, 'r') as f:
os.kill(int(f.read()), signal.SIGKILL)
def restart_trond(self, args=None):
"""Stop and start trond"""
if args is None:
args = self._last_trond_launch_args
self.stop_trond()
self.start_trond(args=args)
### www API ###
def _check_call_api(self, uri, data=None):
cmd.load_config(self.config_obj)
status, content = cmd.request(self.tron_server_uri, uri, data=data)
if status != cmd.OK or not content:
raise TronSandboxException("Error connecting to tron server at %s%s" % (self.tron_server_uri, uri))
return content
def upload_config(self, config_text):
"""Upload a tron configuration to the server"""
self._check_call_api('/config', {'config': config_text})
def get_config(self):
"""Get the text of the current configuration"""
return self._check_call_api('/config')['config']
def ctl(self, command, arg=None, run_time=None):
"""Call the www API like tronctl does. ``command`` can be one of
``(start, cancel, disable, enable, disableall, enableall, fail, succeed)``.
``run_time`` should be of the form ``YYYY-MM-DD HH:MM:SS``.
"""
content = self._check_call_api('/')
data = {'command': command}
if run_time is not None:
data['run_time'] = run_time
if arg is not None:
job_to_uri = cmd.make_job_to_uri(content)
service_to_uri = cmd.make_service_to_uri(content)
full_uri = cmd.obj_spec_to_uri(arg, job_to_uri, service_to_uri)
else:
full_uri = '/jobs'
self._check_call_api(full_uri, data=data)
def list_all(self):
"""Call the www API to list jobs and services."""
return self._check_call_api('/')
def list_events(self):
"""Call the www API to list all events."""
return self._check_call_api('/events')
def list_job(self, job_name):
"""Call the www API to list all runs of one job."""
return self._check_call_api('/jobs/%s' % job_name)
def list_job_events(self, job_name):
"""Call the www API to list all events of one job."""
return self._check_call_api('/jobs/%s/_events' % job_name)
def list_job_run(self, job_name, run_number):
"""Call the www API to list all actions of one job run."""
return self._check_call_api('/jobs/%s/%d' % (job_name, run_number))
def list_job_run_events(self, job_name, run_number):
"""Call the www API to list all actions of one job run."""
return self._check_call_api('/jobs/%s/%d/_events' % (job_name, run_number))
def list_action_run(self, job_name, run_number, action_name, num_lines=100):
"""Call the www API to display the results of an action."""
return self._check_call_api('/jobs/%s/%d/%s?num_lines=%d' % (job_name, run_number, action_name, num_lines))
def list_service(self, service_name):
return self._check_call_api('/services/%s' % service_name)
def list_service_events(self, service_name):
return self._check_call_api('/services/%s/_events' % service_name)
### Basic subprocesses ###
def tronctl(self, args=None):
"""Call tronctl with args and return ``(stdout, stderr)``"""
args = args or []
p = Popen([sys.executable, self.tronctl_bin] + args, stdout=PIPE, stderr=PIPE)
retval = p.communicate()
handle_output(self.tronctl_bin, retval, p.returncode)
return retval
def tronview(self, args=None):
"""Call tronview with args and return ``(stdout, stderr)``"""
args = args or []
p = Popen([sys.executable, self.tronview_bin] + args, stdout=PIPE, stderr=PIPE)
retval = p.communicate()
handle_output(self.tronview_bin, retval, p.returncode)
# TODO: Something with return value
# return p.wait()
# (but p.communicate() already waits for the process to exit... -Steve)
return retval
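# Illustrative end-to-end usage of TronSandbox (not part of the original file);
# the configuration text is a made-up placeholder and trond must be installed
# under the repository's bin/ directory for this to actually run.
# sandbox = TronSandbox()
# try:
#     sandbox.save_config("--- {}\n")
#     sandbox.start_trond()
#     print(sandbox.list_all())
#     sandbox.stop_trond()
# finally:
#     sandbox.delete()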
|
StarcoderdataPython
|
103042
|
#!/usr/bin/env python3
# Copyright 2020 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from cros.factory.gooftool import gbb
_TEST_DATA_PATH = os.path.join(os.path.dirname(__file__), 'testdata')
_TEST_GBB_PATH = os.path.join(_TEST_DATA_PATH, 'test_gbb')
class GBBTest(unittest.TestCase):
"""Unittest for gbb."""
def testGetData(self):
with open(_TEST_GBB_PATH, 'rb') as f:
gbb_content = gbb.UnpackGBB(f.read())
# HWID string
self.assertEqual(gbb_content.hwid.value, 'TEST HWID')
# SHA256 of HWID
self.assertEqual(
gbb_content.hwid_digest.value,
'846045dcb414d8ae984aa9c78c024b398b340d63afc85870606a3236a5459cfe')
# rootkey
self.assertEqual(gbb_content.rootkey.value, b'\xa5' * 4096)
# recovery key
self.assertEqual(gbb_content.recovery_key.value, b'\x5a' * 4096)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1678822
|
<gh_stars>10-100
import re
from unittest.mock import patch
from ..twitter.generate_tweets_feed import (
format_tweet,
generate_twitter_feed,
generate_xml,
)
from .data import (
empty_feed,
formatted_tweet_1,
formatted_tweet_2,
invalid_param as param,
tweet_1,
tweet_1_feed,
tweet_2,
tweet_100_feed,
)
from .utils import ToDotNotation, TwitterApi
def test_format_tweet():
assert format_tweet(ToDotNotation(tweet_1)) == formatted_tweet_1
assert format_tweet(ToDotNotation(tweet_2)) == formatted_tweet_2
@patch('tweepy.Cursor')
def test_tweetfeed_empty(fake_tweepy_no_tweets):
val = generate_twitter_feed(None, 'test', param)
# remove date
val = re.sub(
r'(<lastBuildDate>)(.*)(</lastBuildDate>)',
'<lastBuildDate></lastBuildDate>',
val,
)
assert val == empty_feed
@patch('tweepy.Cursor')
def test_tweetfeed_retweet(get_mock, fake_tweepy_retweet):
get_mock.return_value = fake_tweepy_retweet.return_value
val = generate_twitter_feed(None, 'test', param)
# remove date
val = re.sub(
r'(<lastBuildDate>)(.*)(</lastBuildDate>)',
'<lastBuildDate></lastBuildDate>',
val,
)
assert val == empty_feed
@patch('tweepy.Cursor')
def test_tweetfeed_ok(get_mock, fake_tweepy_ok):
get_mock.return_value = fake_tweepy_ok.return_value
val = generate_twitter_feed(None, 'test', param)
# remove date
val = re.sub(
r'(<lastBuildDate>)(.*)(</lastBuildDate>)',
'<lastBuildDate></lastBuildDate>',
val,
)
assert val == tweet_1_feed
@patch('tweepy.Cursor')
def test_tweetfeed_limit_ok(get_mock, fake_tweepy_200_ok):
get_mock.return_value = fake_tweepy_200_ok.return_value
val = generate_twitter_feed(None, 'test', param)
# remove date
val = re.sub(
r'(<lastBuildDate>)(.*)(</lastBuildDate>)',
'<lastBuildDate></lastBuildDate>',
val,
)
assert val == tweet_100_feed
@patch('tweepy.Cursor')
def test_tweetfeed_limit_with_retweet_ok(get_mock, fake_tweepy_220_ok):
get_mock.return_value = fake_tweepy_220_ok.return_value
val = generate_twitter_feed(None, 'test', param)
# remove date
val = re.sub(
r'(<lastBuildDate>)(.*)(</lastBuildDate>)',
'<lastBuildDate></lastBuildDate>',
val,
)
assert val == tweet_100_feed
@patch('tweepy.Cursor')
def test_generate_xml_ok(get_mock, fake_tweepy_ok):
get_mock.return_value = fake_tweepy_ok.return_value
val, code = generate_xml(TwitterApi(), 'test', param)
# remove date
val = re.sub(
r'(<lastBuildDate>)(.*)(</lastBuildDate>)',
'<lastBuildDate></lastBuildDate>',
val,
)
assert val == tweet_1_feed
assert code == 200
def test_generate_xml_no_api():
val, code = generate_xml(None, 'test', param)
assert val == 'error - Twitter parameters not defined'
assert code == 401
|
StarcoderdataPython
|
3246230
|
<gh_stars>10-100
from richkit.retrieve import dns
import unittest
class DNSTestCase(unittest.TestCase):
# Since A records change every time, just check whether we are retrieving a record or not
def setUp(self):
self.test_urls = ["www.google.co.uk", "www.cloudflare.com", "www.intranet.es.aau.dk"]
self.test_ips = ["8.8.8.8", "8.8.4.4", "1.1.1.1"]
def test_a_record(self):
for url in self.test_urls:
instance = dns.get_a_record(url)
self.assertIsNot(instance[0], None)
# Since PTR records change every time, just check whether we are retrieving a record or not
def test_ptr_record(self):
for url in self.test_ips:
instance = dns.get_ptr_record(url)
self.assertIsNot(instance[0], None)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1665017
|
from googleapiclient import discovery
import json
class Perspective:
"""
Initialize the class with your API_KEY, the required languages, and the required attributes with their respective thresholds in **kwargs.
You can also set up the developer name.
** Accepted attributes **
attributes = ["TOXICITY", "SEVERE_TOXICITY", "IDENTITY_ATTACK", "INSULT",
"PROFANITY", "THREAT", "SEXUALLY_EXPLICIT", "FLIRTATION", "SPAM",
"ATTACK_ON_AUTHOR", "ATTACK_ON_COMMENTER", "INCOHERENT",
"INFLAMMATORY", "OBSCENE", "SPAM", "UNSUBSTANTIAL"]
** Accepted Languages **
lang = ["en", "fr", "es", "de", "it", "pt", "ru"]
For more details on the API refer to the Perspective API Documentation
https://developers.perspectiveapi.com/s/about-the-api-attributes-and-languages
** Available Methods **
analyzeText() returns a dict mapping each requested attribute to a boolean: True if the returned summary score exceeds the specified threshold, False otherwise.
getTextReport() returns a JSON-formatted string containing probability scores for all the attributes passed during initialization.
"""
def __init__(self,
API_KEY: str,
developer_name: str = "perspectiveDeveloper",
lang: list = ['en'],
**kwargs):
self.API_KEY = API_KEY
self.lang = lang
self.attributeThresholds = {}
for key, value in kwargs.items():
self.attributeThresholds[key] = value
self.baseUrl = "https://commentanalyzer.googleapis.com/$discovery/rest?version=v1alpha1"
self.requestedAttributes = {}
self.analyzer = discovery.build(
"commentanalyzer",
"v1alpha1",
developerKey=self.API_KEY,
discoveryServiceUrl=self.baseUrl,
static_discovery=False,
)
def getTextReport(self, text: str) -> str:
for key in self.attributeThresholds:
self.requestedAttributes[key] = {}
analyze_request = {
'comment': {
'text': text
},
'languages': self.lang,
'requestedAttributes': self.requestedAttributes
}
res = self.analyzer.comments().analyze(body=analyze_request).execute()
formatted_res = json.dumps(res, indent=2)
return formatted_res
def analyzeText(self, text: str) -> dict:
for key in self.attributeThresholds:
self.requestedAttributes[key] = {}
analyze_request = {
'comment': {
'text': text
},
'languages': self.lang,
'requestedAttributes': self.requestedAttributes
}
res = self.analyzer.comments().analyze(body=analyze_request).execute()
data = {}
for key in res['attributeScores']:
data[key] = res['attributeScores'][key]['summaryScore'][
'value'] > self.attributeThresholds[key]
return data
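# Illustrative usage (not part of the original module); the API key is a
# placeholder and the threshold kwargs must use the attribute names listed in
# the class docstring.
if __name__ == "__main__":
    perspective = Perspective(
        API_KEY="YOUR_PERSPECTIVE_API_KEY",
        lang=["en"],
        TOXICITY=0.7,
        INSULT=0.5,
    )
    print(perspective.getTextReport("you are a wonderful person"))
    print(perspective.analyzeText("you are a wonderful person"))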
|
StarcoderdataPython
|
4838698
|
<filename>tests/ops/text/clean/test_change_case.py
import pytest
from jange.ops.text.clean import CaseChangeOperation, lowercase, uppercase
from jange.stream import DataStream
@pytest.mark.parametrize(
"fn,expected_mode", [(uppercase, "upper"), (lowercase, "lower")]
)
def test_helper_fns_returns_correct_operation(fn, expected_mode):
op: CaseChangeOperation = fn()
assert isinstance(op, CaseChangeOperation)
assert op.mode == expected_mode
def test_raises_error_when_invalid_mode():
with pytest.raises(ValueError):
CaseChangeOperation(mode="invalid mode")
@pytest.fixture
def ds():
return DataStream(items=["Aa", "bbB"])
@pytest.mark.parametrize(
"mode,expected",
[("upper", ["AA", "BBB"]), ("lower", ["aa", "bbb"]), ("capitalize", ["Aa", "Bbb"])],
)
def test_correctly_changes_cases(ds: DataStream, mode: str, expected: list):
op = CaseChangeOperation(mode=mode)
assert list(ds.apply(op)) == expected
|
StarcoderdataPython
|
1756003
|
import argparse
import os
import torch
class AbsPoseConfig:
def __init__(self):
description = 'Absolute Pose Regression'
parser = argparse.ArgumentParser(description=description)
self.parser = parser
# Add different groups for arguments
prog_group = parser.add_argument_group('General Program Config')
data_group = parser.add_argument_group('Data Loading Config', 'Options regarding image loading and preprocessing')
model_group = parser.add_argument_group('Model Config', 'Options regarding network model and optimization')
visdom_group = parser.add_argument_group('Visdom Config', 'Options regarding visdom server for visualization')
# Program general settings
prog_group.add_argument('--test', action='store_false', dest='training', help='set program to a testing phase')
prog_group.add_argument('--train', action='store_true', dest='training', help='set program to a training phase')
prog_group.add_argument('--validate', '-val', metavar='%d[N]', type=int, nargs=1,
help='evaluate model every N epochs during training')
prog_group.add_argument('--pretrained', metavar='%s', type=str, nargs=1, default=None,
help='the pre-trained weights to initialize the model(default: %(default)s)')
prog_group.add_argument('--resume', metavar='%s', type=str, nargs=1, default=None,
help='the checkpoint file to reload(default: %(default)s)')
prog_group.add_argument('--seed', '-s', metavar='%d', type=int, default=1,
help='seed for randomization(default: %(default)s)')
prog_group.add_argument('--odir', '-o', metavar='%s', type=str, required=True,
help='directory for program outputs')
prog_group.add_argument('--gpu', metavar='%d', type=int, default=0,
help='gpu device to use(cpu used if no available gpu)(default: %(default)s)')
# Data loading and preprocess
data_group.add_argument('--data_root', '-root', metavar='%s', type=str, default='data',
help='the root directory containing target datasets(default: %(default)s)' )
data_group.add_argument('--dataset', '-ds', metavar='%s', type=str, required=True,
help='the target dataset under data root' )
data_group.add_argument('--pose_txt', metavar='%s', default='dataset_train.txt',
help='the file to load pose labels(default: %(default)s)')
data_group.add_argument('--val_pose_txt', metavar='%s', type=str, default='dataset_test.txt',
help='the file to load validation image pose labels(default: %(default)s)')
data_group.add_argument('--batch_size', '-b', metavar='%d', type=int, default=75,
help='batch size to load the image data(default: %(default)s)')
data_group.add_argument('--num_workers', '-n', metavar='%d', type=int, default=0,
help='number of subprocesses for data loading(default: %(default)s)')
data_group.add_argument('--image_mean', '-imean', metavar='%s', type=str, default=None,
help='path of image_mean file name relative to the dataset path(default: %(default)s)')
data_group.add_argument('--rescale', '-rs', metavar='%d', type=int, default=256,
help='target size (the shorter edge) after rescaling(default: %(default)s)')
data_group.add_argument('--crop', '-c', metavar='%d', type=int, default=224,
help='random/center crop image to square with given size during train/test(default: %(default)s)')
data_group.add_argument('--normalize', '-norm', action='store_true', help='normalize image values with imagenet mean&std')
# Model training loss
model_group.add_argument('--beta', metavar='%s', type=int, default=1,
help='scaling factor before the orientation loss term(default: %(default)s)')
model_group.add_argument('--learn_weighting', action='store_true',
help='learn the weighting factor during training')
model_group.add_argument('--homo_init', metavar=('%f[Sx]', '%f[Sq]'), type=float, nargs=2, default=[0.0, -3.0],
help='initial guess for homoscedastic uncertainties variables(default: %(default)s)')
model_group.add_argument('--epochs', '-ep', metavar='%d', type=int, default=900,
help='number of training epochs(default: %(default)s)')
model_group.add_argument('--optim', type=str, default='Adam', choices=['Adam', 'SGD'],
help='specify optimizer(default: %(default)s)')
model_group.add_argument('--epsilon', '-eps', metavar='%f', type=float, default=1.0,
help='epsilon factor for Adam(default: %(default)s)')
model_group.add_argument('--momentum', '-mom', metavar='%f', type=float, default=0.9,
help='momentum factor for SGD(default: %(default)s)')
model_group.add_argument('--lr_init', '-lr', metavar='%f', type=float, default=5e-3,
help='initial learning rate(default: %(default)s)')
model_group.add_argument('--lr_decay', '-lrd', metavar=('%f[decay factor]', '%d[step size]'), nargs=2, default=None,
help='learning rate decay factor and step(default: %(default)s)')
model_group.add_argument('--weight_decay', '-wd', metavar='%f', type=float, default=1e-4,
help='weight decay rate(default: %(default)s)')
# Visdom server setting for visualization
visdom_group.add_argument('--visenv', '-venv', metavar='%s', type=str, default=None,
help='the environment for visdom to save all data(default: %(default)s)')
visdom_group.add_argument('--viswin', '-vwin', metavar='%s', type=str, default=None,
help='the prefix appended to window title(default: %(default)s)')
visdom_group.add_argument('--visport', '-vp', type=int, default=9333,
help='the port where the visdom server is running(default: %(default)s)')
visdom_group.add_argument('--vishost', '-vh', type=str, default='localhost',
help='the hostname where the visdom server is running(default: %(default)s)')
model_group.add_argument('--network', type=str, choices=['PoseNet', 'PoseLSTM'], default='PoseNet',
help='network architecture to use(default: %(default)s)')
def parse(self):
config = self.parser.parse_args()
return config
if __name__ == '__main__':
conf = AbsPoseConfig().parse()
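# Example invocation (illustrative; the script name, dataset name and paths are
# placeholders, not values from the original code):
#   python abspose_config.py --train -ds ShopFacade -root data -o output/shopfacade \
#       --network PoseNet -b 75 -lr 5e-3 --epochs 900 --validate 20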
|
StarcoderdataPython
|
1705705
|
#!/usr/bin/env python3
# @Author: <NAME> <archer>
# @Date: 2019-07-15
# @Email: george raven community at pm dot me
# @Filename: mongo_handler.py
# @Last modified by: archer
# @Last modified time: 2019-08-16
# @License: Please see LICENSE in project root
from __future__ import print_function, absolute_import # python 2-3 compat
import os
import subprocess
import time
from pymongo import MongoClient, errors, database, command_cursor
import gridfs
import re
class Mongo(object):
"""Python2/3 compatible MongoDb utility wrapper.
This wrapper saves its state in an internal overridable dictionary
such that you can adapt it to your requirements, if you should need to do
something unique, the caveat being it becomes harder to read.
:param args: Dictionary of overides.
:param logger: Function address to print/ log to (default: print).
:type args: dictionary
:type logger: function address
:example: Mongo({"db_user_name": "someUsername",
"db_password": "<PASSWORD>"})
:example: Mongo()
"""
def __init__(self, args=None, logger=None):
"""Init class with defaults.
optionally accepts dictionary of default overides.
"""
args = args if args is not None else dict()
self.home = os.path.expanduser("~")
defaults = {
# generic options section
"db_user_name": "groot",
"db_password": "<PASSWORD>",
"db_config_path": None,
"db_intervention": False,
"db_authentication": "SCRAM-SHA-1",
"db_authentication_database": None,
"db_user_role": "readWrite",
"db_ip": "localhost",
"db_bind_ip": ["localhost"],
"db_name": "nemesyst",
"db_collection_name": "test",
"db_port": "27017",
"db_path": "db",
"db_log_path": "db" + "/log",
"db_log_name": "mongoLog",
"db_cursor_timeout": 600000,
"db_batch_size": 32,
"pylog": logger if logger is not None else print,
"db": None,
"db_pipeline": None,
"gfs": None,
# replica options section
"db_replica_set_name": None,
"db_replica_read_preference": "primary",
"db_replica_max_staleness": -1,
# tls options section
"db_tls": False,
"db_tls_ca_file": None,
"db_tls_certificate_key_file": None,
"db_tls_certificate_key_file_password": None,
"db_tls_crl_file": None,
}
self.args = self._mergeDicts(defaults, args)
# final adjustments to newly defined dictionary
self.args["db_path"] = os.path.abspath(self.args["db_path"])
self.args["db_log_path"] = os.path.abspath(self.args["db_log_path"])
__init__.__annotations__ = {"args": dict, "logger": print, "return": None}
def init(self, db_path=None, db_log_path=None, db_log_name=None,
db_config_path=None):
"""Initialise the database.
Includes ensuring db path and db log path exist and generating,
creating the DB files, and adding an authentication user.
All of this should be done on a localhost port so that the
unprotected database is never exposed.
:param db_path: Desired directory of MongoDB database files.
:param db_log_path: Desired directory of MongoDB log files.
:param db_log_name: Desired name of log file.
:param db_config_path: Config file to pass to MongoDB.
:type db_path: string
:type db_config_path: string
:type db_log_path: string
:type db_log_name: string
"""
db_path = db_path if db_path is not None else self.args["db_path"]
db_log_path = db_log_path if db_log_path is not None else \
self.args["db_log_path"]
db_log_name = db_log_name if db_log_name is not None else \
self.args["db_log_name"]
db_config_path = db_config_path if db_config_path is not None else \
self.args["db_config_path"]
self.stop()
time.sleep(2)
# create directories
subprocess.call([
"mkdir", "-p",
str(db_path),
str(db_log_path),
])
cli_args = [ # non authentication version of db start
"mongod",
"--bind_ip", "localhost",
"--port", self.args["db_port"],
"--dbpath", str(db_path),
"--logpath", str(os.path.join(db_log_path, db_log_name)),
"--quiet"
]
if(db_config_path is not None):
pass
cli_args += [
"--config", str(db_config_path)
]
self.args["pylog"]("Launching unauth db on localhost", cli_args)
# launch unauth db
subprocess.Popen(cli_args)
# wait for db to come up
time.sleep(2)
# connect to db in local scope
self._addUser()
# manual intervention if desired
if(self.args["db_intervention"]):
# INITIATING MANUAL SUPERPOWERS
self.login(db_ip="localhost")
# close the unauth db
self.stop()
init.__annotations__ = {"db_path": str, "db_log_path": str,
"db_log_name": str, "db_config_path": str,
"return": None}
def connect(self, db_ip=None, db_port=None, db_authentication=None,
db_authentication_database=None,
db_user_name=None, db_password=None, db_name=None,
db_replica_set_name=None, db_replica_read_preference=None,
db_replica_max_staleness=None, db_tls=None,
db_tls_ca_file=None, db_tls_certificate_key_file=None,
db_tls_certificate_key_file_password=None,
db_tls_crl_file=None,
db_collection_name=None):
"""Connect to a specific mongodb database.
This sets the internal db client, which is necessary to connect to
and use the associated database. Without it operations such as dump
into the database will fail. This is replica set capable.
:param db_ip: Database hostname or ip to connect to.
:param db_port: Database port to connect to.
:param db_authentication: The authentication method to use on db.
:param db_user_name: Username to use for authentication to db_name.
:param db_password: Password for db_user_name in database db_name.
:param db_name: The name of the database to connect to.
:param db_replica_set_name: Name of the replica set to connect to.
:param db_replica_read_preference: What rep type to prefer reads from.
:param db_replica_max_staleness: Max seconds a replica is allowed to lag behind.
:param db_tls: use TLS for db connection.
:param db_tls_certificate_key_file: Certificate and key file for tls.
:param db_tls_certificate_key_file_password: Cert and key file pass.
:param db_tls_crl_file: Certificate revocation list file path.
:param db_collection_name: GridFS collection to use.
:type db_ip: string
:type db_port: string
:type db_authentication: string
:type db_user_name: string
:type db_password: string
:type db_name: string
:type db_replica_set_name: string
:type db_replica_read_preference: string
:type db_replica_max_staleness: string
:type db_tls: bool
:type db_tls_certificate_key_file: string
:type db_tls_certificate_key_file_password: string
:type db_tls_crl_file: string
:type db_collection_name: string
:return: database client object
:rtype: pymongo.database.Database
"""
# ip
db_ip = db_ip if db_ip is not None else self.args["db_ip"]
# port
db_port = db_port if db_port is not None else self.args["db_port"]
# authentication mechanism name
db_authentication = db_authentication if db_authentication is not \
None else self.args["db_authentication"]
# authentication destination db name
db_authentication_database = db_authentication_database if\
db_authentication_database is not \
None else self.args["db_authentication_database"]
# username
db_user_name = db_user_name if db_user_name is not None else \
self.args["db_user_name"]
# password
db_password = db_password if db_password is not None else \
self.args["db_password"]
# database name
db_name = db_name if db_name is not None else self.args["db_name"]
# replica set name
db_replica_set_name = db_replica_set_name if db_replica_set_name is \
not None else self.args["db_replica_set_name"]
# replica read preference
db_replica_read_preference = db_replica_read_preference if \
db_replica_read_preference is not None else \
self.args["db_replica_read_preference"]
# replica staleness
db_replica_max_staleness = db_replica_max_staleness if \
db_replica_max_staleness is not None else \
self.args["db_replica_max_staleness"]
# to use tls?
db_tls = db_tls if db_tls is not None else self.args["db_tls"]
# certificate authoritys certificate file
db_tls_ca_file = db_tls_ca_file if db_tls_ca_file is not None else \
self.args["db_tls_ca_file"]
# client certificate & key file
db_tls_certificate_key_file = db_tls_certificate_key_file if \
db_tls_certificate_key_file is not None else \
self.args["db_tls_certificate_key_file"]
# client certificate and key file password
db_tls_certificate_key_file_password = \
db_tls_certificate_key_file_password if \
db_tls_certificate_key_file_password is not None else \
self.args["db_tls_certificate_key_file_password"]
# tls revocation certificates file
db_tls_crl_file = db_tls_crl_file if db_tls_crl_file is not None else \
self.args["db_tls_crl_file"]
# collection name
db_collection_name = db_collection_name if db_collection_name is not \
None else self.args["db_collection_name"]
client_args = {}
client_args["host"] = ["{0}:{1}".format(str(db_ip), str(db_port))]
if (db_authentication is not None) and (db_authentication != ""):
# authentication
client_args["authMechanism"] = db_authentication
client_args["username"] = db_user_name
client_args["password"] = <PASSWORD>
client_args["authSource"] = db_authentication_database if \
db_authentication_database is not None else db_name
if (db_replica_set_name is not None):
# replica set
client_args["replicaset"] = db_replica_set_name
client_args["readPreference"] = db_replica_read_preference
client_args["maxStalenessSeconds"] = db_replica_max_staleness
if (db_tls is not None):
# tls
client_args["tls"] = db_tls # False
client_args["tlsCAFile"] = db_tls_ca_file # None
client_args["tlsCertificateKeyFile"] = db_tls_certificate_key_file
client_args["tlsCertificateKeyFilePassword"] = \
db_tls_certificate_key_file_password # None
client_args["tlsCRLFile"] = db_tls_crl_file # None
# TODO add these in next if user has them separate
# client_args["ssl_certfile"] = None
# client_args["ssl_keyfile"] = None
client = MongoClient(**client_args)
db = client[db_name]
self.args["db"] = db
return db
connect.__annotations__ = {"db_ip": str,
"db_port": str,
"db_authentication": str,
"db_user_name": str,
"db_password": str,
"db_name": str,
"db_replica_set_name": str,
"db_collection_name": str,
"db_replica_read_preference": str,
"db_replica_max_staleness": str,
"db_tls": bool,
"db_tls_ca_file": str,
"db_tls_certificate_key_file": str,
"db_tls_certificate_key_file_password": str,
"db_tls_crl_file": str,
"return": database.Database}
def login(self, db_port=None, db_user_name=None, db_password=None,
db_name=None, db_ip=None):
"""Log in to database, interrupt, and availiable via cli.
:param db_port: Database port to connect to.
:param db_user_name: Database user to authenticate as.
:param db_password: User password to authenticate with.
:param db_name: Database to authenticate to, the authentication db.
:param db_ip: Database ip to connect to.
:type db_port: string
:type db_user_name: string
:type db_password: string
:type db_name: string
:type db_ip: string
"""
db_port = db_port if db_port is not None else self.args["db_port"]
db_user_name = db_user_name if db_user_name is not None else \
self.args["db_user_name"]
db_password = db_password if db_password is not None else \
self.args["db_password"]
db_name = db_name if db_name is not None else self.args["db_name"]
db_ip = db_ip if db_ip is not None else self.args["db_ip"]
loginArgs = [
"mongo",
"--port", str(db_port),
"-u", str(db_user_name),
"-p", str(db_password),
"--authenticationDatabase", str(db_name),
str(db_ip)
]
subprocess.call(loginArgs)
login.__annotations__ = {"db_port": str, "db_user_name": str,
"db_password": str, "db_name": str, "db_ip": str,
"return": None}
def start(self, db_ip=None, db_port=None, db_path=None, db_log_path=None,
db_log_name=None, db_cursor_timeout=None, db_config_path=None,
db_replica_set_name=None):
"""Launch an on machine database with authentication.
:param db_ip: List of IPs to accept connections from.
:param db_port: Port desired for database.
:param db_path: Path to parent dir of database.
:param db_log_path: Path to parent dir of log files.
:param db_log_name: Desired base name for log files.
:param db_cursor_timeout: Set timeout time for unused cursors.
:param db_config_path: Config file path to pass to MongoDB.
:type db_ip: list
:type db_port: string
:type db_path: string
:type db_log_path: string
:type db_log_name: string
:type db_cursor_timeout: integer
:type db_config_path: string
:rtype: subprocess.Popen
:return: Subprocess of running MongoDB.
"""
db_bind_ip = db_ip if db_ip is not None else self.args["db_bind_ip"]
db_port = db_port if db_port is not None else self.args["db_port"]
db_path = db_path if db_path is not None else self.args["db_path"]
db_log_path = db_log_path if db_log_path is not None else \
self.args["db_log_path"]
db_log_name = db_log_name if db_log_name is not None else \
self.args["db_log_name"]
db_cursor_timeout = db_cursor_timeout if db_cursor_timeout is not \
None else self.args["db_cursor_timeout"]
db_replica_set_name = db_replica_set_name if db_replica_set_name is \
not None else self.args["db_replica_set_name"]
self.args["pylog"]("Starting mongodb: auth=",
str(self.args["db_authentication"]))
cli_args = [
"mongod",
"--bind_ip", ','.join(map(str, db_bind_ip)),
"--port", str(db_port),
"--dbpath", str(db_path),
"--logpath", str(os.path.join(db_log_path, db_log_name)),
"--setParameter", str("cursorTimeoutMillis=" +
str(db_cursor_timeout)),
"--auth",
"--quiet"
]
if(db_replica_set_name is not None):
cli_args += [
"--replSet", str(db_replica_set_name)
]
if(db_config_path is not None):
pass
cli_args += [
"--config", str(db_config_path)
]
time.sleep(2)
db_process = subprocess.Popen(cli_args)
time.sleep(2)
self.args["db_process"] = db_process
return db_process
start.__annotations__ = {"db_ip": str, "db_bind_ip": list,
"db_port": str, "db_path": str,
"db_log_path": str, "db_log_name": str,
"db_cursor_timeout": int,
"db_replica_set_name": str,
"db_config_path": str,
"return": subprocess.Popen}
def stop(self, db_path=None):
"""Stop a running local database.
:param db_path: The path to the database to shut down.
:type db_path: string
:return: Subprocess of database closer.
:rtype: subprocess.Popen
"""
db_path = db_path if db_path is not None else self.args["db_path"]
self.args["pylog"]("Shutting down MongoDB.")
process = subprocess.Popen(
["mongod",
"--dbpath", str(db_path),
"--shutdown"]
)
time.sleep(0.5)
return process
stop.__annotations__ = {"return": subprocess.Popen}
def _addUser(self):
"""Add a user with given permissions to the authentication database."""
local_mongourl = "mongodb://{0}:{1}/".format(
"localhost", self.args["db_port"])
# self.args["pylog"]("Adding mongodb user:",
# str(self.args["db_user_name"]),
# ", pass:",
# str(type(self.args["db_password"])),
# ", role:", str(self.args["db_user_role"]),
# ", authdb:", str(self.args["db_name"]))
debug_status = {
"mongodb-user:": self.args["db_user_name"],
"user-password": type(self.args["db_password"]),
"role": self.args["db_user_role"],
"authdb": self.args["db_name"],
"mongo-url": local_mongourl
}
self.args["pylog"]("Adding user:", debug_status)
client = MongoClient(local_mongourl)
db = client[self.args["db_name"]]
try:
if(self.args["db_user_role"] == "all"):
db.command("createUser",
self.args["db_user_name"],
pwd=self.args["db_password"],
roles=["readWrite", "dbAdmin"])
else:
db.command("createUser",
self.args["db_user_name"],
pwd=self.args["<PASSWORD>"],
roles=[self.args["db_user_role"]])
except errors.DuplicateKeyError: # it used to be a duplicate key error
self.args["pylog"](self.args["db_user_name"] + "@" +
self.args["db_name"],
"already exists skipping (DuplicateKeyError).")
except errors.OperationFailure as e:
# in a new version of pymongo if a user exists already it is now
# no longer a duplicate key error, so we have to split a genuine
# operation failure vs an already existing user which is fine.
# pacman -Q python-pymongo = 3.9.0-1 so this version breaks it
self.args["pylog"]("can not add user:",
str(self.args["db_user_name"]) + "@" +
str(self.args["db_name"]),
"ensure correct " +
"--db-user-name, and " +
"--db-password are being used.")
split_e = re.split(r'\W+', str(e))
if(split_e[-2] == "already") and (split_e[-1] == "exists"):
self.args["pylog"]("user already exists, skipping.")
else:
raise
_addUser.__annotations__ = {"return": None}
def debug(self):
"""Log function to help track the internal state of the class.
Simply logs working state of args dict.
"""
self.args["pylog"](self.args)
debug.__annotations__ = {"return": None}
def dump(self, db_collection_name, data, db=None):
"""Import data dictionary into database.
:param db_collection_name: Collection name to import into.
:param data: Data to import into database.
:param db: Database to import data into.
:type db_collection_name: string
:type data: dictionary
:type db: pymongo.database.Database
:example: dump(db_collection_name="test",
data={"subdict":{"hello": "world"}})
"""
db = db if db is not None else self.args["db"]
if isinstance(data, dict) and data:
db[str(db_collection_name)].insert_one(data)
elif isinstance(data, tuple) and data:
gfs = gridfs.GridFS(db, collection=db_collection_name)
gfs.put(data[1], **data[0])
dump.__annotations__ = {"db_collection_name": str, "data": dict,
"db": database.Database, "return": None}
def _mergeDicts(self, *dicts):
"""Given multiple dictionaries, merge together in order."""
result = {}
for dictionary in dicts:
result.update(dictionary) # merge each dictionary in order
return result
_mergeDicts.__annotations__ = {"dicts": dict, "return": dict}
def getCursor(self, db=None, db_pipeline=None, db_collection_name=None):
"""Use aggregate pipeline to get a data-cursor from the database.
This cursor is what mongodb provides to allow you to request the data
from the database in a manner you control, instead of just getting
a big dump from the database.
:param db_pipeline: Mongodb aggregate pipeline data to transform and
retrieve the data as you request.
:param db_collection_name: The collection name which we will pull data
from using the aggregate pipeline.
:param db: Database object to operate pipeline on.
:type db_pipeline: list of dicts
:type db_collection_name: str
:type db: pymongo.database.Database
:return: Command cursor to fetch the data with.
:rtype: pymongo.command_cursor.CommandCursor
"""
db_pipeline = db_pipeline if db_pipeline is not None else \
self.args["db_pipeline"]
db_collection_name = db_collection_name if db_collection_name is not \
None else self.args["db_collection_name"]
db = db if db is not None else self.args["db"]
# from string to pymongo.collection.Collection
db_collection = db[db_collection_name]
db_data_cursor = db_collection.aggregate(db_pipeline,
allowDiskUse=True)
self.args["db_data_cursor"] = db_data_cursor
return db_data_cursor
getCursor.__annotations__ = {"db_pipeline": list,
"db_collection_name": str,
"db": database.Database,
"return": command_cursor.CommandCursor}
def getBatches(self, db_batch_size=None, db_data_cursor=None):
"""Get database cursor data in batches.
:param db_batch_size: The number of items to return in a single round.
:param db_data_cursor: The cursor to use to retrieve data from db.
:type db_batch_size: integer
:type db_data_cursor: command_cursor.CommandCursor
:return: yields a list of items requested.
:rtype: list of dicts
:todo: desperately needs a rewrite and correction of bug. Last value
always fails. I want this in a magic function too to make it easy.
"""
db_batch_size = db_batch_size if db_batch_size is not None else \
self.args["db_batch_size"]
db_data_cursor = db_data_cursor if db_data_cursor is not None else \
self.args["db_data_cursor"]
cursor = db_data_cursor
if(cursor is not None):
while(cursor.alive):
yield self._nextBatch(cursor, db_batch_size)
self.args["pylog"]("cursor is now dead.")
else:
self.args["pylog"]("Your cursor is None, please Mongo.connect()")
getBatches.__annotations__ = {"db_batch_size": int,
"db_data_cursor":
command_cursor.CommandCursor,
"return": list}
def getFiles(self, db_batch_size=None, db_data_cursor=None,
db_collection_name=None, db=None):
"""Get gridfs files from mongodb by id using cursor to .files.
:param db_batch_size: The number of items to return in a single round.
:param db_data_cursor: The cursor to use to retrieve data from db.
:param db_collection_name: The top level collection name
not including .chunks or .files where gridfs is to operate.
:param db: Database object to operate pipeline on.
:type db_batch_size: integer
:type db_data_cursor: command_cursor.CommandCursor
:type db_collection_name: str
:type db: pymongo.database.Database
:return: yields a list of tuples containing (item requested, metadata).
"""
db_data_cursor = db_data_cursor if db_data_cursor is not None else \
self.args["db_data_cursor"]
db_batch_size = db_batch_size if db_batch_size is not None else \
self.args["db_batch_size"]
db_collection_name = db_collection_name if db_collection_name is not \
None else self.args["db_collection_name"]
db = db if db is not None else self.args["db"]
gfs = gridfs.GridFS(db, collection=db_collection_name)
for batch in self.getBatches(db_batch_size=db_batch_size,
db_data_cursor=db_data_cursor):
gridout_list = list(map(
lambda doc: {"gridout": gfs.get(doc["_id"]),
"_id": doc["_id"]}, batch))
# # equivalent for loop
# gridout_list = []
# for doc in batch:
# gridout_list.extend({"gridout": gfs.get(doc["_id"]),
# "_id": doc["_id"]})
yield gridout_list
getFiles.__annotations__ = {"db_batch_size": int,
"db_data_cursor": command_cursor.CommandCursor,
"db_collection_name": str,
"db": database.Database,
"return": list}
def _nextBatch(self, cursor, db_batch_size):
"""Return the very next batch in mongoDb cursor."""
batch = []
try:
while(len(batch) < db_batch_size):
# cursor.batch_size(0) # batch size not yet set
singleExample = cursor.next()
batch.append(singleExample)
except StopIteration:
pass # will eventually reach the end
return batch
def __setitem__(self, key, value):
"""Set a single arg or state by, (key, value)."""
self.args[key] = value
__setitem__.__annotations__ = {"key": str, "value": any, "return": None}
def __getitem__(self, key):
"""Get a single arg or state by, (key, value)."""
try:
return self.args[key]
except KeyError:
return None # does not exist is the same as None, graceful catch
__getitem__.__annotations__ = {"key": str, "return": any}
def __delitem__(self, key):
"""Delete a single arg or state by, (key, value)."""
try:
del self.args[key]
except KeyError:
pass # job is not done but equivalent outcomes so will not error
__delitem__.__annotations__ = {"key": str, "return": None}
def __iter__(self):
"""Iterate through housed dictionary, for looping."""
raise NotImplementedError("iter() is not yet implemented")
# self.db.connect()
# cursor = self.db.getData(pipeline=self.getPipe(
# self.args["pipeline"]), db_collection_name=self.args["coll"])
#
# while(cursor.alive):
# try:
# yield self._nextBatch(cursor)
# except StopIteration:
# return
__iter__.__annotations__ = {"return": any}
def __len__(self):
"""Return the first order length of the dictionary."""
return len(self.args)
__len__.__annotations__ = {"return": int}
def _mongo_unit_test():
"""Unit test of MongoDB compat."""
import datetime
import pickle
# create Mongo object to use
db = Mongo({"test2": 2, "db_port": "65535"})
# testing magic functions
db["test2"] = 3 # set item
db["test2"] # get item
len(db) # len
del db["test2"] # del item
# output current state of Mongo
db.debug()
# stop any active databases already running at the db path location
db.stop()
# hold for 2 seconds to give the db time to start
time.sleep(2)
# attempt to initialise the database, as in create the database with users
db.init()
# hold to let the db to launch the now new unauthenticated db
time.sleep(2)
# start the authenticated db, you will now need a username password access
db.start()
# warm up time for new authentication db
time.sleep(2)
# create a connection to the database so we can do database operations
db.connect()
db.debug()
# import data into mongodb debug collection
db.dump(db_collection_name="test", data={
"string": "99",
"number": 99,
"binary": bin(99),
"subdict": {"hello": "world"},
"subarray": [{"hello": "worlds"}, {"hi": "jim"}],
"timedate": datetime.datetime.utcnow(),
})
# testing gridfs insert item into database
db.dump(db_collection_name="test", data=(
{"utctime": datetime.datetime.utcnow()},
b"some_test_string"
# pickle.dumps("some_test_string")
))
# log into the database so user can manually check data import
db.login()
# attempt to retrieve the data that exists in the collection as a cursor
c = db.getCursor(db_collection_name="test", db_pipeline=[{"$match": {}}])
# iterate through the data in batches to minimise requests
for dataBatch in db.getBatches(db_batch_size=32, db_data_cursor=c):
print("Returned number of documents:", len(dataBatch))
# define a pipeline to get the latest gridfs file in a given collection
fs_pipeline = [{'$sort': {'uploadDate': -1}},
{'$limit': 5},
{'$project': {'_id': 1}}]
# get a cursor to get us the ID of files we desire
fc = db.getCursor(db_collection_name="test.files", db_pipeline=fs_pipeline)
# use cursor and get files to collect our data in batches
for batch in db.getFiles(db_batch_size=2, db_data_cursor=fc):
for doc in batch:
# now read the gridout object
print(doc["gridout"].read())
# finally close out database
db.stop()
if(__name__ == "__main__"):
_mongo_unit_test()
|
StarcoderdataPython
|
79580
|
"""Factory classes for easily generating test objects."""
from .activation import Activation
from .annotation import Annotation
from .annotation_moderation import AnnotationModeration
from .auth_client import AuthClient, ConfidentialAuthClient
from .auth_ticket import AuthTicket
from .authz_code import AuthzCode
from .base import set_session
from .document import Document, DocumentMeta, DocumentURI
from .feature import Feature
from .flag import Flag
from .group import Group, OpenGroup, RestrictedGroup
from .group_scope import GroupScope
from .job import Job, SyncAnnotationJob
from .organization import Organization
from .setting import Setting
from .token import DeveloperToken, OAuth2Token
from .user import User
from .user_identity import UserIdentity
__all__ = (
"Activation",
"Annotation",
"AnnotationModeration",
"AuthClient",
"AuthTicket",
"AuthzCode",
"ConfidentialAuthClient",
"DeveloperToken",
"Document",
"DocumentMeta",
"DocumentURI",
"Feature",
"Flag",
"Group",
"GroupScope",
"Job",
"OAuth2Token",
"OpenGroup",
"Organization",
"RestrictedGroup",
"Setting",
"SyncAnnotationJob",
"User",
"UserIdentity",
"set_session",
)
|
StarcoderdataPython
|
1623146
|
#master implementation for UDP Hole Punching
#run this on a reachable machine (like an AWS Instance or something)
import socket
import sys
server_listening_port = 3540
sockfd = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
sockfd.bind(("", server_listening_port))
print(f"listening on port {server_listening_port}")
client_requests = []
while True:
data, addr = sockfd.recvfrom(32)
client_requests.append(addr)
print(f"Connection from {addr}!")
if len(client_requests) == 2:
client_a_ip = client_requests[0][0]
client_a_port = client_requests[0][1]
client_b_ip = client_requests[1][0]
client_b_port = client_requests[1][1]
sockfd.sendto(f"{client_a_ip}:{client_a_port}".encode(),client_requests[1])
sockfd.sendto(f"{client_b_ip}:{client_b_port}".encode(),client_requests[0])
client_requests = []
sockfd.close()
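# A minimal client-side sketch for the rendezvous protocol above (an assumption,
# not part of this file): each peer sends any datagram to this master, receives
# the other peer's public "ip:port", and then both peers send to each other so
# their NATs open matching holes. SERVER_IP is a placeholder.
#
#   import socket
#   SERVER_IP = "203.0.113.10"  # placeholder public address of this master
#   sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   sock.sendto(b"register", (SERVER_IP, 3540))
#   peer, _ = sock.recvfrom(32)                       # e.g. b"198.51.100.7:54321"
#   peer_ip, peer_port = peer.decode().split(":")
#   sock.sendto(b"hello", (peer_ip, int(peer_port)))  # punch the hole
#   print(sock.recvfrom(32))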
|
StarcoderdataPython
|
3371160
|
import os
import argparse
import joblib
import numpy as np
from PIL import Image
from torchvision import transforms, datasets
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
from face_recognition import preprocessing, FaceFeaturesExtractor, FaceRecogniser
MODEL_DIR_PATH = 'model'
def parse_args():
parser = argparse.ArgumentParser(
description='Script for training Face Recognition model. You can either give path to dataset or provide path '
'to pre-generated embeddings, labels and class_to_idx. You can pre-generate this with '
'util/generate_embeddings.py script.')
parser.add_argument('-d', '--dataset-path', help='Path to folder with images.')
parser.add_argument('-e', '--embeddings-path', help='Path to file with embeddings.')
parser.add_argument('-l', '--labels-path', help='Path to file with labels.')
parser.add_argument('-c', '--class-to-idx-path', help='Path to pickled class_to_idx dict.')
parser.add_argument('--grid-search', action='store_true',
help='If this option is enabled, grid search will be performed to estimate C parameter of '
'Logistic Regression classifier. In order to use this option you have to have at least '
'3 examples of every class in your dataset. It is recommended to enable this option.')
return parser.parse_args()
def dataset_to_embeddings(dataset, features_extractor):
transform = transforms.Compose([
preprocessing.ExifOrientationNormalize(),
transforms.Resize(1024)
])
embeddings = []
labels = []
for img_path, label in dataset.samples:
print(img_path)
_, embedding = features_extractor(transform(Image.open(img_path).convert('RGB')))
if embedding is None:
print("Could not find face on {}".format(img_path))
continue
if embedding.shape[0] > 1:
print("Multiple faces detected for {}, taking one with highest probability".format(img_path))
embedding = embedding[0, :]
embeddings.append(embedding.flatten())
labels.append(label)
return np.stack(embeddings), labels
def load_data(args, features_extractor):
if args.embeddings_path:
return np.loadtxt(args.embeddings_path), \
np.loadtxt(args.labels_path, dtype='str').tolist(), \
joblib.load(args.class_to_idx_path)
dataset = datasets.ImageFolder(args.dataset_path)
embeddings, labels = dataset_to_embeddings(dataset, features_extractor)
return embeddings, labels, dataset.class_to_idx
def train(args, embeddings, labels):
softmax = LogisticRegression(solver='lbfgs', multi_class='multinomial', C=10, max_iter=10000)
if args.grid_search:
clf = GridSearchCV(
estimator=softmax,
param_grid={'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000]},
cv=3
)
else:
clf = softmax
clf.fit(embeddings, labels)
return clf.best_estimator_ if args.grid_search else clf
def main():
args = parse_args()
features_extractor = FaceFeaturesExtractor()
embeddings, labels, class_to_idx = load_data(args, features_extractor)
clf = train(args, embeddings, labels)
idx_to_class = {v: k for k, v in class_to_idx.items()}
target_names = map(lambda i: i[1], sorted(idx_to_class.items(), key=lambda i: i[0]))
print(metrics.classification_report(labels, clf.predict(embeddings), target_names=list(target_names)))
if not os.path.isdir(MODEL_DIR_PATH):
os.mkdir(MODEL_DIR_PATH)
model_path = os.path.join('model', 'face_recogniser.pkl')
joblib.dump(FaceRecogniser(features_extractor, clf, idx_to_class), model_path)
if __name__ == '__main__':
main()
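# Example invocations (a sketch; the file names and directory layout below are
# placeholders, not defined by this script):
#   python train.py --dataset-path data/faces --grid-search
#   python train.py --embeddings-path embeddings.txt --labels-path labels.txt \
#                   --class-to-idx-path class_to_idx.pkl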
|
StarcoderdataPython
|
3200973
|
#!/usr/bin/env python
from __future__ import division
"""@package etddf
ROS interface script for delta tiering filter
Filter operates in ENU
steps: get this to at least launch by itself
verify it works in sim for static sonar (fast scan) & dynamic agent -> plot the error (associator, no sonar control)
check the controller works statically - may need a correction here
"""
import rospy
from etddf_minau.msg import MeasurementPackage, NetworkEstimate, AssetEstimate, Measurement
from etddf_minau.srv import GetMeasurementPackage
import numpy as np
import tf
np.set_printoptions(suppress=True)
from copy import deepcopy
from std_msgs.msg import Header
from geometry_msgs.msg import PoseWithCovariance, Pose, Point, Quaternion, Twist, Vector3, TwistWithCovariance, PoseWithCovarianceStamped
from nav_msgs.msg import Odometry
from minau.msg import SonarTargetList, SonarTarget
from cuprint.cuprint import CUPrint
from deltatier.kf_filter import KalmanFilter
class ETDDF_Node:
def __init__(self):
self.my_name = rospy.get_param("~my_name")
self.cuprint = CUPrint("{}/etddf".format(self.my_name))
self.blue_agent_names = rospy.get_param("~blue_team_names")
blue_positions = rospy.get_param("~blue_team_positions")
self.topside_name = rospy.get_param("~topside_name")
assert self.topside_name not in self.blue_agent_names
red_agent_name = rospy.get_param("~red_team_name")
self.update_times = []
self.red_agent_exists = red_agent_name != ""
if self.red_agent_exists:
self.red_agent_name = red_agent_name
self.red_agent_id = len(self.blue_agent_names)
self.use_strapdown = rospy.get_param("~use_strapdown")
self.do_correct_strapdown = rospy.get_param("~correct_strapdown")
self.correct_strapdown_next_seq = False
self.position_process_noise = rospy.get_param("~position_process_noise")
self.velocity_process_noise = rospy.get_param("~velocity_process_noise")
self.fast_ci = rospy.get_param("~fast_ci")
self.force_modem_pose = rospy.get_param("~force_modem_pose")
self.meas_variances = {}
self.meas_variances["sonar_range"] = rospy.get_param("~force_sonar_range_var")
self.meas_variances["sonar_az"] = rospy.get_param("~force_sonar_az_var")
self.meas_variances["modem_range"] = rospy.get_param("~force_modem_range_var")
self.meas_variances["modem_az"] = rospy.get_param("~force_modem_az_var")
known_position_uncertainty = rospy.get_param("~known_position_uncertainty")
unknown_position_uncertainty = rospy.get_param("~unknown_position_uncertainty")
self.is_deltatier = rospy.get_param("~is_deltatier")
if self.is_deltatier:
self.delta_multipliers = rospy.get_param("~delta_tiers")
self.delta_codebook_table = {"sonar_range" : rospy.get_param("~sonar_range_start_et_delta"),
"sonar_azimuth" : rospy.get_param("~sonar_az_start_et_delta")}
self.buffer_size = rospy.get_param("~buffer_space")
if self.is_deltatier:
rospy.Service('etddf/get_measurement_package', GetMeasurementPackage, self.get_meas_pkg_callback)
self.kf = KalmanFilter(blue_positions, [], self.red_agent_exists, self.is_deltatier, \
known_posititon_unc=known_position_uncertainty,\
unknown_agent_unc=unknown_position_uncertainty)
self.network_pub = rospy.Publisher("etddf/estimate/network", NetworkEstimate, queue_size=10)
self.asset_pub_dict = {}
for asset in self.blue_agent_names:
self.asset_pub_dict[asset] = rospy.Publisher("etddf/estimate/" + asset, Odometry, queue_size=10)
if self.red_agent_exists:
self.asset_pub_dict[self.red_agent_name] = rospy.Publisher("etddf/estimate/" + self.red_agent_name, Odometry, queue_size=10)
self.last_update_time = rospy.get_rostime()
# Modem & Measurement Packages
rospy.Subscriber("etddf/packages_in", MeasurementPackage, self.meas_pkg_callback, queue_size=1)
# Strapdown configuration
self.update_seq = 0
self.strapdown_correction_period = rospy.get_param("~strapdown_correction_period")
strap_topic = "odometry/filtered/odom"
rospy.Subscriber( strap_topic, Odometry, self.nav_filter_callback, queue_size=1)
self.intersection_pub = rospy.Publisher("set_pose", PoseWithCovarianceStamped, queue_size=1)
self.cuprint("Waiting for strapdown")
rospy.wait_for_message( strap_topic, Odometry)
self.cuprint("Strapdown found")
# Sonar Subscription
rospy.Subscriber("sonar_processing/target_list/associated", SonarTargetList, self.sonar_callback)
self.cuprint("Loaded")
def sonar_callback(self, msg):
# self.cuprint("Receiving sonar meas")
collecting_agent_id = self.blue_agent_names.index(self.my_name)
for st in msg.targets:
collected_agent_id = self.blue_agent_names.index( st.id )
range_meas = st.range_m
azimuth_meas = st.bearing_rad + self.last_orientation_rad
if self.meas_variances["sonar_range"] == -1:
R_range = st.range_variance
else:
R_range = self.meas_variances["sonar_range"]
if self.meas_variances["sonar_az"] == -1:
R_az = st.bearing_variance
else:
R_az = self.meas_variances["sonar_az"]
rounded_range_meas = round(range_meas, 1)
rounded_azimuth_meas = round(np.degrees(azimuth_meas),1)
# self.cuprint("{} r: {} az: {} (deg)".format(st.id, rounded_range_meas, rounded_azimuth_meas))
self.kf.filter_azimuth_tracked(azimuth_meas, R_az, collecting_agent_id, collected_agent_id)
self.kf.filter_range_tracked(range_meas, R_range, collecting_agent_id, collected_agent_id)
def nav_filter_callback(self, odom):
# Update at specified rate
t_now = rospy.get_rostime()
delta_t_ros = t_now - self.last_update_time
if delta_t_ros < rospy.Duration(1):
return
self.kf.propogate(self.position_process_noise, self.velocity_process_noise)
self.update_times.append(t_now)
# Update orientation
last_orientation_quat = odom.pose.pose.orientation
(r, p, y) = tf.transformations.euler_from_quaternion([last_orientation_quat.x, \
last_orientation_quat.y, last_orientation_quat.z, last_orientation_quat.w])
self.last_orientation_rad = y
orientation_cov = np.array(odom.pose.covariance).reshape(6,6)
if self.use_strapdown:
# last_orientation_dot = odom.twist.twist.angular
# last_orientation_dot_cov = np.array(odom.twist.covariance).reshape(6,6)
# Turn odom estimate into numpy
# Note the velocities are in the base_link frame --> Transform to odom frame # Assume zero pitch/roll
v_baselink = np.array([[odom.twist.twist.linear.x, odom.twist.twist.linear.y, odom.twist.twist.linear.z]]).T
rot_mat = np.array([ # base_link to odom frame
[np.cos(y), -np.sin(y), 0],
[np.sin(y), np.cos(y), 0],
[0, 0, 1]
])
v_odom = rot_mat.dot( v_baselink )
mean = np.array([[odom.pose.pose.position.x, odom.pose.pose.position.y, odom.pose.pose.position.z, \
v_odom[0,0], v_odom[1,0], v_odom[2,0]]]).T
cov_pose = np.array(odom.pose.covariance).reshape(6,6)
cov_twist = np.array(odom.twist.covariance).reshape(6,6)
cov = np.zeros((6,6))
cov[:3,:3] = cov_pose[:3,:3] #+ np.eye(3) * 4 #sim
cov[3:,3:] = rot_mat.dot( cov_twist[:3,:3] ).dot( rot_mat.T ) #+ np.eye(3) * 0.03 #sim
my_id = self.blue_agent_names.index(self.my_name)
x_nav, P_nav = self.kf.intersect_strapdown(mean, cov, my_id, fast_ci=False)
if self.do_correct_strapdown and (self.update_seq % self.strapdown_correction_period == 0):
if x_nav is not None and P_nav is not None:
self.correct_strapdown(odom.header, x_nav, P_nav, last_orientation_quat, orientation_cov)
elif self.correct_strapdown_next_seq:
self.correct_strapdown(odom.header, x_nav, P_nav, last_orientation_quat, orientation_cov)
self.correct_strapdown_next_seq = False
self.publish_estimates(t_now, last_orientation_quat, orientation_cov)
self.last_update_time = t_now
self.update_seq += 1
def correct_strapdown(self, header, x_nav, P_nav, orientation, orientation_cov):
msg = PoseWithCovarianceStamped()
msg.header = header
msg.header.frame_id = "odom"
# Transform
msg.pose.pose.position.x = x_nav[0,0]
msg.pose.pose.position.y = x_nav[1,0]
msg.pose.pose.position.z = x_nav[2,0]
msg.pose.pose.orientation = orientation
new_cov = np.zeros((6,6))
new_cov[:3,:3] = P_nav[:3,:3] # TODO add full cross correlations
new_cov[3:,3:] = orientation_cov[3:,3:]
msg.pose.covariance = list(new_cov.flatten())
self.intersection_pub.publish( msg )
def publish_estimates(self, timestamp, last_orientation_quat, orientation_cov):
ne = NetworkEstimate()
for asset in self.blue_agent_names:
ind = self.blue_agent_names.index(asset)
x_hat_agent, P_agent, _ = self.kf.get_agent_states(ind)
pose_cov = np.zeros((6,6))
pose_cov[:3,:3] = P_agent[:3,:3]
if asset == self.my_name:
pose = Pose(Point(x_hat_agent[0],x_hat_agent[1],x_hat_agent[2]),last_orientation_quat)
pose_cov[3:,3:] = orientation_cov[3:,3:]
elif "red" in asset:
pose_cov = 5*np.eye(6) # Just set single uncertainty
red_agent_depth = -0.7
pose = Pose(Point(x_hat_agent[0],x_hat_agent[1],red_agent_depth), Quaternion(0,0,0,1))
pose_cov[3:,3:] = -np.eye(3)
else:
pose = Pose(Point(x_hat_agent[0],x_hat_agent[1],x_hat_agent[2]), Quaternion(0,0,0,1))
pose_cov[3:,3:] = -np.eye(3)
pwc = PoseWithCovariance(pose, list(pose_cov.flatten()))
twist_cov = -np.eye(6)
twist_cov[:3,:3] = P_agent[3:6,3:6]
tw = Twist()
tw.linear = Vector3(x_hat_agent[3],x_hat_agent[4],x_hat_agent[5])
twc = TwistWithCovariance(tw, list(twist_cov.flatten()))
h = Header(self.update_seq, timestamp, "odom")
o = Odometry(h, "odom", pwc, twc)
ae = AssetEstimate(o, asset)
ne.assets.append(ae)
self.asset_pub_dict[asset].publish(o)
if self.red_agent_exists:
asset = self.red_agent_name
ind = self.blue_agent_names.index(asset)
            x_hat_agent, P_agent, _ = self.kf.get_agent_states(ind)
            pose_cov = np.zeros((6,6))
            pose_cov[:3,:3] = P_agent[:3,:3]
red_agent_depth = -0.7
pose = Pose(Point(x_hat_agent[0],x_hat_agent[1],red_agent_depth), Quaternion(0,0,0,1))
pose_cov[3:,3:] = -np.eye(3)
pwc = PoseWithCovariance(pose, list(pose_cov.flatten()))
            twist_cov = -np.eye(6)
twist_cov[:3,:3] = P_agent[3:6,3:6]
tw = Twist()
tw.linear = Vector3(x_hat_agent[3],x_hat_agent[4],x_hat_agent[5])
twc = TwistWithCovariance(tw, list(twist_cov.flatten()))
h = Header(self.update_seq, timestamp, "odom")
o = Odometry(h, "odom", pwc, twc)
ae = AssetEstimate(o, asset)
ne.assets.append(ae)
self.asset_pub_dict[asset].publish(o)
self.network_pub.publish(ne)
def meas_pkg_callback(self, msg):
# Modem Meas taken by topside
if msg.src_asset == self.topside_name:
self.cuprint("Receiving Surface Modem Measurements")
meas_indices = []
modem_loc = None
# Approximate all modem measurements as being taken at this time
for meas in msg.measurements:
if len(self.force_modem_pose) == 0:
modem_loc = meas.global_pose[:3]
modem_ori = meas.global_pose[3]
else:
modem_loc = self.force_modem_pose[:3]
modem_ori = np.radians(self.force_modem_pose[3])
# self.cuprint("Modem loc: {} Modem pose: {}".format(modem_loc, modem_ori))
# meas_index = min(range(len(self.update_times)), key=lambda i: abs( (self.update_times[i]-meas.stamp).to_sec() ))
meas_index = len(self.update_times) - 5
if meas_index < 0:
meas_index = 0
meas_indices.append(meas_index)
agent = meas.measured_asset
agent_id = self.blue_agent_names.index(agent)
# Approximate the fuse on the next update, so we can get other asset's position immediately
if meas.meas_type == "modem_elevation":
rospy.logerr("Ignoring Modem Elevation Measurement since we have depth measurements")
continue
elif meas.meas_type == "modem_azimuth" and agent != self.my_name:
meas.data += modem_ori
meas_value_rad = np.radians(meas.data)
R = self.meas_variances["modem_az"]
self.kf.filter_azimuth_from_untracked( meas_value_rad, R, modem_loc, agent_id, index=meas_index)
elif meas.meas_type == "modem_range":
BIAS = 0.5
agent = meas.measured_asset
R = self.meas_variances["modem_range"]
self.kf.filter_range_from_untracked( meas.data - BIAS, R, modem_loc, agent_id, index=meas_index)
if meas_indices: # we received measurements
min_index = min(meas_indices)
my_id = self.blue_agent_names.index(self.my_name)
self.kf.catch_up(min_index, modem_loc, self.position_process_noise, self.velocity_process_noise, my_id, fast_ci=False)
self.correct_strapdown_next_seq = True
elif self.is_deltatier:
raise NotImplementedError("DT is not supported yet")
# self.cuprint("receiving buffer")
# # Loop through buffer and see if we've found the red agent
# for i in range(len(msg.measurements)):
# if msg.measurements[i].measured_asset in self.red_asset_names and not self.red_asset_found:
# self.red_asset_found = True
# self.cuprint("Red asset measurement received!")
# implicit_cnt, explicit_cnt = self.filter.receive_buffer(msg.measurements, msg.delta_multiplier, msg.src_asset)
# implicit_cnt, explicit_cnt = self.filter.catch_up(msg.delta_multiplier, msg.measurements, self.Q, msg.all_measurements)
def get_meas_pkg_callback(self, req):
self.cuprint("pulling buffer")
self.update_lock.acquire()
delta, buffer = self.filter.pull_buffer()
self.update_lock.release()
mp = MeasurementPackage(buffer, buffer, self.my_name, delta)
self.cuprint("returning buffer")
return mp
if __name__ == "__main__":
rospy.init_node("etddf_node")
et_node = ETDDF_Node()
debug = False
if not debug:
rospy.spin()
else:
o = Odometry()
o.pose.pose.orientation.w = 1
et_node.use_strapdown = False
rospy.sleep(2)
t = rospy.get_rostime()
et_node.nav_filter_callback(o)
mp = MeasurementPackage()
m = Measurement("modem_range", t, "topside", "guppy", 6, 0, [], 0)
mp.measurements.append(m)
m = Measurement("modem_azimuth", t, "topside", "guppy", 45, 0, [],0)
mp.measurements.append(m)
mp.src_asset = "topside"
et_node.meas_pkg_callback(mp)
rospy.sleep(1)
et_node.kf._filter_artificial_depth(0.0)
et_node.nav_filter_callback(o)
stl = SonarTargetList()
st = SonarTarget()
st.id = "guppy"
st.bearing_rad = np.random.normal(0,0.01)
st.range_m = 5.0 + np.random.normal(0,0.1)
stl.targets.append(st)
et_node.sonar_callback(stl)
rospy.sleep(1)
et_node.nav_filter_callback(o)
|
StarcoderdataPython
|
3298403
|
"""Tests for the strategy"""
from django.http import HttpRequest
from rest_framework.request import Request
from authentication.utils import load_drf_strategy
def test_strategy_init(mocker):
"""Test that the constructor works as expected"""
drf_request = mocker.Mock()
strategy = load_drf_strategy(request=drf_request)
assert strategy.drf_request == drf_request
assert strategy.request == drf_request._request # pylint: disable=protected-access
def test_strategy_request_data(mocker):
"""Tests that the strategy request_data correctly returns the DRF request data"""
drf_request = mocker.Mock()
strategy = load_drf_strategy(request=drf_request)
assert strategy.request_data() == drf_request.data
def test_strategy_clean_authenticate_args(mocker):
"""Tests that the strategy clean_authenticate_args moves the request to kwargs"""
    # NOTE: don't pass this to load_drf_strategy, it will error
drf_request = Request(mocker.Mock(spec=HttpRequest))
strategy = load_drf_strategy(mocker.Mock())
assert strategy.clean_authenticate_args(drf_request, 2, 3, kwarg1=1, kwarg2=2) == (
(2, 3),
{"request": drf_request, "kwarg1": 1, "kwarg2": 2},
)
|
StarcoderdataPython
|
57621
|
#import conf.bootstrap as config
#import conf.datakey as datakey
from .hashicorp_base import ConnBase
import consul
import os
import json
from ..utils.io import convert_yaml
from ..utils.logger import Logger
class ConsulCon(ConnBase):
"""Class to construct the dict properties for the app from Consul and Vault
"""
exception_key = ['path']
exception_dict = {}
cons = None
def __init__(self, params = None, exception_dict = None):
"""Constructor inisiating all properties
"""
ConnBase.__init__(self)
# if exception dict is known
if exception_dict:
self.exception_dict = exception_dict
# construct the consul and vault params
consul_params = self.get_configs_dict(self._content['consul'], self.exception_key) if not params else params
# construct the consul
self.cons = consul.Consul(**consul_params)
def get_kv(self, type = 'json'):
"""run config constructor return dict all configs
Keyword arguments :
type -- The type of the value text format
"""
type_enum = {
'json' : lambda x: json.loads(x.decode('utf-8')) if x else '',
'yaml' : lambda x: convert_yaml(x) if x else ''
}
temp = self.cons.kv.get(self.exception_dict['path'])[1]['Value']
result = type_enum[type](temp)
return result
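# Hypothetical usage sketch (the ConnBase-provided Consul connection settings and
# the KV path below are assumptions, not shown in this module):
#   con = ConsulCon(exception_dict={"path": "myapp/config"})
#   cfg = con.get_kv(type="yaml")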
|
StarcoderdataPython
|
76702
|
import tensorflow as tf
import numpy as np
def batches(l, n):
"""Yield successive n-sized batches from l, the last batch is the left indexes."""
for i in range(0, l, n):
yield range(i,min(l,i+n))
class Deep_Autoencoder(object):
def __init__(self, sess, input_dim_list=[7,64,64,7],transfer_function=tf.nn.relu,learning_rate=0.001):
"""input_dim_list must include the original data dimension"""
        # assert len(input_dim_list) >= 2, (
        #     "input_dim_list needs at least an input and an output dimension")
self.W_list = []
self.encoding_b_list = []
self.decoding_b_list = []
self.dim_list = input_dim_list
self.transfer = transfer_function
        self.learning_rate = learning_rate
## Encoders parameters
for i in range(len(input_dim_list)-1):
init_max_value = 4*np.sqrt(6. / (self.dim_list[i] + self.dim_list[i+1]))
self.W_list.append(tf.Variable(tf.random_uniform([self.dim_list[i],self.dim_list[i+1]],
np.negative(init_max_value),init_max_value)))
self.encoding_b_list.append(tf.Variable(tf.random_uniform([self.dim_list[i+1]],-0.1,0.1)))
## Decoders parameters
for i in range(len(input_dim_list)-2,-1,-1):
self.decoding_b_list.append(tf.Variable(tf.random_uniform([self.dim_list[i]],-0.1,0.1)))
## Placeholder for input
self.input_x = tf.placeholder(tf.float32,[None,self.dim_list[0]])
## coding graph :
last_layer = self.input_x
for weight,bias in zip(self.W_list,self.encoding_b_list):
hidden = self.transfer(tf.matmul(last_layer,weight) + bias)
last_layer = hidden
self.hidden = hidden
## decode graph:
for weight,bias in zip(reversed(self.W_list),self.decoding_b_list):
hidden = self.transfer(tf.matmul(last_layer,tf.transpose(weight)) + bias)
last_layer = hidden
self.recon = last_layer
#self.cost = tf.reduce_mean(tf.square(self.input_x - self.recon))
self.cost =0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.recon, self.input_x), 2.0))
self.train_step = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.cost)
sess.run(tf.global_variables_initializer())
def fit(self, X, sess,iteration=100, batch_size=12, init=False,verbose=False):
assert X.shape[1] == self.dim_list[0]
if init:
sess.run(tf.global_variables_initializer())
sample_size = X.shape[0]
for i in range(iteration):
for one_batch in batches(sample_size, batch_size):
e,op=sess.run((self.cost,self.train_step),feed_dict = {self.input_x:X[one_batch]})
if verbose and i%20==0:
#e = self.cost.eval(session = sess,feed_dict = {self.input_x: X[one_batch]})
print(" iteration :", i ,", cost:", e)
def transform(self, X, sess):
return self.hidden.eval(session = sess, feed_dict={self.input_x: X})
def getRecon(self, X, sess):
return self.recon.eval(session = sess,feed_dict={self.input_x: X})
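# Minimal usage sketch (assumes a TensorFlow 1.x-style session, matching the
# placeholder/Session API used above; the random data is purely illustrative).
if __name__ == "__main__":
    with tf.Session() as sess:
        X = np.random.rand(200, 7).astype(np.float32)
        ae = Deep_Autoencoder(sess, input_dim_list=[7, 64, 64, 7])
        ae.fit(X, sess, iteration=100, batch_size=12, verbose=True)
        codes = ae.transform(X, sess)  # hidden-layer representation
        recon = ae.getRecon(X, sess)   # reconstruction of the input
        print("mean squared reconstruction error:", np.mean((X - recon) ** 2))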
|
StarcoderdataPython
|
116413
|
from collections import namedtuple
class Config(namedtuple('Config', ['origin', 'api_key', 'maybe_logger'])):
"""Configuration for interacting with SLS API."""
__slots__ = ()
def __new__(cls, origin, api_key, maybe_logger=None):
return super(Config, cls).__new__(cls, origin, api_key,
                                          maybe_logger=maybe_logger)
def mk(origin, api_key, maybe_logger=None):
    return Config(origin, api_key, maybe_logger)
|
StarcoderdataPython
|
144763
|
from typing import Optional
from athenian.api.models.web.base_model_ import Model
from athenian.api.models.web.work_type import WorkType
class WorkTypePutRequest(Model):
"""Request body of `PUT /settings/work_type`."""
openapi_types = {"account": int, "work_type": WorkType}
attribute_map = {"account": "account", "work_type": "work_type"}
def __init__(self,
account: Optional[int] = None,
work_type: Optional[WorkType] = None):
"""WorkTypePutRequest - a model defined in OpenAPI
:param account: The account of this WorkTypePutRequest.
:param work_type: The work_type of this WorkTypePutRequest.
"""
self._account = account
self._work_type = work_type
@property
def account(self) -> int:
"""Gets the account of this WorkTypePutRequest.
Account ID.
:return: The account of this WorkTypePutRequest.
"""
return self._account
@account.setter
def account(self, account: int):
"""Sets the account of this WorkTypePutRequest.
Account ID.
:param account: The account of this WorkTypePutRequest.
"""
if account is None:
raise ValueError("Invalid value for `account`, must not be `None`")
self._account = account
@property
def work_type(self) -> WorkType:
"""Gets the work_type of this WorkTypePutRequest.
:return: The work_type of this WorkTypePutRequest.
"""
return self._work_type
@work_type.setter
def work_type(self, work_type: WorkType):
"""Sets the work_type of this WorkTypePutRequest.
:param work_type: The work_type of this WorkTypePutRequest.
"""
if work_type is None:
raise ValueError("Invalid value for `work_type`, must not be `None`")
self._work_type = work_type
|
StarcoderdataPython
|
1742622
|
import os
from pathlib import Path
import pytest
from commitizen import config, defaults, git
PYPROJECT = """
[tool.commitizen]
name = "cz_jira"
version = "1.0.0"
version_files = [
"commitizen/__version__.py",
"pyproject.toml"
]
style = [
["pointer", "reverse"],
["question", "underline"]
]
[tool.black]
line-length = 88
target-version = ['py36', 'py37', 'py38']
"""
_settings = {
"name": "cz_jira",
"version": "1.0.0",
"tag_format": None,
"bump_message": None,
"version_files": ["commitizen/__version__.py", "pyproject.toml"],
"style": [["pointer", "reverse"], ["question", "underline"]],
"changelog_file": "CHANGELOG.md",
"changelog_incremental": False,
"changelog_start_rev": None,
"update_changelog_on_bump": False,
}
_new_settings = {
"name": "cz_jira",
"version": "2.0.0",
"tag_format": None,
"bump_message": None,
"version_files": ["commitizen/__version__.py", "pyproject.toml"],
"style": [["pointer", "reverse"], ["question", "underline"]],
"changelog_file": "CHANGELOG.md",
"changelog_incremental": False,
"changelog_start_rev": None,
"update_changelog_on_bump": False,
}
_read_settings = {
"name": "cz_jira",
"version": "1.0.0",
"version_files": ["commitizen/__version__.py", "pyproject.toml"],
"style": [["pointer", "reverse"], ["question", "underline"]],
"changelog_file": "CHANGELOG.md",
}
@pytest.fixture
def config_files_manager(request, tmpdir):
with tmpdir.as_cwd():
filename = request.param
with open(filename, "w") as f:
if "toml" in filename:
f.write(PYPROJECT)
yield
def test_find_git_project_root(tmpdir):
assert git.find_git_project_root() == Path(os.getcwd())
with tmpdir.as_cwd() as _:
assert git.find_git_project_root() is None
@pytest.mark.parametrize(
"config_files_manager", defaults.config_files.copy(), indirect=True
)
def test_set_key(config_files_manager):
_conf = config.read_cfg()
_conf.set_key("version", "2.0.0")
cfg = config.read_cfg()
assert cfg.settings == _new_settings
class TestReadCfg:
@pytest.mark.parametrize(
"config_files_manager", defaults.config_files.copy(), indirect=True
)
def test_load_conf(_, config_files_manager):
cfg = config.read_cfg()
assert cfg.settings == _settings
def test_conf_returns_default_when_no_files(_, tmpdir):
with tmpdir.as_cwd():
cfg = config.read_cfg()
assert cfg.settings == defaults.DEFAULT_SETTINGS
def test_load_empty_pyproject_toml_and_cz_toml_with_config(_, tmpdir):
with tmpdir.as_cwd():
p = tmpdir.join("pyproject.toml")
p.write("")
p = tmpdir.join(".cz.toml")
p.write(PYPROJECT)
cfg = config.read_cfg()
assert cfg.settings == _settings
class TestTomlConfig:
def test_init_empty_config_content(self, tmpdir):
path = tmpdir.mkdir("commitizen").join(".cz.toml")
toml_config = config.TomlConfig(data="", path=path)
toml_config.init_empty_config_content()
with open(path, "r") as toml_file:
assert toml_file.read() == "[tool.commitizen]"
def test_init_empty_config_content_with_existing_content(self, tmpdir):
existing_content = "[tool.black]\n" "line-length = 88\n"
path = tmpdir.mkdir("commitizen").join(".cz.toml")
path.write(existing_content)
toml_config = config.TomlConfig(data="", path=path)
toml_config.init_empty_config_content()
with open(path, "r") as toml_file:
assert toml_file.read() == existing_content + "[tool.commitizen]"
|
StarcoderdataPython
|
3231432
|
from jnius import autoclass
import os
import numpy as np
import storage
File = autoclass('java.io.File')
Interpreter = autoclass('org.tensorflow.lite.Interpreter')
InterpreterOptions = autoclass('org.tensorflow.lite.Interpreter$Options')
Tensor = autoclass('org.tensorflow.lite.Tensor')
DataType = autoclass('org.tensorflow.lite.DataType')
TensorBuffer = autoclass('org.tensorflow.lite.support.tensorbuffer.TensorBuffer')
ByteBuffer = autoclass('java.nio.ByteBuffer')
class Diabetes:
def __init__(self):
self.load()
def load(self):
self.blood_sugar_model = self.build_blood_sugar_model()
self.tendency_model = self.build_tendency_model()
def build_blood_sugar_model(self):
class BloodSugar:
def __init__(self):
self.load()
def load(self, num_threads=None):
model = File(os.path.join(storage.model_dir_path, 'blood_sugar_model.tflite'))
options = InterpreterOptions()
if num_threads is not None:
options.setNumThreads(num_threads)
self.interpreter = Interpreter(model, options)
self.allocate_tensors()
def allocate_tensors(self):
self.interpreter.allocateTensors()
self.input_shape = self.interpreter.getInputTensor(0).shape()
self.output_shape = self.interpreter.getOutputTensor(0).shape()
self.output_type = self.interpreter.getOutputTensor(0).dataType()
def predict(self, values):
input = ByteBuffer.wrap(values.tobytes())
output = TensorBuffer.createFixedSize(self.output_shape, self.output_type)
self.interpreter.run(input, output.getBuffer().rewind())
return np.reshape(np.array(output.getFloatArray()), self.output_shape)
return BloodSugar()
def build_tendency_model(self):
class Tendency:
def __init__(self):
self.load()
def load(self, num_threads=None):
model = File(os.path.join(storage.model_dir_path, 'tendency_model.tflite'))
options = InterpreterOptions()
if num_threads is not None:
options.setNumThreads(num_threads)
self.interpreter = Interpreter(model, options)
self.allocate_tensors()
def allocate_tensors(self):
self.interpreter.allocateTensors()
self.input_shape = self.interpreter.getInputTensor(0).shape()
self.output_shape = self.interpreter.getOutputTensor(0).shape()
self.output_type = self.interpreter.getOutputTensor(0).dataType()
def predict(self, values):
input = ByteBuffer.wrap(values.tobytes())
output = TensorBuffer.createFixedSize(self.output_shape, self.output_type)
self.interpreter.run(input, output.getBuffer().rewind())
return np.reshape(np.array(output.getFloatArray()), self.output_shape)
return Tendency()
def normalize_input(self, input):
input[0] = input[0] / 400
input[2] = input[2] / 20
input[3] = input[3] / 10
input = np.expand_dims(np.array(input, dtype=np.float32), axis=0)
return input
def normalize_output(self, output):
output *= 500
return round(output)
def predict(self, values):
normalized_input = self.normalize_input(values)
predicted_blood_sugar = self.normalize_output(self.blood_sugar_model.predict(normalized_input[:]).item())
tendency_output = self.tendency_model.predict(normalized_input[:]).tolist()[0]
if tendency_output.index(max(tendency_output)) == 0:
predicted_tendency = -1
elif tendency_output.index(max(tendency_output)) == 1:
predicted_tendency = -0.5
elif tendency_output.index(max(tendency_output)) == 2:
predicted_tendency = 0
elif tendency_output.index(max(tendency_output)) == 3:
predicted_tendency = 0.5
elif tendency_output.index(max(tendency_output)) == 4:
predicted_tendency = 1
return [predicted_blood_sugar, predicted_tendency]
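# Hypothetical usage sketch (the meaning of the four raw input values is not
# documented above, so the numbers here are placeholders only):
#   d = Diabetes()
#   blood_sugar, tendency = d.predict([120.0, 1.0, 4.0, 2.5])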
|
StarcoderdataPython
|
108293
|
import argparse
import ast
import collections
import configparser
import logging
import os
import sys
import typing
from abc import ABCMeta
from argparse import Action, ArgumentParser
from enum import Enum
from pathlib import Path
from types import MappingProxyType
from typing import (
Any, Callable, Dict, Iterable, Mapping, MutableMapping, NamedTuple,
Optional, Sequence, Set, Tuple, Type, TypeVar, Union,
)
T = TypeVar("T")
ConverterType = Callable[[str], Any]
NoneType = type(None)
UnionClass = Union[None, int].__class__
def read_config(
*paths: Union[str, Path], **kwargs: Any
) -> Tuple[Dict[str, Any], Tuple[Path, ...]]:
kwargs.setdefault("allow_no_value", True)
kwargs.setdefault("strict", False)
parser = configparser.ConfigParser(**kwargs)
filenames = list(
map(
lambda p: p.resolve(),
filter(
lambda p: p.is_file(),
map(lambda x: Path(x).expanduser(), paths),
),
),
)
config_paths = parser.read(filenames)
result: Dict[str, Union[str, Dict[str, str]]] = dict(
parser.items(parser.default_section, raw=True),
)
for section in parser.sections():
config = dict(parser.items(section, raw=True))
result[section] = config
return result, tuple(map(Path, config_paths))
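# Example (a sketch; the paths are placeholders): existing files are read in order,
# later files override earlier ones section by section, and the paths actually
# loaded are returned alongside the merged settings.
#   settings, used_paths = read_config("/etc/myapp.ini", "~/.config/myapp.ini")
#   print(settings.get("logging", {}), [str(p) for p in used_paths])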
class ConfigAction(Action):
def __init__(
self, option_strings: Sequence[str], dest: str,
search_paths: Iterable[Union[str, Path]] = (),
type: MappingProxyType = MappingProxyType({}), help: str = "",
required: bool = False, default: Any = None,
):
if not isinstance(type, MappingProxyType):
raise ValueError("type must be MappingProxyType")
super().__init__(
option_strings, dest, type=Path, help=help, default=default,
required=required,
)
self.search_paths = list(map(Path, search_paths))
self._result = None
def __call__(self, parser, namespace, values, option_string=None):
if not self._result:
filenames = list(self.search_paths)
if values:
filenames.insert(0, Path(values))
self._result, filenames = read_config(*filenames)
if self.required and not filenames:
raise argparse.ArgumentError(
argument=self,
message="is required but no one config loaded",
)
setattr(namespace, self.dest, MappingProxyType(self._result))
class Actions(str, Enum):
APPEND = "append"
APPEND_CONST = "append_const"
COUNT = "count"
HELP = "help"
PARSERS = "parsers"
STORE = "store"
STORE_CONST = "store_const"
STORE_FALSE = "store_false"
STORE_TRUE = "store_true"
VERSION = "version"
if sys.version_info >= (3, 8):
EXTEND = "extend"
@classmethod
def default(cls):
return cls.STORE
class Nargs(Enum):
ANY = None
ONE_OR_MORE = "+"
OPTIONAL = "?"
ZERO_OR_MORE = "*"
@classmethod
def default(cls):
return cls.ANY
def deep_getattr(name, attrs: Dict[str, Any], *bases: Type) -> Any:
if name in attrs:
return attrs[name]
for base in bases:
if hasattr(base, name):
return getattr(base, name)
raise KeyError(f"Key {name} was not declared")
def merge_annotations(
annotations: Dict[str, Any], *bases: Type
) -> Dict[str, Any]:
result: Dict[str, Any] = {}
for base in bases:
result.update(getattr(base, "__annotations__", {}))
result.update(annotations)
return result
class StoreMeta(type):
def __new__(mcs, name, bases, attrs: Dict[str, Any]):
annotations = merge_annotations(
attrs.get("__annotations__", {}), *bases
)
attrs["__annotations__"] = annotations
attrs["_fields"] = tuple(
filter(
lambda x: not x.startswith("_"),
annotations.keys(),
),
)
return super().__new__(mcs, name, bases, attrs)
class Store(metaclass=StoreMeta):
_default_value = object()
_fields: Tuple[str, ...]
def __new__(cls, **kwargs):
obj = super().__new__(cls)
type_map: Dict[str, Tuple[Type, Any]] = {}
for key, value in obj.__annotations__.items():
if key.startswith("_"):
continue
type_map[key] = (value, getattr(obj, key, cls._default_value))
for key, (value_type, default) in type_map.items():
if default is cls._default_value and key not in kwargs:
raise TypeError(f"required argument {key!r} must be passed")
value = kwargs.get(key, default)
setattr(obj, key, value)
return obj
def copy(self, **overrides):
kwargs = self.as_dict()
for key, value in overrides.items():
kwargs[key] = value
return self.__class__(**kwargs)
def as_dict(self) -> Dict[str, Any]:
# noinspection PyProtectedMember
return {
field: getattr(self, field) for field in self._fields
}
def __repr__(self) -> str:
values = ", ".join([
f"{k!s}={v!r}" for k, v in sorted(self.as_dict().items())
])
return f"<{self.__class__.__name__}: {values}>"
class ArgumentBase(Store):
def __init__(self, **kwargs):
self._values = collections.OrderedDict()
# noinspection PyUnresolvedReferences
for key in self._fields:
self._values[key] = kwargs.get(key, getattr(self.__class__, key))
def __getattr__(self, item):
try:
return self._values[item]
except KeyError as e:
raise AttributeError from e
@property
def is_positional(self) -> bool:
for alias in self.aliases:
if alias.startswith("-"):
return False
return True
def get_kwargs(self):
nargs = self.nargs
if isinstance(nargs, Nargs):
nargs = nargs.value
action = self.action
kwargs = self.as_dict()
if action in (Actions.STORE_TRUE, Actions.STORE_FALSE, Actions.COUNT):
kwargs.pop("type", None)
if isinstance(action, Actions):
action = action.value
kwargs.pop("aliases", None)
kwargs.pop("env_var", None)
kwargs.pop("converter", None)
kwargs.update(action=action, nargs=nargs)
return {k: v for k, v in kwargs.items() if v is not None}
class _Argument(ArgumentBase):
action: Union[Actions, Type[Action]] = Actions.default()
aliases: Iterable[str] = frozenset()
choices: Optional[Iterable[str]] = None
const: Optional[Any] = None
converter: Optional[ConverterType] = None
default: Optional[Any] = None
env_var: Optional[str] = None
help: Optional[str] = None
metavar: Optional[str] = None
nargs: Optional[Union[int, Nargs]] = Nargs.default()
required: Optional[bool] = None
type: Any = None
@property
def is_nargs(self) -> bool:
if self.nargs == Nargs.ANY:
return False
if isinstance(self.nargs, int):
return self.nargs > 1
return True
class _Config(_Argument):
""" Parse INI file and set results as a value """
action: Type[ConfigAction] = ConfigAction
search_paths: Optional[Iterable[Union[Path, str]]] = None
class AbstractGroup:
pass
class AbstractParser:
pass
TEXT_TRUE_VALUES = frozenset((
"y", "yes", "true", "t", "enable", "enabled", "1", "on",
))
def parse_bool(value: str) -> bool:
return value.lower() in TEXT_TRUE_VALUES
def unwrap_optional(typespec: Any) -> Optional[Any]:
if typespec.__class__ != UnionClass:
return None
union_args = [a for a in typespec.__args__ if a is not NoneType]
if len(union_args) != 1:
raise TypeError(
"Complex types mustn't be used in short form. You have to "
"specify argclass.Argument with converter or type function."
)
return union_args[0]
def _make_action_true_argument(
kind: typing.Type, default: Any = None,
) -> _Argument:
kw: Dict[str, Any] = {"type": kind}
if kind is bool:
if default is False:
kw["action"] = Actions.STORE_TRUE
elif default is True:
kw["action"] = Actions.STORE_FALSE
else:
raise TypeError(f"Can not set default {default!r} for bool")
elif kind == typing.Optional[bool]:
kw["action"] = Actions.STORE
kw["type"] = parse_bool
kw["default"] = None
return _Argument(**kw)
def _type_is_bool(kind: typing.Type) -> bool:
return kind is bool or kind == typing.Optional[bool]
class Meta(ABCMeta):
def __new__(mcs, name, bases, attrs: Dict[str, Any]):
annotations = merge_annotations(
attrs.get("__annotations__", {}), *bases
)
arguments = {}
argument_groups = {}
subparsers = {}
for key, kind in annotations.items():
if key.startswith("_"):
continue
try:
argument = deep_getattr(key, attrs, *bases)
except KeyError:
argument = None
if kind is bool:
argument = False
if not isinstance(
argument, (_Argument, AbstractGroup, AbstractParser),
):
attrs[key] = ...
is_required = argument is None
if _type_is_bool(kind):
argument = _make_action_true_argument(kind, argument)
else:
optional_type = unwrap_optional(kind)
if optional_type is not None:
is_required = False
kind = optional_type
argument = _Argument(
type=kind, default=argument, required=is_required,
)
if isinstance(argument, _Argument):
if argument.type is None and argument.converter is None:
if kind.__class__.__module__ == "typing":
kind = unwrap_optional(kind)
argument.default = None
argument.type = kind
arguments[key] = argument
elif isinstance(argument, AbstractGroup):
argument_groups[key] = argument
for key, value in attrs.items():
if key.startswith("_"):
continue
if isinstance(value, _Argument):
arguments[key] = value
elif isinstance(value, AbstractGroup):
argument_groups[key] = value
elif isinstance(value, AbstractParser):
subparsers[key] = value
attrs["__arguments__"] = MappingProxyType(arguments)
attrs["__argument_groups__"] = MappingProxyType(argument_groups)
attrs["__subparsers__"] = MappingProxyType(subparsers)
cls = super().__new__(mcs, name, bases, attrs)
return cls
class Base(metaclass=Meta):
__arguments__: Mapping[str, _Argument]
__argument_groups__: Mapping[str, "Group"]
__subparsers__: Mapping[str, "Parser"]
def __getattribute__(self, item: str) -> Any:
value = super().__getattribute__(item)
if item.startswith("_"):
return value
if item in self.__arguments__:
class_value = getattr(self.__class__, item, None)
if value is class_value:
raise AttributeError(f"Attribute {item!r} was not parsed")
return value
def __repr__(self) -> str:
return (
f"<{self.__class__.__name__}: "
f"{len(self.__arguments__)} arguments, "
f"{len(self.__argument_groups__)} groups, "
f"{len(self.__subparsers__)} subparsers>"
)
class Destination(NamedTuple):
target: Base
attribute: str
argument: Optional[_Argument]
action: Optional[Action]
DestinationsType = MutableMapping[str, Set[Destination]]
class Group(AbstractGroup, Base):
def __init__(
self, title: str = None, description: Optional[str] = None,
prefix: Optional[str] = None,
defaults: Optional[Dict[str, Any]] = None,
):
self._prefix: Optional[str] = prefix
self._title: Optional[str] = title
self._description: Optional[str] = description
self._defaults: Mapping[str, Any] = MappingProxyType(defaults or {})
# noinspection PyProtectedMember
class Parser(AbstractParser, Base):
HELP_APPENDIX_PREAMBLE = (
" Default values will based on following "
"configuration files {configs}. "
)
HELP_APPENDIX_CURRENT = (
"Now {num_existent} files has been applied {existent}. "
)
HELP_APPENDIX_END = (
"The configuration files is INI-formatted files "
"where configuration groups is INI sections."
"See more https://pypi.org/project/argclass/#configs"
)
def _add_argument(
self, parser: Any, argument: _Argument, dest: str, *aliases,
) -> Tuple[str, Action]:
kwargs = argument.get_kwargs()
if not argument.is_positional:
kwargs["dest"] = dest
if argument.default is not None:
kwargs["help"] = (
f"{kwargs.get('help', '')} (default: {argument.default})"
).strip()
if argument.env_var is not None:
default = kwargs.get("default")
kwargs["default"] = os.getenv(argument.env_var, default)
if kwargs["default"] and argument.is_nargs:
kwargs["default"] = list(
map(
argument.type or str,
ast.literal_eval(kwargs["default"]),
)
)
kwargs["help"] = (
f"{kwargs.get('help', '')} [ENV: {argument.env_var}]"
).strip()
if argument.env_var in os.environ:
self._used_env_vars.add(argument.env_var)
if kwargs.get("default"):
kwargs['required'] = False
return dest, parser.add_argument(*aliases, **kwargs)
@staticmethod
def get_cli_name(name: str) -> str:
return name.replace("_", "-")
def get_env_var(self, name: str, argument: _Argument) -> Optional[str]:
if argument.env_var is not None:
return argument.env_var
if self._auto_env_var_prefix is not None:
return f"{self._auto_env_var_prefix}{name}".upper()
return None
def __init__(
self, config_files: Iterable[Union[str, Path]] = (),
auto_env_var_prefix: Optional[str] = None,
**kwargs,
):
super().__init__()
self.current_subparser = None
self._config_files = config_files
self._config, filenames = read_config(*config_files)
self._epilog = kwargs.pop("epilog", "")
self._epilog += self.HELP_APPENDIX_PREAMBLE.format(
configs=repr(config_files),
)
if filenames:
self._epilog += self.HELP_APPENDIX_CURRENT.format(
num_existent=len(filenames),
existent=repr(list(map(str, filenames))),
)
self._epilog += self.HELP_APPENDIX_END
self._auto_env_var_prefix = auto_env_var_prefix
self._parser_kwargs = kwargs
self._used_env_vars: Set[str] = set()
def _make_parser(
self, parser: Optional[ArgumentParser] = None,
) -> Tuple[ArgumentParser, DestinationsType]:
if parser is None:
parser = ArgumentParser(
epilog=self._epilog, **self._parser_kwargs
)
destinations: DestinationsType = collections.defaultdict(set)
self._fill_arguments(destinations, parser)
self._fill_groups(destinations, parser)
if self.__subparsers__:
self._fill_subparsers(destinations, parser)
return parser, destinations
def _fill_arguments(
self, destinations: DestinationsType, parser: ArgumentParser,
) -> None:
for name, argument in self.__arguments__.items():
aliases = set(argument.aliases)
# Add default alias
if not aliases:
aliases.add(f"--{self.get_cli_name(name)}")
default = self._config.get(name, argument.default)
argument = argument.copy(
aliases=aliases,
env_var=self.get_env_var(name, argument),
default=default,
)
if default and argument.required:
argument = argument.copy(required=False)
dest, action = self._add_argument(parser, argument, name, *aliases)
destinations[dest].add(
Destination(
target=self,
attribute=name,
argument=argument,
action=action,
),
)
def _fill_groups(
self, destinations: DestinationsType, parser: ArgumentParser,
) -> None:
for group_name, group in self.__argument_groups__.items():
group_parser = parser.add_argument_group(
title=group._title,
description=group._description,
)
config = self._config.get(group_name, {})
for name, argument in group.__arguments__.items():
aliases = set(argument.aliases)
dest = "_".join((group._prefix or group_name, name))
if not aliases:
aliases.add(f"--{self.get_cli_name(dest)}")
default = config.get(
name, group._defaults.get(name, argument.default),
)
argument = argument.copy(
default=default,
env_var=self.get_env_var(dest, argument),
)
dest, action = self._add_argument(
group_parser, argument, dest, *aliases
)
destinations[dest].add(
Destination(
target=group,
attribute=name,
argument=argument,
action=action,
),
)
def _fill_subparsers(
self, destinations: DestinationsType, parser: ArgumentParser,
) -> None:
subparsers = parser.add_subparsers()
subparser: Parser
destinations["current_subparser"].add(
Destination(
target=self,
attribute="current_subparser",
argument=None,
action=None,
),
)
for subparser_name, subparser in self.__subparsers__.items():
current_parser, subparser_dests = (
subparser._make_parser(
subparsers.add_parser(
subparser_name, **subparser._parser_kwargs
),
)
)
current_parser.set_defaults(current_subparser=subparser)
for dest, values in subparser_dests.items():
for target, name, argument, action in values:
destinations[dest].add(
Destination(
target=subparser,
attribute=name,
argument=argument,
action=action,
),
)
def parse_args(self, args: Optional[Sequence[str]] = None) -> "Parser":
self._used_env_vars.clear()
parser, destinations = self._make_parser()
parsed_ns = parser.parse_args(args)
parsed_value: Any
current_subparser = getattr(parsed_ns, "current_subparser", None)
for key, values in destinations.items():
parsed_value = getattr(parsed_ns, key, None)
for target, name, argument, action in values:
if (
target is not self and
isinstance(target, Parser) and
current_subparser is not target
):
continue
if isinstance(action, ConfigAction):
action(parser, parsed_ns, parsed_value, None)
parsed_value = getattr(parsed_ns, key)
if argument is not None and argument.converter is not None:
if argument.nargs and parsed_value is None:
parsed_value = []
parsed_value = argument.converter(parsed_value)
setattr(target, name, parsed_value)
return self
def print_help(self) -> None:
parser, _ = self._make_parser()
return parser.print_help()
def sanitize_env(self) -> None:
for name in self._used_env_vars:
os.environ.pop(name, None)
self._used_env_vars.clear()
# noinspection PyPep8Naming
def Argument(
*aliases: str,
action: Union[Actions, Type[Action]] = Actions.default(),
choices: Optional[Iterable[str]] = None,
const: Optional[Any] = None,
converter: Optional[ConverterType] = None,
default: Optional[Any] = None,
env_var: Optional[str] = None,
help: Optional[str] = None,
metavar: Optional[str] = None,
nargs: Optional[Union[int, str, Nargs]] = Nargs.default(),
required: Optional[bool] = None,
type: Any = None
) -> Any:
return _Argument(
action=action,
aliases=aliases,
choices=choices,
const=const,
converter=converter,
default=default,
env_var=env_var,
help=help,
metavar=metavar,
nargs=nargs,
required=required,
type=type,
) # type: ignore
# noinspection PyPep8Naming
def Config(
*aliases: str,
search_paths: Optional[Iterable[Union[Path, str]]] = None,
choices: Optional[Iterable[str]] = None,
converter: Optional[ConverterType] = None,
const: Optional[Any] = None,
default: Optional[Any] = None,
env_var: Optional[str] = None,
help: Optional[str] = None,
metavar: Optional[str] = None,
nargs: Optional[Union[int, str, Nargs]] = Nargs.default(),
required: Optional[bool] = None,
) -> Any:
return _Config(
search_paths=search_paths,
aliases=aliases,
choices=choices,
const=const,
converter=converter,
default=default,
env_var=env_var,
help=help,
metavar=metavar,
nargs=nargs,
required=required,
) # type: ignore
LogLevel: Any = Argument(
choices=("debug", "info", "warning", "error", "critical"),
converter=lambda v: getattr(logging, v.upper(), logging.INFO),
default="info",
)
__all__ = (
"Actions",
"Argument",
"Group",
"Nargs",
"Parser",
"LogLevel",
)
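# Minimal usage sketch (class and option names are illustrative; see
# https://pypi.org/project/argclass/ for the documented behaviour):
#
#   class MyParser(Parser):
#       verbose: bool = False                  # becomes --verbose (store_true)
#       log_level = LogLevel                   # --log-level with choices + converter
#       path: str = Argument("-p", "--path", help="working directory")
#
#   parser = MyParser(auto_env_var_prefix="MYAPP_")
#   parser.parse_args(["--path", "/tmp", "--verbose"])
#   print(parser.path, parser.verbose, parser.log_level)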
|
StarcoderdataPython
|
1793343
|
import itertools # set iterator for next word in the file
import pandas as pd
from sklearn.preprocessing import LabelEncoder # convert categorical variables into numerical variables
from sklearn.tree import DecisionTreeClassifier # Import Decision Tree Classifier
from sklearn import metrics # Import scikit-learn metrics module for accuracy calculation
import sys
# open files
sbd_train_file = sys.argv[1] if len(sys.argv) > 1 else "SBD.train"
sbd_test_file = sys.argv[2] if len(sys.argv) > 2 else "SBD.test"
data = open(sbd_train_file, "r")
test_data = open(sbd_test_file, "r")
# set iterator for next word in the file
list_cycle = itertools.cycle(data)
list_cycle_test = itertools.cycle(test_data)
data_features = []
test_data_features = []
test_out = []
# extract all features from train data and save it in "list of features " List
for words in data:
word = words.split()
if word[1][-1] == ".":
label = word[2]
left_word = word[1]
list_right_words = next(list_cycle)
right_words = list_right_words.split()
right_word = right_words[1]
data_features.append(
[left_word, right_word, str(left_word[0].isupper()), str(right_word[0].isupper()),
str(len(left_word) < 3),
str(len(right_word) < 3), str(len(left_word) > 5), str(len(right_word) > 5), label])
# extract all features from test data and save it in "test_data_features " List
for words in test_data:
word = words.split()
if word[1][-1] == ".":
label = word[2]
left_word = word[1]
list_right_words = next(list_cycle_test)
right_words = list_right_words.split()
right_word = right_words[1]
test_data_features.append(
[left_word, right_word, str(left_word[0].isupper()), str(right_word[0].isupper()),
str(len(left_word) < 3),
str(len(right_word) < 3), str(len(left_word) > 5), str(len(right_word) > 5), label])
test_out.append([word[0], word[1]])
# place "Data & Test_Data" in pandas DataFrame
col_names = ["L word", "R word", "L cap ?", "R cap ?", "L less than 3", "R less than 3", "L more than 5",
"R more than 5", "label"]
train_data = pd.DataFrame(data_features, columns=col_names)
test_data = pd.DataFrame(test_data_features, columns=col_names)
the_label = test_data.label
# Encoder function to Convert Pandas Categorical Data
def Encoder(df):
columns_to_encode = list(df.select_dtypes(include=['category', 'object']))
le = LabelEncoder()
for feature in columns_to_encode:
try:
df[feature] = le.fit_transform(df[feature])
        except Exception:
print('Error encoding ' + feature)
return df
train_data_encoded = Encoder(train_data) # Encode train data set
test_data_encoded = Encoder(test_data) # Encode test data set
all_feature_cols = ["L word", "R word", "L cap ?", "R cap ?", "L less than 3", "R less than 3", "L more than 5",
"R more than 5"]
core_feature_cols = ["L word", "R word", "L cap ?", "R cap ?", "L less than 3"]
my_feature_cols = ["R less than 3", "L more than 5", "R more than 5"]
all_features = train_data_encoded[all_feature_cols] # all Features of train Data
core_feature = train_data_encoded[core_feature_cols] # core Features of train Data
my_features = train_data_encoded[my_feature_cols] # my Features of train Data
encoded_train_label = train_data_encoded.label
all_test = test_data_encoded[all_feature_cols]
core_test = test_data_encoded[core_feature_cols]
my_test = test_data_encoded[my_feature_cols]
encoded_test_label = test_data_encoded.label
# Create Decision Tree classifer object
clf_all = DecisionTreeClassifier()
clf_core = DecisionTreeClassifier()
clf_my = DecisionTreeClassifier()
# Train Decision Tree Classifer
all_fit = clf_all.fit(all_features, encoded_train_label)
core_fit = clf_core.fit(core_feature, encoded_train_label)
my_fit = clf_my.fit(my_features, encoded_train_label)
# Predict the response for test dataset
all_pred = all_fit.predict(all_test)
print("Accuracy for all features:", metrics.accuracy_score(encoded_test_label, all_pred), "%")
core_pred = core_fit.predict(core_test)
print("Accuracy for core features:", metrics.accuracy_score(encoded_test_label, core_pred), "%")
my_pred = my_fit.predict(my_test)
print("Accuracy for my features:", metrics.accuracy_score(encoded_test_label, my_pred), "%")
# create SBD.test.out csv file
df_pre = pd.DataFrame(all_pred)
test_out_df = pd.DataFrame(test_out, columns=["Word_#", "Word"])
le = LabelEncoder()
le.fit(the_label)
final_results = le.inverse_transform(df_pre[0])
test_out_df["my_prediction"] = final_results
test_out_df.to_csv("SBD.test.out")
|
StarcoderdataPython
|
4809333
|
"""This module contains Module implementations for Kalman filtering.
"""
import numpy as np
import scipy.linalg as spl
from modprop.core.modules_core import ModuleBase, InputPort, OutputPort, iterative_invalidate
from modprop.core.backprop import sum_accumulators
def transpose_matrix(m, n):
'''Generate the vectorized transpose matrix for column-major flattening.
'''
d = m * n
inds = np.arange(start=0, stop=d)
mat_trans = np.reshape(inds, (m, n), 'F').T
T = np.zeros((d, d))
T[inds, mat_trans.flatten('F')] = 1
return T
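# Illustrative check (not part of the original module): T maps the column-major
# vectorisation of an (m, n) matrix M onto the vectorisation of its transpose,
# i.e. T @ M.flatten('F') equals M.T.flatten('F').
#   M = np.arange(6).reshape(2, 3)
#   T = transpose_matrix(2, 3)
#   assert np.allclose(T.dot(M.flatten('F')), M.T.flatten('F'))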
def cho_solve_right(Lfactor, B):
"""Solves a right-inverse system B = XA for X = BA^-1.
"""
return spl.cho_solve(Lfactor, B.T).T
class PredictModule(ModuleBase):
"""Performs a Kalman filter predict step.
Input Ports
-----------
x_in : State estimate mean
P_in : State estimate covariance
Q_in : Transition covariance
Output Ports
------------
x_out : Post-prediction state estimate mean
P_out : Post-prediction state estimate covariance
Parameters
----------
A : Transition matrix
"""
def __init__(self, A):
ModuleBase.__init__(self)
self._A = A
self._x_in = InputPort(self)
self._P_in = InputPort(self)
self._Q_in = InputPort(self)
self._x_out = OutputPort(self)
self._P_out = OutputPort(self)
ModuleBase.register_inputs(self, self._x_in, self._P_in, self._Q_in)
ModuleBase.register_outputs(self, self._x_out, self._P_out)
def foreprop(self):
if not self.foreprop_ready():
return []
next_x = np.dot(self._A, self._x_in.value)
next_P = np.dot(np.dot(self._A, self._P_in.value),
self._A.T) + self._Q_in.value
ret = self._x_out.foreprop(next_x)
ret += self._P_out.foreprop(next_P)
return ret
def backprop(self):
if not self.backprop_ready():
return []
do_dxin = self._backprop_x_out()
do_dPin, do_dQ = self._backprop_P_out()
ret = self._x_in.backprop(do_dxin)
ret += self._P_in.backprop(do_dPin)
ret += self._Q_in.backprop(do_dQ)
return ret
def _backprop_x_out(self):
dxout_dxin = self._A
do_dxin = self._x_out.chain_backprop(dy_dx=dxout_dxin)
return do_dxin
def _backprop_P_out(self):
'''Perform backpropagation on the P_out port.
Each ith column of do_dPout is the gradient of element i of
the final output w.r.t. the column unrolled P_out
'''
dPout_dPin = np.kron(self._A, self._A)
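# (Uses the identity vec_F(A P A^T) = (A ⊗ A) vec_F(P) for column-major
# vectorization, so the Jacobian of P_out w.r.t. P_in is kron(A, A).)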
do_dPin = self._P_out.chain_backprop(dy_dx=dPout_dPin)
do_dQ = self._P_out.chain_backprop()
return do_dPin, do_dQ
@property
def A(self):
return self._A
@A.setter
def A(self, a):
# TODO Make A an input?
self._A = a
iterative_invalidate(self)
@property
def x_in(self):
return self._x_in
@property
def P_in(self):
return self._P_in
@property
def Q_in(self):
return self._Q_in
@property
def x_out(self):
return self._x_out
@property
def P_out(self):
return self._P_out
class UpdateModule(ModuleBase):
"""Performs a Kalman filter update step.
# TODO Extend this logic to require correct pairings?
This module does not require that all of its outputs be connected in order to backpropagate.
Input Ports
-----------
x_in : Input filter estimate mean
P_in : Input filter estimate covariance
R_in : Input observation covariance
Output Ports
------------
x_out : Post-update estimate mean
P_out : Post-update estimate covariance
v_out : Update innovation
S_out : Update innovation covariance
Parameters
----------
y : Observation vector
C : Observation matrix
"""
def __init__(self, y, C):
ModuleBase.__init__(self)
self._y = y
self._C = C
self._x_in = InputPort(self)
self._P_in = InputPort(self)
self._R_in = InputPort(self)
self._x_out = OutputPort(self)
self._P_out = OutputPort(self)
self._v_out = OutputPort(self)
self._S_out = OutputPort(self)
ModuleBase.register_inputs(self, self._x_in, self._P_in, self._R_in)
ModuleBase.register_outputs(
self, self._x_out, self._P_out, self._v_out, self._S_out)
# Cached variables
self._S_chol = None
self._K = None
def foreprop(self):
if not self.foreprop_ready():
return []
P_in = self._P_in.value
ypred = np.dot(self._C, self._x_in.value)
v = self._y - ypred
S = np.dot(np.dot(self._C, P_in), self._C.T) + self._R_in.value
#self._S_inv = np.linalg.inv(S)
self._S_chol = spl.cho_factor(S)
#self._K = np.dot(np.dot(P_in, self._C.T), self._S_inv)
self._K = cho_solve_right(self._S_chol, np.dot(P_in, self._C.T))
x_next = self._x_in.value + np.dot(self._K, v)
P_next = P_in - np.dot(np.dot(self._K, self._C), P_in)
ret = self._x_out.foreprop(x_next)
ret += self._P_out.foreprop(P_next)
ret += self._v_out.foreprop(v)
ret += self._S_out.foreprop(S)
return ret
def backprop(self):
if not self.backprop_ready():
return []
do_dxin_x, do_dPin_x, do_dR_x = self._backprop_x_out()
do_dPin_P, do_dRin_P = self._backprop_P_out()
do_dxin_v = self._backprop_v_out()
do_dPin_S, do_dRin_S = self._backprop_S_out()
ret = self._x_in.backprop(sum_accumulators((do_dxin_x, do_dxin_v)))
ret += self._P_in.backprop(sum_accumulators((do_dPin_x,
do_dPin_P, do_dPin_S)))
ret += self._R_in.backprop(sum_accumulators((do_dR_x,
do_dRin_P, do_dRin_S)))
return ret
def _backprop_x_out(self):
N = len(self._x_in.value)
dxout_dxin = np.identity(N) - np.dot(self._K, self._C)
do_dxin = self._x_out.chain_backprop(dy_dx=dxout_dxin)
Sv = spl.cho_solve(self._S_chol, self._v_out.value)
# Sv = np.dot(self._S_inv, self._v_out.value)
CTSv = np.dot(self._C.T, Sv)
KC = np.dot(self._K, self._C)
dxout_dPin = np.kron(CTSv.T, np.identity(N)) - np.kron(CTSv.T, KC)
do_dPin = self._x_out.chain_backprop(dy_dx=dxout_dPin)
dxout_dR = -np.kron(Sv.T, self._K)
do_dR = self._x_out.chain_backprop(dy_dx=dxout_dR)
return do_dxin, do_dPin, do_dR
def _backprop_P_out(self):
N = self._P_in.value.shape[0]
KC = np.dot(self._K, self._C)
I = np.identity(N)
II = np.identity(N * N)
T = transpose_matrix(N, N)
dPout_dPin = II - np.dot(II + T, np.kron(I, KC)) + np.kron(KC, KC)
do_dPin = self._P_out.chain_backprop(dy_dx=dPout_dPin)
dPout_dRin = np.kron(self._K, self._K)
do_dRin = self._P_out.chain_backprop(dy_dx=dPout_dRin)
return do_dPin, do_dRin
def _backprop_v_out(self):
dvout_dxin = -self._C
do_dxin = self._v_out.chain_backprop(dy_dx=dvout_dxin)
return do_dxin
def _backprop_S_out(self):
dSout_dPin = np.kron(self._C, self._C)
do_dPin = self._S_out.chain_backprop(dy_dx=dSout_dPin)
do_dRin = self._S_out.chain_backprop()
return do_dPin, do_dRin
@property
def y(self):
return self._y
@y.setter
def y(self, v):
self._y = v
iterative_invalidate(self)
@property
def C(self):
return self._C
@C.setter
def C(self, v):
self._C = v
iterative_invalidate(self)
@property
def x_in(self):
return self._x_in
@property
def P_in(self):
return self._P_in
@property
def R_in(self):
return self._R_in
@property
def x_out(self):
return self._x_out
@property
def P_out(self):
return self._P_out
@property
def v_out(self):
return self._v_out
@property
def S_out(self):
return self._S_out
|
StarcoderdataPython
|
177528
|
<reponame>lwpamihiranga/python_fb_post_commentor
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from time import sleep
import time
username = "<put your fb account username here>"
password = "<put your account password here>"
# example
# username = "<EMAIL>"
# password = "<PASSWORD>"
options = Options()
options.add_argument('--disable-notifications')
options.add_argument('--no-sandbox')
options.add_argument('--disable-extensions')
# options.add_argument('--headless')
prefs = {'profile.default_content_setting_values.notifications': 2}
options.add_experimental_option('prefs', prefs)
driver = webdriver.Chrome(chrome_options=options)
driver.get("<put fb post link here>")
# example
# driver.get("https://www.facebook.com/test.user/posts/1726976090778944")
username_box = driver.find_element_by_id("email")
username_box.send_keys(username)
password_box = driver.find_element_by_id("pass")
password_box.send_keys(password)
login_btn = driver.find_element_by_xpath("/html/body/div[1]/div[2]/div/div/div[1]/div/div[2]/form/table/tbody/tr[2]/td[3]/label/input")
login_btn.submit()
time.sleep(10)
comment_div_select = driver.find_element_by_xpath('/html/body/div[1]/div[3]/div[1]/div/div[2]/div[2]/div[2]/div[2]/div/div/div/div/div/div/div/div[1]/div/div[2]/div[2]/form/div/div[3]/div[4]/div[2]/div/div/div/div/div/form/div')
comment_div_select.click()
time.sleep(5)
# Set the comment limit in range(); the default is 100
for i in range(0, 100):
comment_div = driver.find_element_by_xpath('/html/body/div[1]/div[3]/div[1]/div/div[2]/div[2]/div[2]/div[2]/div/div/div/div/div/div/div/div[1]/div/div[2]/div[2]/form/div/div[3]/div[4]/div[2]/div/div/div/div/div/form/div/div/div[2]/div/div/div/div')
comment_div.send_keys('Comment ', i + 1)
# time.sleep(10) # uncomment here and give how many seconds you want to wait for every comment
comment_div.send_keys(Keys.ENTER)
time.sleep(2)
# comment_div = driver.find_element_by_xpath('/html/body/div[1]/div[3]/div[1]/div/div[2]/div[2]/div[2]/div[2]/div/div/div/div/div/div/div/div[1]/div/div[2]/div[2]/form/div/div[3]/div[4]/div[2]/div/div/div/div/div/form/div/div/div[2]/div/div/div/div')
# comment_div.send_keys('Comment 984')
# time.sleep(10)
# comment_div.send_keys(Keys.ENTER)
# time.sleep(20)
|
StarcoderdataPython
|
197618
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - 2020 -- <NAME>
# All rights reserved.
#
# License: BSD License
#
"""\
Test against issue #4.
<https://github.com/heuer/segno/issues/4>
"""
from __future__ import unicode_literals, absolute_import
from segno import consts, encoder
def test_issue_4():
qr = encoder.encode(0)
assert consts.VERSION_M1 == qr.version
assert qr.error is None
def test_issue_4_autodetect_micro():
qr = encoder.encode(1)
assert consts.VERSION_M1 == qr.version
assert qr.error is None
def test_issue_4_explicit_error():
qr = encoder.encode(1, error=None)
assert consts.VERSION_M1 == qr.version
assert qr.error is None
def test_issue_4_explicit_error2():
qr = encoder.encode(1, error='m')
assert consts.VERSION_M2 == qr.version
assert consts.ERROR_LEVEL_M == qr.error
if __name__ == '__main__':
import pytest
pytest.main([__file__])
|
StarcoderdataPython
|
185895
|
import os
import struct
import sys
import pytest
from bonsai.active_directory.acl import ACL
from bonsai.active_directory.sid import SID
from conftest import get_config
from bonsai import LDAPClient
from bonsai.active_directory import SecurityDescriptor
@pytest.fixture
def client():
""" Create a client with authentication settings. """
cfg = get_config()
url = f"ldap://{cfg['SERVER']['hostname']}:{cfg['SERVER']['port']}"
client = LDAPClient(url)
client.set_credentials(
"SIMPLE", user=cfg["SIMPLEAUTH"]["user"], password=cfg["SIMPLEAUTH"]["password"]
)
return client
def test_from_binary():
""" Test from_binary method. """
with pytest.raises(TypeError):
_ = SecurityDescriptor.from_binary(0)
with pytest.raises(TypeError):
_ = SecurityDescriptor.from_binary("INVALID")
with pytest.raises(ValueError):
_ = SecurityDescriptor.from_binary(b"\x05\nH\x00\x07\x00")
curdir = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(curdir, "testenv", "sd-sample0.bin"), "rb") as data:
input_data = data.read()
sec_desc = SecurityDescriptor.from_binary(input_data)
assert sec_desc.revision == 1
assert sec_desc.group_sid == "S-1-5-21-3526669579-2242266465-3136906013-512"
assert sec_desc.owner_sid == "S-1-5-21-3526669579-2242266465-3136906013-512"
assert sec_desc.sbz1 == 0
assert sec_desc.control["dacl_present"]
assert len(sec_desc.dacl.aces) == 24
assert sec_desc.dacl.aces[0].type == 5
assert str(sec_desc.dacl.aces[0].trustee_sid) == "S-1-5-32-554"
assert not sec_desc.control["sacl_present"]
assert sec_desc.sacl is None
with open(os.path.join(curdir, "testenv", "sd-sample1.bin"), "rb") as data:
input_data = data.read()
sec_desc = SecurityDescriptor.from_binary(input_data)
assert sec_desc.revision == 1
assert sec_desc.group_sid == "S-1-5-21-3526669579-2242266465-3136906013-512"
assert sec_desc.owner_sid == "S-1-5-21-3526669579-2242266465-3136906013-512"
assert sec_desc.sbz1 == 0
assert sec_desc.control["dacl_present"]
assert len(sec_desc.dacl.aces) == 24
assert sec_desc.dacl.aces[0].type == 5
assert sec_desc.dacl.aces[0].trustee_sid == "S-1-5-32-554"
assert sec_desc.control["sacl_present"]
assert len(sec_desc.sacl.aces) == 3
assert sec_desc.sacl.aces[0].type == 2
assert sec_desc.sacl.aces[0].trustee_sid == "S-1-1-0"
@pytest.mark.parametrize(
"file", ["sd-sample0.bin", "sd-sample1.bin"],
)
def test_to_binary(file):
""" Test to_binary method. """
curdir = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(curdir, "testenv", file), "rb") as data:
expected_data = data.read()
test_sec_desc = SecurityDescriptor.from_binary(expected_data)
test_data = test_sec_desc.to_binary()
(
expected_rev,
expected_sbz1,
expected_ctrl,
expected_offset_owner,
expected_offset_group,
expected_offset_sacl,
expected_offset_dacl,
) = struct.unpack("<BBHIIII", expected_data[:20])
(
test_rev,
test_sbz1,
test_ctrl,
test_offset_owner,
test_offset_group,
test_offset_sacl,
test_offset_dacl,
) = struct.unpack("<BBHIIII", test_data[:20])
assert len(test_data) == len(expected_data)
assert test_rev == expected_rev
assert test_sbz1 == expected_sbz1
assert test_ctrl == expected_ctrl
if expected_offset_owner:
assert (
test_data[
test_offset_owner : test_offset_owner + test_sec_desc.owner_sid.size
]
== expected_data[
expected_offset_owner : expected_offset_owner
+ test_sec_desc.owner_sid.size
]
)
if expected_offset_group:
assert (
test_data[
test_offset_group : test_offset_group + test_sec_desc.group_sid.size
]
== expected_data[
expected_offset_group : expected_offset_group
+ test_sec_desc.group_sid.size
]
)
if expected_offset_sacl:
assert (
test_data[test_offset_sacl : test_offset_sacl + test_sec_desc.sacl.size]
== expected_data[
expected_offset_sacl : expected_offset_sacl
+ test_sec_desc.sacl.size
]
)
if expected_offset_dacl:
assert (
test_data[test_offset_dacl : test_offset_dacl + test_sec_desc.dacl.size]
== expected_data[
expected_offset_dacl : expected_offset_dacl
+ test_sec_desc.dacl.size
]
)
assert SecurityDescriptor.from_binary(test_data).to_binary() == test_data
@pytest.mark.skipif(
not sys.platform.startswith("win"),
reason="Cannot query SecurityDescriptor from OpenLDAP",
)
@pytest.mark.parametrize(
"sd_flags, owner_sid, group_sid, dacl, sacl",
[
(1, True, False, False, False),
(2, False, True, False, False),
(3, True, True, False, False),
(4, False, False, True, False),
(8, False, False, False, True),
(15, True, True, True, True),
],
ids=["only-owner", "only-group", "owner-group", "only-dacl", "only-sacl", "all"],
)
def test_sd_flags(client, sd_flags, owner_sid, group_sid, dacl, sacl):
""" Test LDAP_SERVER_SD_FLAGS_OID control """
client.sd_flags = sd_flags
with client.connect() as conn:
res = conn.search(
"cn=chuck,ou=nerdherd,dc=bonsai,dc=test",
0,
attrlist=["nTSecurityDescriptor"],
)[0]
sec_desc = SecurityDescriptor.from_binary(res["nTSecurityDescriptor"][0])
assert sec_desc.revision == 1
if owner_sid:
assert sec_desc.owner_sid is not None
assert isinstance(sec_desc.owner_sid, SID)
else:
assert sec_desc.owner_sid is None
if group_sid:
assert sec_desc.group_sid is not None
assert isinstance(sec_desc.group_sid, SID)
else:
assert sec_desc.group_sid is None
assert sec_desc.control["dacl_present"] is dacl
if dacl:
assert isinstance(sec_desc.dacl, ACL)
else:
assert sec_desc.dacl is None
assert sec_desc.control["sacl_present"] is sacl
if sacl:
assert isinstance(sec_desc.sacl, ACL)
else:
assert sec_desc.sacl is None
|
StarcoderdataPython
|
1708464
|
# Generated by Django 2.0 on 2017-12-25 05:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0004_auto_20171221_0621'),
]
operations = [
migrations.AddField(
model_name='game',
name='num_players',
field=models.PositiveIntegerField(default=5),
),
]
|
StarcoderdataPython
|
1672244
|
# coding=utf-8
from django.db import models
from extended_choices import Choices
class Sponsor(models.Model):
LEVELS = Choices(
('platinum', 1, 'Platinum'),
('gold', 2, 'Gold'),
('silver', 3, 'Silver'),
('bronze', 4, 'Bronze'),
('diversity', 5, 'Diversity'),
('media', 6, 'Media'),
('partners', 7, 'Partners'),
('connectivity', 9, 'Connectivity'),
)
level = models.PositiveSmallIntegerField(choices=LEVELS, default=LEVELS.silver)
name = models.CharField(max_length=200)
logo = models.FileField(upload_to='sponsors/pyconcz/')
description = models.TextField()
link_url = models.URLField()
twitter = models.URLField(null=True, blank=True, help_text='full URL')
facebook = models.URLField(null=True, blank=True, help_text='full URL')
published = models.BooleanField(default=False)
class Meta:
ordering = ['level', 'name']
def __str__(self):
return self.name
|
StarcoderdataPython
|
3335590
|
#!/usr/bin/python
"""
Allow a web page to access local files.
This makes it easier to preview title screens and video files.
FF stores profiles in ~/.mozilla/firefox/profiles.ini
FF settings are set by creating a .js file that sets things on startup
1. count number of FF profiles.
If more than 1, give up.
2. get profile dir
3. create user.js that sets custom settings.
"""
import os
import ConfigParser
home_dir = os.path.expanduser('~')
print "home dir:", home_dir
profiles_path= os.path.join(home_dir, ".mozilla","firefox","profiles.ini")
print "profiles_path:", profiles_path
# read ini file
config = ConfigParser.RawConfigParser()
config.read([profiles_path])
profiles = [s for s in config.sections() if s !='General']
if len(profiles)>1:
print "more than one profile, you fix it."
print profiles
else:
d=dict(config.items(profiles[0]))
settings_path= os.path.join(home_dir, ".mozilla","firefox",d['path'],"user.js")
config="""
user_pref("capability.policy.policynames", "localfilelinks");
user_pref("capability.policy.localfilelinks.sites", "http://localhost:8080","http://veyepar.nextdayvideo.com:8080");
user_pref("capability.policy.localfilelinks.checkloaduri.enabled", "allAccess");
"""
print "writing to", settings_path
open(settings_path,'w').write(config)
|
StarcoderdataPython
|
1787781
|
<filename>bubbleimg/tabtools.py
import io
import os
import numpy as np
import astropy.table as at
from astropy.io import ascii
def write_row(fn, row, condi, overwrite=False, append=False):
"""
Write a row (append) to the file. If a row matching the condi conditions already exists, it is overwritten or kept depending on the overwrite parameter. If append=True, the row is written to the end without deleting previous duplicates.
Params
------
fn (str)
row (astropy tab)
condi (dictionary)
e.g., condi = {'imgtag': 'OIII5008_I'}
overwrite=False
append=False
"""
withheader = not os.path.isfile(fn)
if (not os.path.isfile(fn)):
rowstring = tab_to_string(row, withheader=withheader)
with open(fn, 'w') as f_to:
f_to.write(rowstring)
elif append:
append_row_to_end(fn, row, withheader=withheader)
elif overwrite:
fn_delete_row(fn, condi)
append_row_to_end(fn, row, withheader=withheader)
elif (not fn_has_row(fn, condi)):
append_row_to_end(fn, row, withheader=withheader)
else:
print("[tabtools] skip writing row as it exists")
def append_row_to_end(fn, row, withheader=False):
""" append the row to the end of file """
rowstring = tab_to_string(row, withheader=withheader)
with open(fn, 'a') as f_to:
f_to.write(rowstring)
def tab_to_string(tab, withheader=False):
""" turn table into string with each line seperated by \n """
# with io.BytesIO() as f_temp:
with io.StringIO() as f_temp:
tab.write(f_temp, format='ascii.csv', comment=False)
tabstring = f_temp.getvalue()
if not withheader:
# take out header
tabstring = '\n'.join(tabstring.splitlines()[1:]) + '\n'
return tabstring
def fn_has_row(fn, condi):
"""
Return whether the table has a row whose column (key) equals the given value. If the file does not exist, return False.
Params
------
fn (str): file name
condi (dictionary)
{key: value, ...}, where key is the name of the column and value is the value that the column should take.
e.g., condi = {'imgtag': 'OIII5008_I'}
"""
if os.path.isfile(fn):
tab = at.Table.read(fn)
result = tab_has_row(tab, condi)
else:
result = False
return result
def tab_has_row(tab, condi):
"""
Return whether the table has a row whose column (key) equals the given value.
Params
------
tab: table
condi (dictionary)
{key: value, ...}, where key is the name of the column and value is the value that the column should take.
e.g., condi = {'imgtag': 'OIII5008_I'}
"""
select = get_select(tab, condi)
return np.sum(select) > 0
def fn_delete_row(fn, condi):
"""
Delete the rows in the table that satisfy the condition.
Params
------
fn (str): file name
condi (dictionary)
{key: value, ...}, where key is the name of the column and value is the value that the column should take.
e.g., condi = {'imgtag': 'OIII5008_I'}
"""
if os.path.isfile(fn):
tab = at.Table.read(fn)
tab = tab_delete_row(tab, condi)
tab.write(fn, overwrite=True)
def tab_delete_row(tab, condi):
"""
Delete the rows in the table that satisfy the condition.
Params
------
tab: table
condi (dictionary)
{key: value, ...}, where key is the name of the column and value is the value that the column should take.
e.g., condi = {'imgtag': 'OIII5008_I'}
Return
------
tab
"""
select = get_select(tab, condi)
if np.sum(select)>0:
tab.remove_rows(select)
return tab
def tab_extract_row(tab, condi):
"""
return a table of only the extracted rows that meet the condition.
Params
------
tab: table
condi (dictionary)
{key: value, ...}, where key is the name of the column and value is the value that the column should take.
Return
------
tab
"""
select = get_select(tab, condi)
return tab[select]
def get_select(tab, condi):
""" return boolean array indicating whether each row of the tab is selected """
select_arr = [[str(tab[key][i]) == str(condi[key]) for i in range(len(tab))] for key in condi]
select = np.all(select_arr, axis=0)
return select
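# Example (illustrative column values): for a table whose 'imgtag' column is
# ['OIII5008_I', 'OII3727_I', 'OIII5008_I'],
# get_select(tab, {'imgtag': 'OIII5008_I'}) returns array([ True, False,  True]);
# multiple keys in condi are AND-ed together.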
def summarize(fn_in, fn_out, columns=[], condi={}, overwrite=False):
"""
Summarize the table 'fn_in' and write the results to 'fn_out'.
For each of the columns in columns, take the mean, std, median, and the 16% and 84% quantiles.
All the other columns that are not specified in columns and condi are ignored.
Params
------
fn_in
fn_out
columns=[] (list of string)
list of column names, e.g., ['area_ars', 'dmax_ars']. Default: all columns.
condi={}
conditions, e.g. {'imgtag': 'OIII5008_I'}
overwrite=False
Return
------
status (bool)
"""
if not os.path.isfile(fn_out) or overwrite:
tab_in = at.Table.read(fn_in)
if len(condi)>0:
tab_select = tab_extract_row(tab_in, condi=condi)
tab_sum = tab_select[list(condi.keys())][0] # creating headers
else:
tab_select = tab_in
tab_sum = at.Table() # no headers
if len(columns)==0:
columns = tab_in.colnames
# calculation
for col in columns:
if not col in list(condi.keys()):
arr = tab_select[col]
if arr.dtype in [float, int]:
var_mean = np.mean(arr)
var_std = np.std(arr)
var_median = np.median(arr)
var_p16 = np.percentile(arr, 16)
var_p84 = np.percentile(arr, 84)
tab_stat = at.Table([[var_mean], [var_std], [var_median], [var_p16], [var_p84], ], names=[col+tag for tag in ['_mean', '_std', '_median', '_p16', '_p84']])
tab_sum = at.hstack([tab_sum, tab_stat])
tab_sum.write(fn_out, overwrite=overwrite)
else:
print("[tabtools] skip summarizing as files exist")
return os.path.isfile(fn_out)
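# Usage sketch (hypothetical file names): summarize the 'area_ars' column of
# the rows tagged 'OIII5008_I', writing *_mean, *_std, *_median, *_p16 and
# *_p84 columns to the output file.
#   summarize('measurements.csv', 'summary.csv', columns=['area_ars'],
#             condi={'imgtag': 'OIII5008_I'}, overwrite=True)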
def extract_line_from_file(fn, iline=1, comment='#', fill_trailing_empty=True):
"""
return the iline-th non-empty line of the file that does not start with the comment character ('#' by default).
if fill_trailing_empty is True and iline is larger than the number of lines, return comma-separated empty values with the same number of fields as the header line.
iline can be either an integer or a slice instance; e.g., iline=slice(1, None, None) returns all lines after the first one.
Params
------
fn (str)
iline=1 (int or slice instance)
comment='#'
fill_trailing_empty=True
Return
------
list of strings (lines)
"""
with open(fn, 'r') as f:
data = f.read()
lines = data.split('\n')
lines_noncomment = []
for line in lines:
if len(line) > 0:
if (line[0] != comment):
lines_noncomment += [line]
if isinstance(iline, slice):
return lines_noncomment[iline]
elif iline < len(lines_noncomment):
return lines_noncomment[iline]
elif fill_trailing_empty and len(lines_noncomment)>0:
n_comma = lines_noncomment[0].count(',')
return "," * n_comma
else:
raise Exception("[batch] _extract_line_from_file iline exceeding the number of lines")
|
StarcoderdataPython
|
114320
|
<gh_stars>1-10
import os
import unittest
import tempfile
import csv
import warnings
from requests import exceptions
from kbcstorage.tables import Tables
from kbcstorage.buckets import Buckets
class TestTables(unittest.TestCase):
def setUp(self):
self.tables = Tables(os.getenv('KBC_TEST_API_URL'),
os.getenv('KBC_TEST_TOKEN'))
self.buckets = Buckets(os.getenv('KBC_TEST_API_URL'),
os.getenv('KBC_TEST_TOKEN'))
try:
self.buckets.delete('in.c-py-test-tables', force=True)
except exceptions.HTTPError as e:
if e.response.status_code != 404:
raise
self.buckets.create(name='py-test-tables', stage='in')
# https://github.com/boto/boto3/issues/454
warnings.simplefilter("ignore", ResourceWarning)
def tearDown(self):
try:
self.buckets.delete('in.c-py-test-tables', force=True)
except exceptions.HTTPError as e:
if e.response.status_code != 404:
raise
def test_create_table_minimal(self):
file, path = tempfile.mkstemp(prefix='sapi-test')
with open(path, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=['col1', 'col2'],
lineterminator='\n', delimiter=',',
quotechar='"')
writer.writeheader()
writer.writerow({'col1': 'ping', 'col2': 'pong'})
os.close(file)
table_id = self.tables.create(name='some-table', file_path=path,
bucket_id='in.c-py-test-tables')
table_info = self.tables.detail(table_id)
with self.subTest():
self.assertEqual(table_id, table_info['id'])
with self.subTest():
self.assertEqual('in.c-py-test-tables', table_info['bucket']['id'])
def test_create_table_primary_key(self):
file, path = tempfile.mkstemp(prefix='sapi-test')
with open(path, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=['col1', 'col2'],
lineterminator='\n', delimiter=',',
quotechar='"')
writer.writeheader()
writer.writerow({'col1': 'ping', 'col2': 'pong'})
writer.writerow({'col1': 'pong', 'col2': 'ping'})
os.close(file)
table_id = self.tables.create(name='some-table', file_path=path,
bucket_id='in.c-py-test-tables',
primary_key=['col1', 'col2'])
table_info = self.tables.detail(table_id)
with self.subTest():
self.assertEqual(table_id, table_info['id'])
with self.subTest():
self.assertEqual('in.c-py-test-tables', table_info['bucket']['id'])
with self.subTest():
self.assertEqual(['col1', 'col2'], table_info['primaryKey'])
def test_table_detail(self):
file, path = tempfile.mkstemp(prefix='sapi-test')
with open(path, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=['col1', 'col2'],
lineterminator='\n', delimiter=',',
quotechar='"')
writer.writeheader()
writer.writerow({'col1': 'ping', 'col2': 'pong'})
table_id = self.tables.create(name='some-table', file_path=path,
bucket_id='in.c-py-test-tables')
table_info = self.tables.detail(table_id)
with self.subTest():
self.assertEqual(table_id, table_info['id'])
with self.subTest():
self.assertEqual('some-table', table_info['name'])
with self.subTest():
self.assertTrue('in.c-py-test-tables.some-table' in table_info['uri'])
with self.subTest():
self.assertEqual([], table_info['primaryKey'])
with self.subTest():
self.assertEqual(['col1', 'col2'], table_info['columns'])
with self.subTest():
self.assertTrue('created' in table_info)
with self.subTest():
self.assertTrue('lastImportDate' in table_info)
with self.subTest():
self.assertTrue('lastChangeDate' in table_info)
with self.subTest():
self.assertTrue('rowsCount' in table_info)
with self.subTest():
self.assertTrue('metadata' in table_info)
with self.subTest():
self.assertTrue('bucket' in table_info)
with self.subTest():
self.assertTrue('columnMetadata' in table_info)
def test_delete_table(self):
file, path = tempfile.mkstemp(prefix='sapi-test')
with open(path, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=['col1', 'col2'],
lineterminator='\n', delimiter=',',
quotechar='"')
writer.writeheader()
writer.writerow({'col1': 'ping', 'col2': 'pong'})
table_id = self.tables.create(name='some-table', file_path=path,
bucket_id='in.c-py-test-tables')
table_info = self.tables.detail(table_id)
self.assertEqual(table_id, table_info['id'])
self.tables.delete(table_id)
try:
self.tables.detail('some-totally-non-existent-table')
except exceptions.HTTPError as e:
if e.response.status_code != 404:
raise
def test_invalid_create(self):
try:
self.tables.detail('some-totally-non-existent-table')
except exceptions.HTTPError as e:
if e.response.status_code != 404:
raise
def test_import_table_incremental(self):
file, path = tempfile.mkstemp(prefix='sapi-test')
with open(path, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=['col1', 'col2'],
lineterminator='\n', delimiter=',',
quotechar='"')
writer.writeheader()
writer.writerow({'col1': 'ping', 'col2': 'pong'})
os.close(file)
table_id = self.tables.create(name='some-table', file_path=path,
bucket_id='in.c-py-test-tables')
table_info = self.tables.detail(table_id)
with self.subTest():
self.assertEqual(table_id, table_info['id'])
with self.subTest():
self.assertEqual(1, table_info['rowsCount'])
file, path = tempfile.mkstemp(prefix='sapi-test')
with open(path, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=['col1', 'col2'],
lineterminator='\n', delimiter=',',
quotechar='"')
writer.writeheader()
writer.writerow({'col1': 'foo', 'col2': 'bar'})
os.close(file)
self.tables.load(table_id=table_id, file_path=path,
is_incremental=True)
table_info = self.tables.detail(table_id)
with self.subTest():
self.assertEqual(table_id, table_info['id'])
with self.subTest():
self.assertEqual(2, table_info['rowsCount'])
def test_import_table_no_incremental(self):
file, path = tempfile.mkstemp(prefix='sapi-test')
with open(path, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=['col1', 'col2'],
lineterminator='\n', delimiter=',',
quotechar='"')
writer.writeheader()
writer.writerow({'col1': 'ping', 'col2': 'pong'})
os.close(file)
table_id = self.tables.create(name='some-table', file_path=path,
bucket_id='in.c-py-test-tables')
table_info = self.tables.detail(table_id)
with self.subTest():
self.assertEqual(table_id, table_info['id'])
with self.subTest():
self.assertEqual(1, table_info['rowsCount'])
file, path = tempfile.mkstemp(prefix='sapi-test')
with open(path, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=['col1', 'col2'],
lineterminator='\n', delimiter=',',
quotechar='"')
writer.writeheader()
writer.writerow({'col1': 'foo', 'col2': 'bar'})
os.close(file)
self.tables.load(table_id=table_id, file_path=path,
is_incremental=False)
table_info = self.tables.detail(table_id)
with self.subTest():
self.assertEqual(table_id, table_info['id'])
with self.subTest():
self.assertEqual(1, table_info['rowsCount'])
def test_table_preview(self):
file, path = tempfile.mkstemp(prefix='sapi-test')
with open(path, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=['col1', 'col2'],
lineterminator='\n', delimiter=',',
quotechar='"')
writer.writeheader()
writer.writerow({'col1': 'ping', 'col2': 'pong'})
writer.writerow({'col1': 'foo', 'col2': 'bar'})
os.close(file)
table_id = self.tables.create(name='some-table', file_path=path,
bucket_id='in.c-py-test-tables')
contents = self.tables.preview(table_id=table_id)
lines = contents.split('\n')
self.assertEqual(['', '"col1","col2"', '"foo","bar"', '"ping","pong"'],
sorted(lines))
def test_table_export(self):
file, path = tempfile.mkstemp(prefix='sapi-test')
with open(path, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=['col1', 'col2'],
lineterminator='\n', delimiter=',',
quotechar='"')
writer.writeheader()
writer.writerow({'col1': 'ping', 'col2': 'pong'})
writer.writerow({'col1': 'foo', 'col2': 'bar'})
os.close(file)
table_id = self.tables.create(name='some-table', file_path=path,
bucket_id='in.c-py-test-tables')
result = self.tables.export(table_id=table_id)
self.assertIsNotNone(result)
def test_table_export_file_plain(self):
file, path = tempfile.mkstemp(prefix='sapi-test')
with open(path, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=['col1', 'col2'],
lineterminator='\n', delimiter=',',
quotechar='"')
writer.writeheader()
writer.writerow({'col1': 'ping', 'col2': 'pong'})
writer.writerow({'col1': 'foo', 'col2': 'bar'})
os.close(file)
table_id = self.tables.create(name='some-table', file_path=path,
bucket_id='in.c-py-test-tables')
temp_path = tempfile.TemporaryDirectory()
local_path = self.tables.export_to_file(table_id=table_id,
path_name=temp_path.name,
is_gzip=False)
with open(local_path, mode='rt') as file:
lines = file.readlines()
self.assertEqual(['"col1","col2"\n', '"foo","bar"\n',
'"ping","pong"\n'],
sorted(lines))
def test_table_export_file_gzip(self):
file, path = tempfile.mkstemp(prefix='sapi-test')
with open(path, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=['col1', 'col2'],
lineterminator='\n', delimiter=',',
quotechar='"')
writer.writeheader()
writer.writerow({'col1': 'ping', 'col2': 'pong'})
writer.writerow({'col1': 'foo', 'col2': 'bar'})
os.close(file)
table_id = self.tables.create(name='some-table', file_path=path,
bucket_id='in.c-py-test-tables')
temp_path = tempfile.TemporaryDirectory()
local_path = self.tables.export_to_file(table_id=table_id,
path_name=temp_path.name,
is_gzip=True)
with open(local_path, mode='rt') as file:
lines = file.readlines()
self.assertEqual(['"col1","col2"\n', '"foo","bar"\n',
'"ping","pong"\n'],
sorted(lines))
def test_table_export_sliced(self):
file, path = tempfile.mkstemp(prefix='sapi-test')
with open(path, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=['col1', 'col2'],
lineterminator='\n', delimiter=',',
quotechar='"')
writer.writeheader()
writer.writerow({'col1': 'ping', 'col2': 'pong'})
os.close(file)
table_id = self.tables.create(name='some-table', file_path=path,
bucket_id='in.c-py-test-tables')
table_info = self.tables.detail(table_id)
with self.subTest():
self.assertEqual(table_id, table_info['id'])
with self.subTest():
self.assertEqual(1, table_info['rowsCount'])
file, path = tempfile.mkstemp(prefix='sapi-test')
with open(path, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=['col1', 'col2'],
lineterminator='\n', delimiter=',',
quotechar='"')
writer.writeheader()
writer.writerow({'col1': 'foo', 'col2': 'bar'})
os.close(file)
self.tables.load(table_id=table_id, file_path=path,
is_incremental=True)
temp_path = tempfile.TemporaryDirectory()
local_path = self.tables.export_to_file(table_id=table_id,
path_name=temp_path.name)
with open(local_path, mode='rt') as file:
lines = file.readlines()
self.assertEqual(['"col1","col2"\n', '"foo","bar"\n',
'"ping","pong"\n'],
sorted(lines))
def test_table_columns(self):
file, path = tempfile.mkstemp(prefix='sapi-test')
with open(path, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=['col1', 'col2', 'col3', 'col4'],
lineterminator='\n', delimiter=',',
quotechar='"')
writer.writeheader()
writer.writerow({'col1': 'ping', 'col2': 'pong', 'col3': 'king', 'col4': 'kong'})
os.close(file)
table_id = self.tables.create(name='some-table', file_path=path, bucket_id='in.c-py-test-tables')
temp_path = tempfile.TemporaryDirectory()
local_path = self.tables.export_to_file(table_id=table_id,
path_name=temp_path.name,
is_gzip=False,
columns=['col3', 'col2'])
with open(local_path, mode='rt') as file:
lines = file.readlines()
self.assertEqual(['"col3","col2"\n', '"king","pong"\n'], sorted(lines))
|
StarcoderdataPython
|
3244734
|
<filename>pyouroboros/__init__.py
VERSION = "1.4.1"
BRANCH = "master"
|
StarcoderdataPython
|
1668742
|
import csv
import json
import logging
import os
import re
from collections import OrderedDict
from io import BytesIO, StringIO
from zipfile import ZipFile
from urllib.parse import urljoin
import json_merge_patch
import requests
from ocdsextensionregistry import ExtensionRegistry
from ocdsdocumentationsupport.models import Codelist
logger = logging.getLogger('ocdsdocumentationsupport')
def _json_loads(data):
"""
Loads JSON data, preserving order.
"""
return json.loads(data, object_pairs_hook=OrderedDict)
class ProfileBuilder:
def __init__(self, standard_tag, extension_versions, registry_base_url=None, schema_base_url=None):
"""
Accepts an OCDS version and a dictionary of extension identifiers and versions, and initializes a reader of the
extension registry.
"""
self.standard_tag = standard_tag
self.extension_versions = extension_versions
self._file_cache = {}
self.schema_base_url = schema_base_url
# Allows setting the registry URL to e.g. a pull request, when working on a profile.
if not registry_base_url:
registry_base_url = 'https://raw.githubusercontent.com/open-contracting/extension_registry/master/'
self.registry = ExtensionRegistry(registry_base_url + 'extension_versions.csv')
def extensions(self):
"""
Returns the matching extension versions from the registry.
"""
for identifier, version in self.extension_versions.items():
yield self.registry.get(id=identifier, version=version)
def release_schema_patch(self):
"""
Returns the consolidated release schema patch.
"""
profile_patch = OrderedDict()
# Replaces `null` with sentinel values, to preserve the null'ing of fields by extensions in the final patch.
for extension in self.extensions():
data = re.sub(r':\s*null\b', ': "REPLACE_WITH_NULL"', extension.remote('release-schema.json'))
json_merge_patch.merge(profile_patch, _json_loads(data))
return _json_loads(json.dumps(profile_patch).replace('"REPLACE_WITH_NULL"', 'null'))
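# (Background, illustrative: RFC 7386 JSON Merge Patch removes a key whose
# patch value is null, e.g. merging {"a": 1} with {"a": null} yields {}.
# The sentinel keeps such null'ings alive across successive extension merges
# until the final dump/replace step above.)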
def patched_release_schema(self):
"""
Returns the patched release schema.
"""
content = self.get_standard_file_contents('release-schema.json')
patched = json_merge_patch.merge(_json_loads(content), self.release_schema_patch())
if self.schema_base_url:
patched['id'] = urljoin(self.schema_base_url, 'release-schema.json')
return patched
def release_package_schema(self):
"""
Returns a release package schema. If `schema_base_url` was provided, updates schema URLs.
"""
data = _json_loads(self.get_standard_file_contents('release-package-schema.json'))
if self.schema_base_url:
data['id'] = urljoin(self.schema_base_url, 'release-package-schema.json')
data['properties']['releases']['items']['$ref'] = urljoin(self.schema_base_url, 'release-schema.json')
return data
def standard_codelists(self):
"""
Returns the standard's codelists as Codelist objects.
"""
codelists = OrderedDict()
# Populate the file cache.
self.get_standard_file_contents('release-schema.json')
# This method shouldn't need to know about `_file_cache`.
for path, content in self._file_cache.items():
name = os.path.basename(path)
if 'codelists' in path.split(os.sep) and name:
codelists[name] = Codelist(name)
codelists[name].extend(csv.DictReader(StringIO(content)), 'OCDS Core')
return list(codelists.values())
def extension_codelists(self):
"""
Returns the extensions' codelists as Codelist objects.
The extensions' codelists may be new, or may add codes to (+name.csv), remove codes from (-name.csv) or replace
(name.csv) the codelists of the standard or other extensions.
Codelist additions and removals are merged across extensions. If new codelists or codelist replacements differ
across extensions, an error is raised.
"""
codelists = OrderedDict()
# Keep the original content of codelists, to compare across extensions.
originals = {}
for extension in self.extensions():
# We use the "codelists" field in extension.json (which standard-maintenance-scripts validates). An
# extension is not guaranteed to offer a download URL, which is the only other way to get codelists.
for name in extension.metadata.get('codelists', []):
content = extension.remote('codelists/' + name)
if name not in codelists:
codelists[name] = Codelist(name)
originals[name] = content
elif not codelists[name].patch:
assert originals[name] == content, 'codelist {} differs across extensions'.format(name)
continue
codelists[name].extend(csv.DictReader(StringIO(content)), extension.metadata['name']['en'])
# If a codelist replacement (name.csv) is consistent with additions (+name.csv) and removals (-name.csv), the
# latter should be removed. In other words, the expectations are that:
#
# * A codelist replacement shouldn't omit added codes.
# * A codelist replacement shouldn't include removed codes.
# * If codes are added after a codelist is replaced, this should result in duplicate codes.
# * If codes are removed after a codelist is replaced, this should result in no change.
#
# If these expectations are not met, an error is raised. As such, profile authors only have to handle cases
# where codelist modifications are inconsistent across extensions.
for codelist in list(codelists.values()):
basename = codelist.basename
if codelist.patch and basename in codelists:
name = codelist.name
codes = codelists[basename].codes
if codelist.addend:
for row in codelist:
code = row['Code']
assert code in codes, '{} added by {}, but not in {}'.format(code, name, basename)
logger.info('{0} has the codes added by {1} - ignoring {1}'.format(basename, name))
else:
for row in codelist:
code = row['Code']
assert code not in codes, '{} removed by {}, but in {}'.format(code, name, basename)
logger.info('{0} has no codes removed by {1} - ignoring {1}'.format(basename, name))
del codelists[name]
return list(codelists.values())
def patched_codelists(self):
"""
Returns patched and new codelists as Codelist objects.
"""
codelists = OrderedDict()
for codelist in self.standard_codelists():
codelists[codelist.name] = codelist
for codelist in self.extension_codelists():
if codelist.patch:
basename = codelist.basename
if codelist.addend:
# Add the rows.
codelists[basename].rows.extend(codelist.rows)
# Note that the rows may not all have the same columns, but DictWriter can handle this.
else:
# Remove the codes. Multiple extensions can remove the same codes.
removed = codelist.codes
codelists[basename].rows = [row for row in codelists[basename] if row['Code'] not in removed]
else:
# Set or replace the rows.
codelists[codelist.name] = codelist
return list(codelists.values())
def get_standard_file_contents(self, basename):
"""
Returns the contents of the file within the standard.
Downloads the given version of the standard, and caches the contents of files in the schema/ directory.
"""
if not self._file_cache:
url = 'https://codeload.github.com/open-contracting/standard/zip/' + self.standard_tag
response = requests.get(url)
response.raise_for_status()
zipfile = ZipFile(BytesIO(response.content))
names = zipfile.namelist()
path = 'standard/schema/'
start = len(names[0] + path)
for name in names[1:]:
if path in name:
self._file_cache[name[start:]] = zipfile.read(name).decode('utf-8')
return self._file_cache[basename]
|
StarcoderdataPython
|
4800957
|
<gh_stars>1-10
"""Declare the models related to Ashley ."""
import uuid
from django.db import models
from machina.apps.forum.abstract_models import AbstractForum as MachinaAbstractForum
from machina.core.db.models import get_model, model_factory
LTIContext = get_model("ashley", "LTIContext") # pylint: disable=C0103
class AbstractForum(MachinaAbstractForum):
"""
Forum model for Ashley.
It is based on django-machina's Forum model and adds fields to be able to
map a LTI request to a forum.
"""
# pylint: disable=all
# Pylint is disabled because it crashes while testing Foreign keys declared in the
# django-machina's parent abstract model. This is a known issue in pylint-django with
# foreign keys models referenced by their name.
# (See https://github.com/PyCQA/pylint-django#known-issues )
lti_id = models.UUIDField(
null=False, default=uuid.uuid4, editable=False, unique=False, db_index=True
)
lti_contexts = models.ManyToManyField(LTIContext)
archived = models.BooleanField(default=False)
class Meta(MachinaAbstractForum.Meta):
abstract = True
Forum = model_factory(AbstractForum)
from machina.apps.forum.abstract_models import * # noqa isort:skip
|
StarcoderdataPython
|
3355192
|
<filename>blueapps/utils/sites/open/__init__.py
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
from blueapps.account import get_user_model
logger = logging.getLogger('blueapps')
def get_component_client_common_args():
"""
Get the common_args required by ComponentClient.
@return:
{
bk_username = 'xxx'
}
@rtype: dict
"""
try:
last_login_user = \
get_user_model().objects.all().order_by("-last_login")[0]
except IndexError:
logger.exception("There is not a last_login_user")
raise IndexError("There is not a last_login_user")
username = last_login_user.username
return dict(bk_username=username)
|
StarcoderdataPython
|
1793327
|
<reponame>evinism/littlebaker
from tinybaker import Transform, InputTag, OutputTag
from pickle import load, dumps
from base64 import b64encode
def test_data_uri_text():
class T(Transform):
foo = InputTag("foo")
bar = OutputTag("bar")
def script(self):
with self.foo.open() as f:
data = f.read()
with self.bar.open() as f:
f.write(data)
bar_path = "/tmp/datauri"
T(
input_paths={"foo": "data://Zm9vIGNvbnRlbnRz"},
output_paths={"bar": bar_path},
overwrite=True,
).run()
with open(bar_path, "r") as f:
assert f.read() == "foo contents"
def test_data_uri_utf8():
class T(Transform):
foo = InputTag("foo")
bar = OutputTag("bar")
def script(self):
with self.foo.open() as f:
data = f.read()
with self.bar.open() as f:
f.write(data)
bar_path = "/tmp/datauri2"
T(
input_paths={"foo": "data://2YjZitmD2YrYqNmK2K/ZitinINin2YTYudix2KjZitip"},
output_paths={"bar": bar_path},
overwrite=True,
).run()
with open(bar_path, "r") as f:
assert f.read() == "ويكيبيديا العربية"
def test_data_binary():
class T(Transform):
foo = InputTag("foo")
bar = OutputTag("bar")
def script(self):
with self.foo.openbin() as f:
data = load(f)
assert data == {"hi": str, "bye": [int, test_data_binary]}
with self.bar.open() as f:
f.write("success")
bar_path = "/tmp/datauri"
obj = {"hi": str, "bye": [int, test_data_binary]}
uri_data = b64encode(dumps(obj))
T(
input_paths={"foo": "data://{}".format(uri_data.decode("ascii"))},
output_paths={"bar": "/dev/null"},
overwrite=True,
).run()
|
StarcoderdataPython
|
1633465
|
<reponame>peterbe/
import json
import getpass
import urllib.parse
import click
import requests
from gg.utils import error_out, success_out, info_out
from gg.state import read, update, remove
from gg.main import cli, pass_config
BUGZILLA_URL = "https://bugzilla.mozilla.org"
@cli.group()
@click.option(
"-u",
"--bugzilla-url",
default=BUGZILLA_URL,
help=f"URL to Bugzilla instance (default: {BUGZILLA_URL})",
)
@pass_config
def bugzilla(config, bugzilla_url):
"""General tool for connecting to Bugzilla. The default URL
is that for bugzilla.mozilla.org but you can override that.
Once you're signed in you can use those credentials to automatically
fetch bug summaries even of private bugs.
"""
config.bugzilla_url = bugzilla_url
@bugzilla.command()
@click.argument("api_key", default="")
@pass_config
def login(config, api_key=""):
"""Store your Bugzilla API Key"""
if not api_key:
info_out(
"If you don't have an API Key, go to:\n"
"https://bugzilla.mozilla.org/userprefs.cgi?tab=apikey\n"
)
api_key = getpass.getpass("API Key: ")
# Before we store it, let's test it.
url = urllib.parse.urljoin(config.bugzilla_url, "/rest/whoami")
assert url.startswith("https://"), url
response = requests.get(url, params={"api_key": api_key})
if response.status_code == 200:
if response.json().get("error"):
error_out(f"Failed - {response.json()}")
else:
update(
config.configfile,
{
"BUGZILLA": {
"bugzilla_url": config.bugzilla_url,
"api_key": api_key,
# "login": login,
}
},
)
success_out("Yay! It worked!")
else:
error_out(f"Failed - {response.status_code} ({response.json()})")
@bugzilla.command()
@pass_config
def logout(config):
"""Remove and forget your Bugzilla credentials"""
state = read(config.configfile)
if state.get("BUGZILLA"):
remove(config.configfile, "BUGZILLA")
success_out("Forgotten")
else:
error_out("No stored Bugzilla credentials")
def get_summary(config, bugnumber):
params = {"ids": bugnumber, "include_fields": "summary,id"}
# If this function is called from a plugin, we don't have
# config.bugzilla_url this time.
base_url = getattr(config, "bugzilla_url", BUGZILLA_URL)
state = read(config.configfile)
credentials = state.get("BUGZILLA")
if credentials:
# cool! let's use that
base_url = credentials["bugzilla_url"]
params["api_key"] = credentials["api_key"]
url = urllib.parse.urljoin(base_url, "/rest/bug/")
assert url.startswith("https://"), url
response = requests.get(url, params=params)
response.raise_for_status()
if response.status_code == 200:
data = response.json()
bug = data["bugs"][0]
bug_url = urllib.parse.urljoin(base_url, f"/show_bug.cgi?id={bug['id']}")
return bug["summary"], bug_url
return None, None
@bugzilla.command()
@click.option(
"-b", "--bugnumber", type=int, help="Optionally test fetching a specific bug"
)
@pass_config
def test(config, bugnumber):
"""Test your saved Bugzilla API Key."""
state = read(config.configfile)
credentials = state.get("BUGZILLA")
if not credentials:
error_out("No API Key saved. Run: gg bugzilla login")
if config.verbose:
info_out(f"Using: {credentials['bugzilla_url']}")
if bugnumber:
summary, _ = get_summary(config, bugnumber)
if summary:
info_out("It worked!")
success_out(summary)
else:
error_out("Unable to fetch")
else:
url = urllib.parse.urljoin(credentials["bugzilla_url"], "/rest/whoami")
assert url.startswith("https://"), url
response = requests.get(url, params={"api_key": credentials["api_key"]})
if response.status_code == 200:
if response.json().get("error"):
error_out(f"Failed! - {response.json()}")
else:
success_out(json.dumps(response.json(), indent=2))
else:
error_out(f"Failed to query - {response.status_code} ({response.json()})")
|
StarcoderdataPython
|
1625125
|
# ==============================================================================
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# ==============================================================================
"""Openvino Tensorflow installation test
"""
from __future__ import print_function
import tensorflow as tf
import openvino_tensorflow
if __name__ == '__main__':
print("TensorFlow version: ", tf.version.GIT_VERSION, tf.version.VERSION)
|
StarcoderdataPython
|
3354305
|
<gh_stars>0
import json
from unittest.mock import Mock, patch
from freezegun import freeze_time
from satellite.aliases import AliasGeneratorType, AliasStoreType
from satellite.aliases.manager import redact
from satellite.aliases.store import AliasStore
from .base import BaseHandlerTestCase
@freeze_time('2020-11-01')
class TestAliasesHandler(BaseHandlerTestCase):
def test_post_ok(self):
uuid_patch = patch(
'satellite.aliases.manager.uuid.uuid4',
Mock(side_effect=[
'c20b81b0-d90d-42d1-bf6d-eea5e6981196',
'884a0c8e-de04-46de-945a-c77c3acf783e',
])
)
uuid_patch.start()
self.addCleanup(uuid_patch.stop)
response = self.fetch(
self.get_url('/aliases'),
method='POST',
body=json.dumps({
'data': [
{'value': 123321, 'format': 'UUID'},
{'value': 'abccba', 'format': 'UUID'},
]
}),
)
self.assertEqual(response.code, 200, response.body)
self.assertMatchSnapshot(json.loads(response.body))
store = AliasStore()
self.assertEqual(len(store.get_by_value('123321')), 1)
self.assertEqual(len(store.get_by_value('abccba')), 1)
def test_get_ok(self):
uuid_patch = patch(
'satellite.aliases.manager.uuid.uuid4',
Mock(return_value='29e34c80-c9f2-4c59-97a5-355e1ed3018f'),
)
uuid_patch.start()
self.addCleanup(uuid_patch.stop)
alias = redact(
'123321',
generator_type=AliasGeneratorType.UUID,
store_type=AliasStoreType.PERSISTENT,
)
response = self.fetch(self.get_url(f'/aliases?q={alias.public_alias}'))
self.assertEqual(response.code, 200, response.body)
self.assertMatchSnapshot(json.loads(response.body))
def test_get_unknown_alias(self):
uuid_patch = patch(
'satellite.aliases.manager.uuid.uuid4',
Mock(return_value='b93104db-67c3-4c11-9131-d4955e740a19'),
)
uuid_patch.start()
self.addCleanup(uuid_patch.stop)
alias = redact(
'123321',
generator_type=AliasGeneratorType.UUID,
store_type=AliasStoreType.PERSISTENT,
)
response = self.fetch(self.get_url(
f'/aliases?q={alias.public_alias},tok_tas_kgq94RpcPrAMSHJWh7o7P6',
))
self.assertEqual(response.code, 200, response.body)
self.assertMatchSnapshot(json.loads(response.body))
def test_get_missing_param(self):
response = self.fetch(self.get_url('/aliases'))
self.assertEqual(response.code, 400, response.body)
@freeze_time('2020-11-01')
class TestAliasHandler(BaseHandlerTestCase):
def test_get_ok(self):
uuid_patch = patch(
'satellite.aliases.manager.uuid.uuid4',
Mock(return_value='7612e3e6-0a62-4ed8-a329-403bd26ce538'),
)
uuid_patch.start()
self.addCleanup(uuid_patch.stop)
alias = redact(
'123321',
generator_type=AliasGeneratorType.UUID,
store_type=AliasStoreType.PERSISTENT,
)
response = self.fetch(self.get_url(f'/aliases/{alias.public_alias}'))
self.assertEqual(response.code, 200, response.body)
self.assertMatchSnapshot(json.loads(response.body))
def test_get_unknown_alias(self):
response = self.fetch(self.get_url(
'/aliases/tok_tas_kgq94RpcPrAMSHJWh7o7P6',
))
self.assertEqual(response.code, 400, response.body)
self.assertMatchSnapshot(json.loads(response.body))
|
StarcoderdataPython
|
3280841
|
<reponame>toastisme/dials
from math import pi
import wx
from annlib_ext import AnnAdaptorSelfInclude
from wx.lib.agw import floatspin
import gltbx
import gltbx.gl as gl
import libtbx.phil
import wxtbx.utils
from libtbx import Auto
from scitbx.array_family import flex
from scitbx.math import minimum_covering_sphere
from wxtbx.segmentedctrl import (
SEGBTN_HORIZONTAL,
SegmentedRadioControl,
SegmentedToggleControl,
)
from dials.util import wx_viewer
from dials.util.reciprocal_lattice import Render3d
phil_scope = libtbx.phil.parse(
"""
include scope dials.util.reciprocal_lattice.phil_scope
show_rotation_axis = False
.type = bool
show_beam_vector = False
.type = bool
show_reciprocal_cell = False
.type = bool
label_nearest_point = False
.type = bool
marker_size = Auto
.type = int(value_min=1)
autospin = False
.type = bool
model_view_matrix = None
.type = floats(size=16)
filter_by_panel = None
.type = int
""",
process_includes=True,
)
# WX3 - WX4 compatibility
def _rewrite_event(unbound):
"""Decorator to intercept the event and add missing instance methods"""
def _wrapp(self, event):
event.GetPositionTuple = event.GetPosition
return unbound(self, event)
return _wrapp
# HACK: Monkeypatch wxtbx so that we don't use old interfaces
wxtbx.segmentedctrl.SegmentedControl.HitTest = _rewrite_event(
wxtbx.segmentedctrl.SegmentedControl.HitTest
)
wxtbx.segmentedctrl.SegmentedControl.OnMotion = _rewrite_event(
wxtbx.segmentedctrl.SegmentedControl.OnMotion
)
class ReciprocalLatticeViewer(wx.Frame, Render3d):
def __init__(self, parent, id, title, size, settings=None, *args, **kwds):
wx.Frame.__init__(self, parent, id, title, size=size, *args, **kwds)
Render3d.__init__(self, settings=settings)
self.parent = self.GetParent()
self.statusbar = self.CreateStatusBar()
self.sizer = wx.BoxSizer(wx.HORIZONTAL)
self.create_settings_panel()
self.sizer.Add(self.settings_panel, 0, wx.EXPAND)
self.create_viewer_panel()
self.sizer.Add(self.viewer, 1, wx.EXPAND | wx.ALL)
self.SetSizerAndFit(self.sizer)
self.SetMinSize(self.settings_panel.GetSize())
self.Bind(wx.EVT_CLOSE, self.OnClose, self)
self.Bind(wx.EVT_WINDOW_DESTROY, self.OnDestroy, self)
self.Bind(wx.EVT_ACTIVATE, self.OnActive)
self.viewer.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
self.viewer.SetFocus()
def load_models(self, experiments, reflections):
Render3d.load_models(self, experiments, reflections)
if self.settings.beam_centre is not None:
self.settings_panel.beam_fast_ctrl.SetValue(self.settings.beam_centre[0])
self.settings_panel.beam_slow_ctrl.SetValue(self.settings.beam_centre[1])
if self.settings.marker_size is Auto:
max_radius = max(self.reflections["rlp"].norms())
volume = 4 / 3 * pi * max_radius ** 3
density = len(self.reflections) / volume
# Set marker size to between 5 and 50 depending on density: a density of
# 1000 maps to marker_size 50, and a density of 20000 maps to marker_size 5
marker_size = (-45 / 19000) * density + (5 + 900 / 19)
marker_size = max(marker_size, 5)
marker_size = min(marker_size, 50)
self.settings.marker_size = marker_size
self.settings_panel.marker_size_ctrl.SetValue(self.settings.marker_size)
self.settings_panel.add_experiments_buttons()
self.set_detector_panel_numbers()
def OnActive(self, event):
if self.IsShown() and type(self.viewer).__name__ != "_wxPyDeadObject":
self.viewer.Refresh()
def OnClose(self, event):
self.Unbind(wx.EVT_ACTIVATE)
self.Destroy()
event.Skip()
def OnDestroy(self, event):
if self.parent is not None:
self.parent.viewer = None
event.Skip()
def OnKeyDown(self, event):
key = event.GetUnicodeKey()
if key == wx.WXK_NONE:
key = event.GetKeyCode()
dxs = {wx.WXK_LEFT: -1, wx.WXK_RIGHT: +1, wx.WXK_UP: 0, wx.WXK_DOWN: 0}
dys = {wx.WXK_LEFT: 0, wx.WXK_RIGHT: 0, wx.WXK_UP: +1, wx.WXK_DOWN: -1}
if key in dxs:
dx = dxs[key]
dy = dys[key]
if event.ShiftDown():
scale = 0.1
else:
scale = 1.0
self.do_Step(dx, dy, scale)
def do_Step(self, dx, dy, scale):
v = self.viewer
rc = v.rotation_center
gl.glMatrixMode(gl.GL_MODELVIEW)
gltbx.util.rotate_object_about_eye_x_and_y(
scale, rc[0], rc[1], rc[2], dx, dy, 0, 0
)
v.OnRedraw()
def create_viewer_panel(self):
if self.settings.black_background:
background_rgb = (0, 0, 0)
else:
background_rgb = (255, 255, 255)
self.viewer = RLVWindow(
settings=self.settings,
parent=self,
size=(800, 600),
background_rgb=background_rgb,
)
def create_settings_panel(self):
self.settings_panel = SettingsWindow(self, -1, style=wx.RAISED_BORDER)
def set_points(self):
Render3d.set_points(self)
self.settings_panel.d_min_ctrl.SetValue(self.settings.d_min)
self.settings_panel.z_min_ctrl.SetValue(self.settings.z_min)
self.settings_panel.z_max_ctrl.SetValue(self.settings.z_max)
self.settings_panel.n_min_ctrl.SetValue(self.settings.n_min)
self.settings_panel.n_max_ctrl.SetValue(self.settings.n_max)
if self.settings.partiality_min is not None:
self.settings_panel.partiality_min_ctrl.SetValue(
self.settings.partiality_min
)
if self.settings.partiality_max is not None:
self.settings_panel.partiality_max_ctrl.SetValue(
self.settings.partiality_max
)
def set_detector_panel_numbers(self):
panel_numbers = sorted(set(self.reflections["panel"]))
panel_numbers = list(map(str, panel_numbers))
self.settings_panel.filter_by_panel_ctrl.SetItems(panel_numbers)
def update_settings(self, *args, **kwds):
if self.settings.beam_centre_panel and self.settings.beam_centre:
self.set_beam_centre(
self.settings.beam_centre_panel, self.settings.beam_centre
)
self.map_points_to_reciprocal_space()
self.set_points()
self.viewer.update_settings(*args, **kwds)
def update_statusbar(self):
model_view_matrix = gltbx.util.get_gl_modelview_matrix()
txt = (
"Model view matrix: "
+ "["
+ ", ".join("%.4f" % m for m in model_view_matrix)
+ "]"
)
self.statusbar.SetStatusText(txt)
class SettingsWindow(wxtbx.utils.SettingsPanel):
def __init__(self, *args, **kwds):
wxtbx.utils.SettingsPanel.__init__(self, *args, **kwds)
self.Bind(wx.EVT_CHAR, self.OnChar)
def OnChar(self, event):
self.GetParent().viewer.OnChar(event)
def add_controls(self):
# d_min control
self.d_min_ctrl = floatspin.FloatSpin(
parent=self, increment=0.05, min_val=0, digits=2
)
self.d_min_ctrl.Bind(wx.EVT_SET_FOCUS, lambda evt: None)
if wx.VERSION >= (2, 9): # XXX FloatSpin bug in 2.9.2/wxOSX_Cocoa
self.d_min_ctrl.SetBackgroundColour(self.GetBackgroundColour())
box = wx.BoxSizer(wx.HORIZONTAL)
self.panel_sizer.Add(box)
label = wx.StaticText(self, -1, "High resolution:")
box.Add(label, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
box.Add(self.d_min_ctrl, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.Bind(floatspin.EVT_FLOATSPIN, self.OnChangeSettings, self.d_min_ctrl)
self.z_min_ctrl = floatspin.FloatSpin(
parent=self, increment=1, min_val=0, digits=0
)
self.z_min_ctrl.Bind(wx.EVT_SET_FOCUS, lambda evt: None)
if wx.VERSION >= (2, 9): # XXX FloatSpin bug in 2.9.2/wxOSX_Cocoa
self.z_min_ctrl.SetBackgroundColour(self.GetBackgroundColour())
box = wx.BoxSizer(wx.HORIZONTAL)
self.panel_sizer.Add(box)
label = wx.StaticText(self, -1, "Min Z")
box.Add(label, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
box.Add(self.z_min_ctrl, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.Bind(floatspin.EVT_FLOATSPIN, self.OnChangeSettings, self.z_min_ctrl)
self.z_max_ctrl = floatspin.FloatSpin(
parent=self, increment=1, min_val=0, digits=0
)
self.z_max_ctrl.Bind(wx.EVT_SET_FOCUS, lambda evt: None)
if wx.VERSION >= (2, 9): # XXX FloatSpin bug in 2.9.2/wxOSX_Cocoa
self.z_max_ctrl.SetBackgroundColour(self.GetBackgroundColour())
box = wx.BoxSizer(wx.HORIZONTAL)
self.panel_sizer.Add(box)
label = wx.StaticText(self, -1, "Max Z")
box.Add(label, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
box.Add(self.z_max_ctrl, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.Bind(floatspin.EVT_FLOATSPIN, self.OnChangeSettings, self.z_max_ctrl)
# Control for spot size (its usefulness depends on the n_signal column in the
# reflection file; the setting is ignored if that column is not present).
self.n_min_ctrl = floatspin.FloatSpin(
parent=self, increment=1, min_val=0, digits=0
)
self.n_min_ctrl.Bind(wx.EVT_SET_FOCUS, lambda evt: None)
if wx.VERSION >= (2, 9): # XXX FloatSpin bug in 2.9.2/wxOSX_Cocoa
self.n_min_ctrl.SetBackgroundColour(self.GetBackgroundColour())
box = wx.BoxSizer(wx.HORIZONTAL)
self.panel_sizer.Add(box)
label = wx.StaticText(self, -1, "Min Pixels")
box.Add(label, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
box.Add(self.n_min_ctrl, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.Bind(floatspin.EVT_FLOATSPIN, self.OnChangeSettings, self.n_min_ctrl)
self.n_max_ctrl = floatspin.FloatSpin(
parent=self, increment=1, min_val=0, digits=0
)
self.n_max_ctrl.Bind(wx.EVT_SET_FOCUS, lambda evt: None)
if wx.VERSION >= (2, 9): # XXX FloatSpin bug in 2.9.2/wxOSX_Cocoa
self.n_max_ctrl.SetBackgroundColour(self.GetBackgroundColour())
box = wx.BoxSizer(wx.HORIZONTAL)
self.panel_sizer.Add(box)
label = wx.StaticText(self, -1, "Max Pixels")
box.Add(label, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
box.Add(self.n_max_ctrl, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.Bind(floatspin.EVT_FLOATSPIN, self.OnChangeSettings, self.n_max_ctrl)
# end new control
self.partiality_min_ctrl = floatspin.FloatSpin(
parent=self, increment=0.01, digits=3, min_val=0, max_val=1
)
self.partiality_min_ctrl.Bind(wx.EVT_SET_FOCUS, lambda evt: None)
if wx.VERSION >= (2, 9): # XXX FloatSpin bug in 2.9.2/wxOSX_Cocoa
self.partiality_min_ctrl.SetBackgroundColour(self.GetBackgroundColour())
box = wx.BoxSizer(wx.HORIZONTAL)
self.panel_sizer.Add(box)
label = wx.StaticText(self, -1, "Min partiality")
box.Add(label, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
box.Add(self.partiality_min_ctrl, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.Bind(
floatspin.EVT_FLOATSPIN, self.OnChangeSettings, self.partiality_min_ctrl
)
self.partiality_max_ctrl = floatspin.FloatSpin(
parent=self, increment=0.01, digits=3, min_val=0, max_val=1
)
self.partiality_max_ctrl.Bind(wx.EVT_SET_FOCUS, lambda evt: None)
if wx.VERSION >= (2, 9): # XXX FloatSpin bug in 2.9.2/wxOSX_Cocoa
self.partiality_max_ctrl.SetBackgroundColour(self.GetBackgroundColour())
box = wx.BoxSizer(wx.HORIZONTAL)
self.panel_sizer.Add(box)
label = wx.StaticText(self, -1, "Max partiality")
box.Add(label, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
box.Add(self.partiality_max_ctrl, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.Bind(
floatspin.EVT_FLOATSPIN, self.OnChangeSettings, self.partiality_max_ctrl
)
self.filter_by_panel_ctrl = wx.CheckListBox(parent=self, choices=["1"])
box = wx.BoxSizer(wx.HORIZONTAL)
self.panel_sizer.Add(box)
label = wx.StaticText(self, -1, "Filter by panel")
box.Add(label, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
box.Add(self.filter_by_panel_ctrl, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.filter_by_panel_ctrl.Bind(wx.EVT_CHECKLISTBOX, self.OnChangeSettings)
ctrls = self.create_controls(
setting="show_rotation_axis", label="Show rotation axis"
)
self.panel_sizer.Add(ctrls[0], 0, wx.ALL, 5)
ctrls = self.create_controls(
setting="show_beam_vector", label="Show beam vector"
)
self.panel_sizer.Add(ctrls[0], 0, wx.ALL, 5)
ctrls = self.create_controls(
setting="show_reciprocal_cell", label="Show reciprocal cell"
)
self.panel_sizer.Add(ctrls[0], 0, wx.ALL, 5)
ctrls = self.create_controls(
setting="label_nearest_point", label="Label nearest point"
)
self.panel_sizer.Add(ctrls[0], 0, wx.ALL, 5)
self.reverse_phi_ctrl = self.create_controls(
setting="reverse_phi", label="Invert rotation axis"
)[0]
self.panel_sizer.Add(self.reverse_phi_ctrl, 0, wx.ALL, 5)
self.Bind(wx.EVT_CHECKBOX, self.OnChangeSettings, self.reverse_phi_ctrl)
self.crystal_frame_tooltip = wx.ToolTip(
"Show the reciprocal lattice(s) in the crystal rather than the laboratory frame"
)
self.crystal_frame_ctrl = self.create_controls(
setting="crystal_frame", label="Show in crystal frame"
)[0]
self.crystal_frame_ctrl.SetToolTip(self.crystal_frame_tooltip)
self.panel_sizer.Add(self.crystal_frame_ctrl, 0, wx.ALL, 5)
self.Bind(wx.EVT_CHECKBOX, self.OnChangeSettings, self.crystal_frame_ctrl)
self.beam_panel_ctrl = floatspin.FloatSpin(
parent=self, min_val=0, increment=1, digits=0
)
self.beam_panel_ctrl.Bind(wx.EVT_SET_FOCUS, lambda evt: None)
if wx.VERSION >= (2, 9): # XXX FloatSpin bug in 2.9.2/wxOSX_Cocoa
self.beam_panel_ctrl.SetBackgroundColour(self.GetBackgroundColour())
box = wx.BoxSizer(wx.HORIZONTAL)
self.panel_sizer.Add(box)
label = wx.StaticText(self, -1, "Beam centre panel")
box.Add(label, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
box.Add(self.beam_panel_ctrl, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.Bind(floatspin.EVT_FLOATSPIN, self.OnChangeSettings, self.beam_panel_ctrl)
self.beam_fast_ctrl = floatspin.FloatSpin(parent=self, increment=0.01, digits=2)
self.beam_fast_ctrl.Bind(wx.EVT_SET_FOCUS, lambda evt: None)
if wx.VERSION >= (2, 9): # XXX FloatSpin bug in 2.9.2/wxOSX_Cocoa
self.beam_fast_ctrl.SetBackgroundColour(self.GetBackgroundColour())
box = wx.BoxSizer(wx.HORIZONTAL)
self.panel_sizer.Add(box)
label = wx.StaticText(self, -1, "Beam centre (mm)")
box.Add(label, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
box.Add(self.beam_fast_ctrl, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.Bind(floatspin.EVT_FLOATSPIN, self.OnChangeSettings, self.beam_fast_ctrl)
self.beam_slow_ctrl = floatspin.FloatSpin(parent=self, increment=0.01, digits=2)
self.beam_slow_ctrl.Bind(wx.EVT_SET_FOCUS, lambda evt: None)
if wx.VERSION >= (2, 9): # XXX FloatSpin bug in 2.9.2/wxOSX_Cocoa
self.beam_slow_ctrl.SetBackgroundColour(self.GetBackgroundColour())
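# Note: the slow-axis spinner is added to the same row as the fast-axis
# spinner above ("Beam centre (mm)"), so no new sizer or label is created here.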
box.Add(self.beam_slow_ctrl, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.Bind(floatspin.EVT_FLOATSPIN, self.OnChangeSettings, self.beam_slow_ctrl)
self.marker_size_ctrl = floatspin.FloatSpin(
parent=self, increment=1, digits=0, min_val=1
)
self.marker_size_ctrl.Bind(wx.EVT_SET_FOCUS, lambda evt: None)
if wx.VERSION >= (2, 9): # XXX FloatSpin bug in 2.9.2/wxOSX_Cocoa
self.marker_size_ctrl.SetBackgroundColour(self.GetBackgroundColour())
box = wx.BoxSizer(wx.HORIZONTAL)
self.panel_sizer.Add(box)
label = wx.StaticText(self, -1, "Marker size:")
box.Add(label, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
box.Add(self.marker_size_ctrl, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.Bind(floatspin.EVT_FLOATSPIN, self.OnChangeSettings, self.marker_size_ctrl)
self.btn = SegmentedRadioControl(self, style=SEGBTN_HORIZONTAL)
self.btn.AddSegment("all")
self.btn.AddSegment("indexed")
self.btn.AddSegment("unindexed")
self.btn.AddSegment("integrated")
self.btn.SetSelection(
["all", "indexed", "unindexed", "integrated"].index(self.settings.display)
)
self.Bind(wx.EVT_RADIOBUTTON, self.OnChangeSettings, self.btn)
self.GetSizer().Add(self.btn, 0, wx.ALL, 5)
self.outlier_btn = SegmentedRadioControl(self, style=SEGBTN_HORIZONTAL)
self.outlier_btn.AddSegment("all")
self.outlier_btn.AddSegment("inliers")
self.outlier_btn.AddSegment("outliers")
self.outlier_btn.SetSelection(
[None, "inliers", "outliers"].index(self.settings.outlier_display)
)
self.Bind(wx.EVT_RADIOBUTTON, self.OnChangeSettings, self.outlier_btn)
self.GetSizer().Add(self.outlier_btn, 0, wx.ALL, 5)
def add_value_widgets(self, sizer):
sizer.Add(
wx.StaticText(self.panel, -1, "Value:"),
0,
wx.ALL | wx.ALIGN_CENTER_VERTICAL,
5,
)
self.value_info = wx.TextCtrl(
self.panel, -1, size=(80, -1), style=wx.TE_READONLY
)
sizer.Add(self.value_info, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
def add_experiments_buttons(self):
n = flex.max(self.parent.reflections_input["id"])
if n <= 0:
self.expt_btn = None
return
box = wx.BoxSizer(wx.VERTICAL)
self.panel_sizer.Add(box)
label = wx.StaticText(self, -1, "Experiment ids:")
box.Add(label, 0, wx.ALL, 5)
self.expt_btn = SegmentedToggleControl(self, style=SEGBTN_HORIZONTAL)
for i in range(-1, n + 1):
self.expt_btn.AddSegment(str(i))
if (
self.settings.experiment_ids is not None
and i in self.settings.experiment_ids
):
self.expt_btn.SetValue(i + 1, True)
self.expt_btn.Realize()
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnChangeSettings, self.expt_btn)
box.Add(self.expt_btn, 0, wx.ALL, 5)
def OnChangeSettings(self, event):
self.settings.d_min = self.d_min_ctrl.GetValue()
self.settings.z_min = self.z_min_ctrl.GetValue()
self.settings.z_max = self.z_max_ctrl.GetValue()
self.settings.n_min = int(self.n_min_ctrl.GetValue())
self.settings.n_max = int(self.n_max_ctrl.GetValue())
self.settings.partiality_min = self.partiality_min_ctrl.GetValue()
self.settings.partiality_max = self.partiality_max_ctrl.GetValue()
old_beam_panel = self.settings.beam_centre_panel
old_beam_centre = self.settings.beam_centre
self.settings.beam_centre_panel = self.beam_panel_ctrl.GetValue()
self.settings.beam_centre = (
self.beam_fast_ctrl.GetValue(),
self.beam_slow_ctrl.GetValue(),
)
self.settings.reverse_phi = self.reverse_phi_ctrl.GetValue()
self.settings.crystal_frame = self.crystal_frame_ctrl.GetValue()
self.settings.marker_size = self.marker_size_ctrl.GetValue()
self.settings.filter_by_panel = self.filter_by_panel_ctrl.GetCheckedStrings()
for i, display in enumerate(("all", "indexed", "unindexed", "integrated")):
if self.btn.values[i]:
self.settings.display = display
break
for i, display in enumerate(("all", "inliers", "outliers")):
if self.outlier_btn.values[i]:
self.settings.outlier_display = display
break
if self.expt_btn is not None:
expt_ids = []
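# Segments were added for experiment ids -1..n (see add_experiments_buttons),
# so segment index i corresponds to experiment id i - 1.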
for i in range(len(self.expt_btn.segments)):
if self.expt_btn.GetValue(i):
expt_ids.append(i - 1)
self.settings.experiment_ids = expt_ids
try:
self.parent.update_settings()
except ValueError: # Handle beam centre changes, which could fail
self.settings.beam_centre_panel = old_beam_panel
self.beam_panel_ctrl.SetValue(old_beam_panel)
self.settings.beam_centre = old_beam_centre
if old_beam_centre is not None:
self.beam_fast_ctrl.SetValue(old_beam_centre[0])
self.beam_slow_ctrl.SetValue(old_beam_centre[1])
class RLVWindow(wx_viewer.show_points_and_lines_mixin):
def __init__(self, settings, *args, **kwds):
super().__init__(*args, **kwds)
self.settings = settings
self.points = flex.vec3_double()
self.colors = None
self.palette = None
self.rotation_axis = None
self.beam_vector = None
self.recip_latt_vectors = None
self.recip_crystal_vectors = None
self.flag_show_minimum_covering_sphere = False
self.minimum_covering_sphere = None
self.field_of_view_y = 0.001
if self.settings.autospin:
self.autospin_allowed = True
self.yspin = 1
self.xspin = 1
self.autospin = True
def set_points(self, points):
self.points = points
self.points_display_list = None
if self.minimum_covering_sphere is None:
self.update_minimum_covering_sphere()
def set_points_data(self, reflections):
dstar = reflections["rlp"].norms()
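# Replace zero norms with a tiny value so the d_spacing computation below
# never divides by zero.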
dstar.set_selected(dstar == 0, 1e-8)
self.points_data = {
"panel": reflections["panel"],
"id": reflections["id"],
"xyz": reflections["xyzobs.px.value"],
"d_spacing": 1 / dstar,
}
if "miller_index" in reflections:
self.points_data["miller_index"] = reflections["miller_index"]
def set_colors(self, colors):
assert len(colors) == len(self.points)
self.colors = colors
def set_palette(self, palette):
self.palette = palette
def draw_points(self):
if self.points_display_list is None:
self.points_display_list = gltbx.gl_managed.display_list()
self.points_display_list.compile()
gl.glLineWidth(1)
if self.colors is None:
self.colors = flex.vec3_double(len(self.points), (1, 1, 1))
for point, color in zip(self.points, self.colors):
self.draw_cross_at(point, color=color)
self.points_display_list.end()
self.points_display_list.call()
def set_rotation_axis(self, axis):
self.rotation_axis = axis
def set_beam_vector(self, beam):
self.beam_vector = beam
def set_reciprocal_lattice_vectors(self, vectors_per_crystal):
self.recip_latt_vectors = vectors_per_crystal
def set_reciprocal_crystal_vectors(self, vectors_per_crystal):
self.recip_crystal_vectors = vectors_per_crystal
# --- user input and settings
def update_settings(self):
self.points_display_list = None
self.Refresh()
def update_minimum_covering_sphere(self):
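# Fit the minimum covering sphere to a random subsample of at most 1000
# points, presumably to keep this fast for very large reflection tables.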
n_points = min(1000, self.points.size())
isel = flex.random_permutation(self.points.size())[:n_points]
self.minimum_covering_sphere = minimum_covering_sphere(self.points.select(isel))
def draw_cross_at(self, xyz, color=(1, 1, 1), f=None):
(x, y, z) = xyz
if f is None:
f = 0.01 * self.settings.marker_size
wx_viewer.show_points_and_lines_mixin.draw_cross_at(
self, (x, y, z), color=color, f=f
)
def DrawGL(self):
wx_viewer.show_points_and_lines_mixin.DrawGL(self)
if self.rotation_axis is not None and self.settings.show_rotation_axis:
self.draw_axis(self.rotation_axis, "phi")
if self.beam_vector is not None and self.settings.show_beam_vector:
self.draw_axis(self.beam_vector, "beam")
if self.settings.show_reciprocal_cell:
# if we don't have one sort of vector we don't have the other either
vectors = self.recip_latt_vectors
if self.settings.crystal_frame:
vectors = self.recip_crystal_vectors
if vectors:
for i, axes in enumerate(vectors):
if self.settings.experiment_ids:
if i not in self.settings.experiment_ids:
continue
j = (i + 1) % self.palette.size()
color = self.palette[j]
self.draw_cell(axes, color)
if self.settings.label_nearest_point:
self.label_nearest_point()
self.GetParent().update_statusbar()
def draw_axis(self, axis, label):
if self.minimum_covering_sphere is None:
self.update_minimum_covering_sphere()
s = self.minimum_covering_sphere
scale = max(max(s.box_max()), abs(min(s.box_min())))
gltbx.fonts.ucs_bitmap_8x13.setup_call_lists()
gl.glDisable(gl.GL_LIGHTING)
if self.settings.black_background:
gl.glColor3f(1.0, 1.0, 1.0)
else:
gl.glColor3f(0.0, 0.0, 0.0)
gl.glLineWidth(1.0)
gl.glBegin(gl.GL_LINES)
gl.glVertex3f(0.0, 0.0, 0.0)
gl.glVertex3f(axis[0] * scale, axis[1] * scale, axis[2] * scale)
gl.glEnd()
gl.glRasterPos3f(
0.5 + axis[0] * scale, 0.2 + axis[1] * scale, 0.2 + axis[2] * scale
)
gltbx.fonts.ucs_bitmap_8x13.render_string(label)
gl.glEnable(gl.GL_LINE_STIPPLE)
gl.glLineStipple(4, 0xAAAA)
gl.glBegin(gl.GL_LINES)
gl.glVertex3f(0.0, 0.0, 0.0)
gl.glVertex3f(-axis[0] * scale, -axis[1] * scale, -axis[2] * scale)
gl.glEnd()
gl.glDisable(gl.GL_LINE_STIPPLE)
def draw_cell(self, axes, color):
astar, bstar, cstar = axes[0], axes[1], axes[2]
gltbx.fonts.ucs_bitmap_8x13.setup_call_lists()
gl.glDisable(gl.GL_LIGHTING)
gl.glColor3f(*color)
gl.glLineWidth(2.0)
gl.glBegin(gl.GL_LINES)
gl.glVertex3f(0.0, 0.0, 0.0)
gl.glVertex3f(*astar.elems)
gl.glVertex3f(0.0, 0.0, 0.0)
gl.glVertex3f(*bstar.elems)
gl.glVertex3f(0.0, 0.0, 0.0)
gl.glVertex3f(*cstar.elems)
gl.glEnd()
gl.glRasterPos3f(*(1.01 * astar).elems)
gltbx.fonts.ucs_bitmap_8x13.render_string("a*")
gl.glRasterPos3f(*(1.01 * bstar).elems)
gltbx.fonts.ucs_bitmap_8x13.render_string("b*")
gl.glRasterPos3f(*(1.01 * cstar).elems)
gltbx.fonts.ucs_bitmap_8x13.render_string("c*")
gl.glEnable(gl.GL_LINE_STIPPLE)
gl.glLineStipple(4, 0xAAAA)
farpoint = astar + bstar + cstar
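# farpoint is the cell corner diagonally opposite the origin; the three
# stippled line loops below outline the cell faces that do not touch the origin.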
# a* face
gl.glBegin(gl.GL_LINE_LOOP)
gl.glVertex3f(*farpoint.elems)
gl.glVertex3f(*(farpoint - bstar).elems)
gl.glVertex3f(*(farpoint - bstar - cstar).elems)
gl.glVertex3f(*(farpoint - cstar).elems)
gl.glEnd()
# b* face
gl.glBegin(gl.GL_LINE_LOOP)
gl.glVertex3f(*farpoint.elems)
gl.glVertex3f(*(farpoint - astar).elems)
gl.glVertex3f(*(farpoint - astar - cstar).elems)
gl.glVertex3f(*(farpoint - cstar).elems)
gl.glEnd()
# c* face
gl.glBegin(gl.GL_LINE_LOOP)
gl.glVertex3f(*farpoint.elems)
gl.glVertex3f(*(farpoint - bstar).elems)
gl.glVertex3f(*(farpoint - bstar - astar).elems)
gl.glVertex3f(*(farpoint - astar).elems)
gl.glEnd()
gl.glDisable(gl.GL_LINE_STIPPLE)
def label_nearest_point(self):
ann = AnnAdaptorSelfInclude(self.points.as_double(), 3)
ann.query(self.rotation_center)
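# ann.nn holds the indices of the nearest neighbours to the rotation centre,
# closest first; the nearest point is the one labelled below.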
i = ann.nn[0]
gltbx.fonts.ucs_bitmap_8x13.setup_call_lists()
gl.glDisable(gl.GL_LIGHTING)
gl.glColor3f(1.0, 1.0, 1.0)
gl.glLineWidth(1.0)
xyz = self.points_data["xyz"][i]
exp_id = self.points_data["id"][i]
panel = self.points_data["panel"][i]
d_spacing = self.points_data["d_spacing"][i]
label = (
f"id: {exp_id}; panel: {panel}\n"
f"xyz: {xyz[0]:.1f} {xyz[1]:.1f} {xyz[2]:.1f}\n"
f"res: {d_spacing:.2f} Angstrom"
)
if "miller_index" in self.points_data and exp_id != -1:
hkl = self.points_data["miller_index"][i]
label += f"\nhkl: {hkl}"
line_spacing = round(gltbx.fonts.ucs_bitmap_8x13.height())
for j, string in enumerate(label.splitlines()):
gl.glRasterPos3f(*self.points[i])
gl.glBitmap(0, 0, 0.0, 0.0, line_spacing, -j * line_spacing, b" ")
gltbx.fonts.ucs_bitmap_8x13.render_string(string)
def rotate_view(self, x1, y1, x2, y2, shift_down=False, scale=0.1):
super().rotate_view(x1, y1, x2, y2, shift_down=shift_down, scale=scale)
def OnLeftUp(self, event):
self.was_dragged = True
super().OnLeftUp(event)
def initialize_modelview(self, eye_vector=None, angle=None):
super().initialize_modelview(eye_vector=eye_vector, angle=angle)
self.rotation_center = (0, 0, 0)
self.move_to_center_of_viewport(self.rotation_center)
if self.settings.model_view_matrix is not None:
gl.glLoadMatrixd(self.settings.model_view_matrix)
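# Minimal usage sketch (illustrative only; assumes DIALS `experiments` and
# `reflections` objects and a populated `settings` object as expected by
# Render3d; parameter names follow ReciprocalLatticeViewer.__init__ above):
#     app = wx.App()
#     frame = ReciprocalLatticeViewer(
#         None, wx.ID_ANY, "Reciprocal lattice viewer", size=(1024, 768),
#         settings=settings,
#     )
#     frame.load_models(experiments, reflections)
#     frame.Show()
#     app.MainLoop()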