filename | text
---|---|
the-stack_106_18334
|
from construct import *
from .common import *
"""
Formats: wpm
Version: 0
The path map file describes the pathing in a map.
"""
PathPoint = FlagsEnum(Byte,
can_walk = 0x02,
can_fly = 0x04,
can_build = 0x08,
is_blight = 0x20,
is_ground = 0x40, # or water
is_unknown = 0x80
)
PathMapFile = Struct(
"file_id" / Const(b"MP3W"),
"version" / Integer,
"path_map_width" / Integer,
"path_map_height" / Integer,
"path_map" / Array(this.path_map_width * this.path_map_height, PathPoint)
)
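# Editor's illustrative sketch (not part of the original file): a minimal example of
# using the PathMapFile Struct above. Only standard construct calls (Struct.parse on
# raw bytes) are used; the path argument is whatever .wpm file the caller has on disk.
def _example_parse_wpm(path):
    """Parse a .wpm file and return the decoded container."""
    with open(path, "rb") as wpm_file:
        parsed = PathMapFile.parse(wpm_file.read())
    # parsed.path_map is a flat list of width*height FlagsEnum containers with
    # boolean attributes such as can_walk, can_fly and can_build.
    return parsed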
|
the-stack_106_18336
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Factory that instantiates and returns the valid (dynamic) module/class
from a machine name.
"""
from models.ModelFactory import ModelFactory
from executor.Execute import Execute
class MachineFactory(ModelFactory):
"""Identify and return the correct machine model"""
def __init__(self, name):
self.name = name
if not name:
self.name = self._auto_detect()
super(MachineFactory, self).__init__('machines')
def _auto_detect(self):
"""Detect architecture if empty"""
result = Execute().run(['uname', '-m'])
if result.returncode:
msg = "'uname -m' error: [" + result.stderr + "]"
raise RuntimeError("Error auto-detecting machine type: " + msg)
if not result.stdout:
raise RuntimeError("Unable to detect machine type with uname")
return result.stdout.strip()
def getMachine(self):
"""Loads machine model and returns"""
model = self._load_model(self.name + '_model.py')
if not model:
raise ImportError("Can't find model for machine " + self.name)
return model
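# Editor's illustrative sketch (not part of the original module): how the factory is
# typically driven. An empty name triggers auto-detection via `uname -m`; the example
# value "x86_64" is only a placeholder.
def _example_get_machine(name="x86_64"):
    """Return the machine model for `name` (auto-detected when name is empty)."""
    return MachineFactory(name).getMachine()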
|
the-stack_106_18339
|
import os
import string
import random
import json
def store_cpp_format(parsed_json):
file_name = "backend/data/"+''.join(random.choices(string.ascii_letters + string.digits, k=8)) + ".data"
f = open(file_name, "w+")
#load major grade-model
json_file = open("assets/informatik.json")
    model = json.load(json_file)
    json_file.close()
maxDelCredits = model["maxDelCredits"]
#first line: maximum credits deletable
f.write(str(maxDelCredits)+"\n")
sections = model["sections"]
for section in sections:
if "id" in section:
section_id = str(section["id"])
if(section_id in parsed_json and not parsed_json[section_id] == {}):
section_grades = parsed_json[section_id]
subject_ids = section_grades.keys()
grade_credits = dict((sub["id"], sub["credits"]) for sub in section["subjects"])
f.write(";".join([str(section_grades[sid])+","+str(grade_credits[sid])+","+str(sid) for sid in subject_ids]))
f.write("\n")
elif(section_id == "appl"):
minors = section["categories"]
for minor in minors:
minor_id = minor["id"]
if(minor_id in parsed_json and not parsed_json[minor_id] == {}):
minor_grades = parsed_json[minor_id]
subject_ids = minor_grades.keys()
grade_credits = dict((sub["id"], sub["credits"]) for sub in minor["subjects"])
f.write(";".join([str(minor_grades[sid])+","+str(grade_credits[sid])+","+str(sid) for sid in subject_ids]))
break
    # close (and flush) the data file so the external solver can read complete data
    f.close()
    return file_name
def solve_cpp(parsed_json):
file_name = store_cpp_format(parsed_json)
stream = os.popen("./backend/solve " + file_name)
console_output = stream.read()
if console_output:
return {"error": console_output}
output = []
with open(file_name) as f:
output = f.read().splitlines()
os.remove(file_name)
return {"grade": float(output[0]), "deletions": output[1:]}
|
the-stack_106_18346
|
#!/usr/bin/env python3
"""
CPAchecker is a tool for configurable software verification.
This file is part of CPAchecker.
Copyright (C) 2007-2014 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
CPAchecker web page:
http://cpachecker.sosy-lab.org
"""
# prepare for Python 3
from __future__ import absolute_import, division, print_function, unicode_literals
import glob
import os
import platform
import sys
sys.dont_write_bytecode = True # prevent creation of .pyc files
for egg in glob.glob(os.path.join(os.path.dirname(__file__), os.pardir, 'lib', 'python-benchmark', '*.whl')):
sys.path.insert(0, egg)
import benchexec.benchexec
# Add ./benchmark/tools to __path__ of benchexec.tools package
# such that additional tool-wrapper modules can be placed in this directory.
import benchexec.tools
benchexec.tools.__path__ = [os.path.join(os.path.dirname(__file__), 'benchmark', 'tools')] + benchexec.tools.__path__
class Benchmark(benchexec.benchexec.BenchExec):
"""
An extension of BenchExec for use with CPAchecker
that supports executing the benchmarks in the VerifierCloud.
"""
DEFAULT_OUTPUT_PATH = "test/results/"
def create_argument_parser(self):
parser = super(Benchmark, self).create_argument_parser()
vcloud_args = parser.add_argument_group('Options for using VerifierCloud')
vcloud_args.add_argument("--cloud",
dest="cloud",
action="store_true",
help="Use VerifierCloud to execute benchmarks.")
vcloud_args.add_argument("--cloudMaster",
dest="cloudMaster",
metavar="HOST",
help="Sets the master host of the VerifierCloud instance to be used. If this is a HTTP URL, the web interface is used.")
vcloud_args.add_argument("--cloudPriority",
dest="cloudPriority",
metavar="PRIORITY",
help="Sets the priority for this benchmark used in the VerifierCloud. Possible values are IDLE, LOW, HIGH, URGENT.")
vcloud_args.add_argument("--cloudCPUModel",
dest="cpu_model", type=str, default=None,
metavar="CPU_MODEL",
help="Only execute runs in the VerifierCloud on CPU models that contain the given string.")
vcloud_args.add_argument("--cloudUser",
dest="cloudUser",
metavar="USER:PWD",
help="The user and password for the VerifierCloud (if using the web interface).")
vcloud_args.add_argument("--revision",
dest="revision",
metavar="BRANCH:REVISION",
help="The svn revision of CPAchecker to use (if using the web interface of the VerifierCloud).")
vcloud_args.add_argument("--justReprocessResults",
dest="reprocessResults",
action="store_true",
help="Do not run the benchmarks. Assume that the benchmarks were already executed in the VerifierCloud and the log files are stored (use --startTime to point the script to the results).")
vcloud_args.add_argument("--cloudClientHeap",
dest="cloudClientHeap",
metavar="MB",
default=100,
type=int,
help="The heap-size (in MB) used by the VerifierCloud client. A too small heap-size may terminate the client without any results.")
vcloud_args.add_argument("--cloudSubmissionThreads",
dest="cloud_threads",
default=5,
type=int,
help="The number of threads used for parallel run submission (if using the web interface of the VerifierCloud).")
vcloud_args.add_argument("--cloudPollInterval",
dest="cloud_poll_interval",
metavar="SECONDS",
default=5,
type=int,
help="The interval in seconds for polling results from the server (if using the web interface of the VerifierCloud).")
return parser
def load_executor(self):
if self.config.cloud:
if self.config.cloudMaster and "http" in self.config.cloudMaster:
import benchmark.webclient_benchexec as executor
else:
import benchmark.vcloud as executor
else:
executor = super(Benchmark, self).load_executor()
return executor
def check_existing_results(self, benchmark):
if not self.config.reprocessResults:
super(Benchmark, self).check_existing_results(benchmark)
if __name__ == "__main__":
# Add directory with binaries to path.
bin_dir = "lib/native/x86_64-linux" if platform.machine() == "x86_64" else \
"lib/native/x86-linux" if platform.machine() == "i386" else None
if bin_dir:
bin_dir = os.path.join(os.path.dirname(__file__), os.pardir, bin_dir)
os.environ['PATH'] += os.pathsep + bin_dir
benchexec.benchexec.main(Benchmark())
|
the-stack_106_18348
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from base64 import b64encode
from select import select
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.ssh.hooks.ssh import SSHHook
from airflow.utils.decorators import apply_defaults
class SSHOperator(BaseOperator):
"""
SSHOperator to execute commands on given remote host using the ssh_hook.
:param ssh_hook: predefined ssh_hook to use for remote execution.
Either `ssh_hook` or `ssh_conn_id` needs to be provided.
:type ssh_hook: airflow.providers.ssh.hooks.ssh.SSHHook
:param ssh_conn_id: connection id from airflow Connections.
`ssh_conn_id` will be ignored if `ssh_hook` is provided.
:type ssh_conn_id: str
:param remote_host: remote host to connect (templated)
Nullable. If provided, it will replace the `remote_host` which was
defined in `ssh_hook` or predefined in the connection of `ssh_conn_id`.
:type remote_host: str
:param command: command to execute on remote host. (templated)
:type command: str
:param timeout: timeout (in seconds) for executing the command. The default is 10 seconds.
:type timeout: int
:param environment: a dict of shell environment variables. Note that the
server will reject them silently if `AcceptEnv` is not set in SSH config.
:type environment: dict
:param get_pty: request a pseudo-terminal from the server. Set to ``True``
to have the remote process killed upon task timeout.
The default is ``False`` but note that `get_pty` is forced to ``True``
when the `command` starts with ``sudo``.
:type get_pty: bool
"""
template_fields = ('command', 'remote_host')
template_ext = ('.sh',)
@apply_defaults
def __init__(self,
ssh_hook=None,
ssh_conn_id=None,
remote_host=None,
command=None,
timeout=10,
environment=None,
get_pty=False,
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.ssh_hook = ssh_hook
self.ssh_conn_id = ssh_conn_id
self.remote_host = remote_host
self.command = command
self.timeout = timeout
self.environment = environment
        # command may be None here (e.g. provided later via templating), so guard the startswith call
        self.get_pty = (self.command.startswith('sudo') if self.command else False) or get_pty
def execute(self, context):
try:
if self.ssh_conn_id:
if self.ssh_hook and isinstance(self.ssh_hook, SSHHook):
self.log.info("ssh_conn_id is ignored when ssh_hook is provided.")
else:
self.log.info("ssh_hook is not provided or invalid. "
"Trying ssh_conn_id to create SSHHook.")
self.ssh_hook = SSHHook(ssh_conn_id=self.ssh_conn_id,
timeout=self.timeout)
if not self.ssh_hook:
raise AirflowException("Cannot operate without ssh_hook or ssh_conn_id.")
if self.remote_host is not None:
self.log.info("remote_host is provided explicitly. "
"It will replace the remote_host which was defined "
"in ssh_hook or predefined in connection of ssh_conn_id.")
self.ssh_hook.remote_host = self.remote_host
if not self.command:
raise AirflowException("SSH command not specified. Aborting.")
with self.ssh_hook.get_conn() as ssh_client:
self.log.info("Running command: %s", self.command)
                # run the command with the configured pty, timeout and environment
stdin, stdout, stderr = ssh_client.exec_command(command=self.command,
get_pty=self.get_pty,
timeout=self.timeout,
environment=self.environment
)
# get channels
channel = stdout.channel
# closing stdin
stdin.close()
channel.shutdown_write()
agg_stdout = b''
agg_stderr = b''
# capture any initial output in case channel is closed already
stdout_buffer_length = len(stdout.channel.in_buffer)
if stdout_buffer_length > 0:
agg_stdout += stdout.channel.recv(stdout_buffer_length)
# read from both stdout and stderr
while not channel.closed or \
channel.recv_ready() or \
channel.recv_stderr_ready():
readq, _, _ = select([channel], [], [], self.timeout)
for recv in readq:
if recv.recv_ready():
line = stdout.channel.recv(len(recv.in_buffer))
agg_stdout += line
self.log.info(line.decode('utf-8').strip('\n'))
if recv.recv_stderr_ready():
line = stderr.channel.recv_stderr(len(recv.in_stderr_buffer))
agg_stderr += line
self.log.warning(line.decode('utf-8').strip('\n'))
if stdout.channel.exit_status_ready()\
and not stderr.channel.recv_stderr_ready()\
and not stdout.channel.recv_ready():
stdout.channel.shutdown_read()
stdout.channel.close()
break
stdout.close()
stderr.close()
exit_status = stdout.channel.recv_exit_status()
if exit_status == 0:
enable_pickling = conf.getboolean(
'core', 'enable_xcom_pickling'
)
if enable_pickling:
return agg_stdout
else:
return b64encode(agg_stdout).decode('utf-8')
else:
error_msg = agg_stderr.decode('utf-8')
raise AirflowException("error running cmd: {0}, error: {1}"
.format(self.command, error_msg))
except Exception as e:
raise AirflowException("SSH operator error: {0}".format(str(e)))
return True
def tunnel(self):
"""
Get ssh tunnel
"""
ssh_client = self.ssh_hook.get_conn()
ssh_client.get_transport()
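# Editor's illustrative sketch (not part of the provider module): one minimal way to
# wire the operator above into a DAG. The dag/task ids and the "ssh_default"
# connection id are assumptions for the example.
def _example_dag():
    from datetime import datetime
    from airflow import DAG

    with DAG(dag_id="example_ssh_dag",
             start_date=datetime(2021, 1, 1),
             schedule_interval=None) as dag:
        SSHOperator(task_id="run_uptime",
                    ssh_conn_id="ssh_default",
                    command="uptime")
    return dag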
|
the-stack_106_18350
|
# pylint: disable=protected-access
import hashlib
import logging
import os
import shutil
from datetime import datetime, timezone
from pathlib import Path
from typing import Optional, Union, List, Tuple
from uuid import UUID, uuid4
from pydantic import BaseModel
from . import protocol
from .file_reader import AsyncFile, async_stat
from .protocol import Inode, Directory, Backup, BackupSessionConfig
_CONFIG_FILE = 'config.json'
logger = logging.getLogger(__name__)
class Configuration(BaseModel):
store_split_count = 1
store_split_size = 2
class LocalDatabase:
config: Configuration
_CLIENT_DIR = 'client'
_STORE_DIR = 'store'
def __init__(self, base_path: Path):
self._base_path = base_path
self.config = Configuration.parse_file(base_path / _CONFIG_FILE)
def save_config(self):
with (self._base_path / _CONFIG_FILE).open('w') as file:
file.write(self.config.json(indent=True))
def open_client_session(self, client_id_or_name: str) -> "LocalDatabaseServerSession":
try:
try:
client_id = UUID(client_id_or_name)
client_path = self._base_path / self._CLIENT_DIR / str(client_id)
except ValueError:
client_path = self._base_path / self._CLIENT_DIR / client_id_or_name
if client_path.is_symlink():
client_id = os.readlink(client_path)
client_path = self._base_path / self._CLIENT_DIR / client_id
return LocalDatabaseServerSession(self, client_path)
except FileNotFoundError as exc:
logger.error(f"Session not found {client_id_or_name}")
raise protocol.SessionClosed(f"No such session {client_id_or_name}") from exc
def store_path_for(self, ref_hash: str) -> Path:
split_size = self.config.store_split_size
split_count = self.config.store_split_count
split = [ref_hash[x:x+split_size] for x in range(0, split_count * split_size, split_size)]
return self._base_path.joinpath(self._STORE_DIR, *split, ref_hash)
def create_client(self, client_config: protocol.ClientConfiguration) -> protocol.ServerSession:
(self._base_path / self._CLIENT_DIR).mkdir(exist_ok=True, parents=True)
client_name_path = self._base_path / self._CLIENT_DIR / client_config.client_name
client_name_path.symlink_to(str(client_config.client_id))
client_path = self._base_path / self._CLIENT_DIR / str(client_config.client_id)
client_path.mkdir(exist_ok=False, parents=True)
with (client_path / _CONFIG_FILE).open('w') as file:
file.write(client_config.json(indent=True))
return LocalDatabaseServerSession(self, client_path)
@classmethod
def create_database(cls, base_path: Path, configuration: Configuration) -> "LocalDatabase":
base_path.mkdir(exist_ok=False, parents=True)
with (base_path / _CONFIG_FILE).open('w') as file:
file.write(configuration.json(indent=True))
(base_path / cls._STORE_DIR).mkdir(exist_ok=False, parents=True)
(base_path / cls._CLIENT_DIR).mkdir(exist_ok=False, parents=True)
return cls(base_path)
class LocalDatabaseServerSession(protocol.ServerSession):
_BACKUPS = 'backup'
_SESSIONS = 'sessions'
    _TIMESTAMP_FORMAT = "%Y-%m-%d_%H:%M:%S.%f"
_DIR_SUFFIX = ".d"
client_config: protocol.ClientConfiguration = None
def __init__(self, database: LocalDatabase, client_path: Path):
super().__init__()
self._database = database
self._client_path = client_path
with (client_path / _CONFIG_FILE).open('r') as file:
self.client_config = protocol.ClientConfiguration.parse_raw(file.read())
def save_config(self):
with (self._client_path / _CONFIG_FILE).open('w') as file:
file.write(self.client_config.json(indent=True))
async def start_backup(self, backup_date: datetime, allow_overwrite: bool = False,
description: Optional[str] = None) -> protocol.BackupSession:
backup_date = protocol.normalize_backup_date(backup_date, self.client_config.backup_granularity,
self.client_config.timezone)
if not allow_overwrite:
# TODO consider raising the same exception if an open session already exists for the same date
if self._path_for_backup_date(backup_date).exists():
raise protocol.DuplicateBackup(f"Backup exists {backup_date.isoformat()}")
backup_session_id = uuid4()
backup_session_path = self._path_for_session_id(backup_session_id)
backup_session_path.mkdir(exist_ok=False, parents=True)
session_config = protocol.BackupSessionConfig(
client_id=self.client_config.client_id,
session_id=backup_session_id,
allow_overwrite=allow_overwrite,
backup_date=backup_date,
description=description,
)
with (backup_session_path / _CONFIG_FILE).open('w') as file:
file.write(session_config.json(indent=True))
return LocalDatabaseBackupSession(self, backup_session_path)
async def resume_backup(self, *, session_id: Optional[UUID] = None,
backup_date: Optional[datetime] = None) -> protocol.BackupSession:
if session_id is not None:
backup_path = self._path_for_session_id(session_id)
return LocalDatabaseBackupSession(self, backup_path)
if backup_date is not None:
            # This is inefficient if there are a lot of sessions but it gets the job done.
backup_date = protocol.normalize_backup_date(backup_date, self.client_config.backup_granularity,
self.client_config.timezone)
for session_path in (self._client_path / self._SESSIONS).iterdir():
session = LocalDatabaseBackupSession(self, session_path)
if session.config.backup_date == backup_date:
return session
raise protocol.NotFoundException(f"Backup date not found {backup_date}")
raise ValueError("Either session_id or backup_date must be specified but neither were")
async def list_backup_sessions(self) -> List[protocol.BackupSessionConfig]:
results = []
for backup in (self._client_path / self._SESSIONS).iterdir():
with (backup / _CONFIG_FILE).open('r') as file:
backup_config = protocol.BackupSessionConfig.parse_raw(file.read())
results.append(backup_config)
return results
async def list_backups(self) -> List[Tuple[datetime, str]]:
results = []
for backup in (self._client_path / self._BACKUPS).iterdir():
with backup.open('r') as file:
backup_config = protocol.Backup.parse_raw(file.read())
results.append((backup_config.backup_date, backup_config.description))
return results
async def get_backup(self, backup_date: Optional[datetime] = None) -> Optional[Backup]:
if backup_date is None:
try:
backup_path = next(iter(sorted((self._client_path / self._BACKUPS).iterdir(), reverse=True)))
except (FileNotFoundError, StopIteration):
logger.warning(f"No backup found for {self.client_config.client_name} ({self.client_config.client_id})")
return None
else:
backup_date = protocol.normalize_backup_date(backup_date, self.client_config.backup_granularity,
self.client_config.timezone)
backup_path = self._path_for_backup_date(backup_date)
with backup_path.open('r') as file:
return protocol.Backup.parse_raw(file.read())
async def get_directory(self, inode: Inode) -> Directory:
if inode.type != protocol.FileType.DIRECTORY:
raise ValueError(f"Cannot open file type {inode.type} as a directory")
inode_hash = inode.hash + self._DIR_SUFFIX
with self._database.store_path_for(inode_hash).open('r') as file:
return Directory.parse_raw(file.read())
async def get_file(self, inode: Inode, target_path: Optional[Path] = None,
restore_permissions: bool = False, restore_owner: bool = False) -> Optional[protocol.FileReader]:
if inode.type not in (protocol.FileType.REGULAR, protocol.FileType.LINK, protocol.FileType.PIPE):
raise ValueError(f"Cannot read a file type {inode.type}")
result_path = self._database.store_path_for(inode.hash)
if target_path is not None:
with await AsyncFile.open(result_path, "r") as content:
await protocol.restore_file(target_path, inode, content, restore_owner, restore_permissions)
return None
return await AsyncFile.open(result_path, "r")
def complete_backup(self, meta: protocol.Backup, overwrite: bool):
backup_path = self._path_for_backup_date(meta.backup_date)
backup_path.parent.mkdir(exist_ok=True, parents=True)
with backup_path.open('w' if overwrite else 'x') as file:
file.write(meta.json(indent=True))
def _path_for_backup_date(self, backup_date: datetime) -> Path:
        return self._client_path / self._BACKUPS / (backup_date.strftime(self._TIMESTAMP_FORMAT) + '.json')
def _path_for_session_id(self, session_id: UUID) -> Path:
return self._client_path / self._SESSIONS / str(session_id)
class LocalDatabaseBackupSession(protocol.BackupSession):
_PARTIAL = 'partial'
_NEW_OBJECTS = 'new_objects'
_ROOTS = 'roots'
def __init__(self, client_session: LocalDatabaseServerSession, session_path: Path):
super().__init__()
self._server_session = client_session
self._session_path = session_path
try:
with (session_path / _CONFIG_FILE).open('r') as file:
self._config = protocol.BackupSessionConfig.parse_raw(file.read())
except FileNotFoundError as exc:
raise protocol.SessionClosed(session_path.name) from exc
(session_path / self._NEW_OBJECTS).mkdir(exist_ok=True, parents=True)
(session_path / self._ROOTS).mkdir(exist_ok=True, parents=True)
(session_path / self._PARTIAL).mkdir(exist_ok=True, parents=True)
@property
def config(self) -> BackupSessionConfig:
return self._config
async def directory_def(self, definition: protocol.Directory, replaces: Optional[UUID] = None
) -> protocol.DirectoryDefResponse:
if not self.is_open:
raise protocol.SessionClosed()
for name, child in definition.children.items():
if child.hash is None:
raise protocol.InvalidArgumentsError(f"Child {name} has no hash value")
directory_hash, content = definition.hash()
if self._object_exists(directory_hash + self._server_session._DIR_SUFFIX):
# An empty response here means "success".
return protocol.DirectoryDefResponse()
missing = []
for name, inode in definition.children.items():
if not self._object_exists(inode.hash):
missing.append(name)
if missing:
return protocol.DirectoryDefResponse(missing_files=missing)
tmp_path = self._temp_path()
with tmp_path.open('xb') as file:
try:
file.write(content)
tmp_path.rename(self._store_path_for(directory_hash + self._server_session._DIR_SUFFIX))
except:
tmp_path.unlink()
raise
# Success
return protocol.DirectoryDefResponse(ref_hash=directory_hash)
async def upload_file_content(self, file_content: Union[protocol.FileReader, bytes], resume_id: UUID,
resume_from: int = 0, is_complete: bool = True) -> Optional[str]:
if not self.is_open:
raise protocol.SessionClosed()
hash_object = hashlib.sha256()
temp_file = self._temp_path(resume_id)
with await AsyncFile.open(temp_file, 'w') as target:
# If we are completing the file we must hash it.
if is_complete and resume_from > 0:
while target.tell() < resume_from:
bytes_read = await target.read(min(protocol.READ_SIZE, resume_from - target.tell()))
if not bytes_read:
bytes_read = bytes(resume_from - target.tell())
target.seek(resume_from, os.SEEK_SET)
hash_object.update(bytes_read)
# If not complete then we just seek to the requested resume_from position
elif resume_from > 0:
target.seek(resume_from, os.SEEK_SET)
# Write the file content
if isinstance(file_content, bytes):
hash_object.update(file_content)
await target.write(file_content)
else:
bytes_read = await file_content.read(protocol.READ_SIZE)
while bytes_read:
if is_complete:
hash_object.update(bytes_read)
await target.write(bytes_read)
bytes_read = await file_content.read(protocol.READ_SIZE)
if not is_complete:
return None
        # Move the temporary file to new_objects, named as its hash
ref_hash = hash_object.hexdigest()
if self._object_exists(ref_hash):
logger.warning(f"File already exists after upload {resume_id} as {ref_hash}")
temp_file.unlink()
else:
logger.debug(f"File upload complete {resume_id} as {ref_hash}")
temp_file.rename(self._new_object_path_for(ref_hash))
return ref_hash
async def add_root_dir(self, root_dir_name: str, inode: protocol.Inode) -> None:
if not self.is_open:
raise protocol.SessionClosed()
location_hash = inode.hash
if inode.type is protocol.FileType.DIRECTORY:
location_hash += self._server_session._DIR_SUFFIX
if not self._object_exists(location_hash):
raise ValueError(f"Cannot create {root_dir_name} - does not exist: {inode.hash}")
file_path = self._session_path / self._ROOTS / root_dir_name
with file_path.open('x') as file:
file.write(inode.json())
async def check_file_upload_size(self, resume_id: UUID) -> int:
if not self.is_open:
raise protocol.SessionClosed()
return (await async_stat(self._temp_path(resume_id))).st_size
async def complete(self) -> protocol.Backup:
if not self.is_open:
raise protocol.SessionClosed()
logger.info(f"Committing {self._session_path.name} for {self._server_session.client_config.client_name} "
f"({self._server_session.client_config.client_id}) - {self._config.backup_date}")
for file_path in (self._session_path / self._NEW_OBJECTS).iterdir():
try:
target_path = self._server_session._database.store_path_for(file_path.name)
target_path.parent.mkdir(parents=True, exist_ok=True)
logger.debug(f"Moving {file_path.name} to store")
file_path.rename(target_path)
except FileExistsError:
                # This should be rare. For it to happen, two concurrent backup sessions must try to add the same new file.
logger.warning(f"Another session has already uploaded {file_path.name}... skipping file.")
roots = {}
for file_path in (self._session_path / self._ROOTS).iterdir():
with file_path.open('r') as file:
roots[file_path.name] = protocol.Inode.parse_raw(file.read())
backup_meta = protocol.Backup(
client_id=self._server_session.client_config.client_id,
client_name=self._server_session.client_config.client_name,
backup_date=self._config.backup_date,
started=self._config.started,
completed=datetime.now(timezone.utc),
description=self._config.description,
roots=roots,
)
self._server_session.complete_backup(backup_meta, self._config.allow_overwrite)
await self.discard()
return backup_meta
async def discard(self) -> None:
if not self.is_open:
raise protocol.SessionClosed()
shutil.rmtree(self._session_path)
def _object_exists(self, ref_hash: str) -> bool:
return (self._server_session._database.store_path_for(ref_hash).exists()
or self._store_path_for(ref_hash).exists())
def _store_path_for(self, ref_hash: str) -> Path:
return self._session_path / self._NEW_OBJECTS / ref_hash
def _temp_path(self, resume_id: Optional[UUID] = None) -> Path:
if resume_id is None:
resume_id = uuid4()
return self._session_path / self._PARTIAL / str(resume_id)
def _new_object_path_for(self, ref_hash: str) -> Path:
return self._session_path / self._NEW_OBJECTS / ref_hash
@property
def server_session(self) -> protocol.ServerSession:
return self._server_session
@property
def is_open(self) -> bool:
return self._session_path.exists()
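# Editor's illustrative sketch: bootstrapping an empty database with the classes
# above. The base path is a placeholder; registering a client additionally needs a
# protocol.ClientConfiguration, which is not shown here.
def _example_create_database(base_path: Path) -> LocalDatabase:
    """Create an empty database with the default split settings."""
    return LocalDatabase.create_database(base_path, Configuration())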
|
the-stack_106_18351
|
import json
import logging
import random
import string
import requests
import hmac
from jwkest.jwk import rsa_load, RSAKey
from jwkest.jws import JWS
from satosa.internal import InternalData
from ..micro_services.base import ResponseMicroService
from ..response import Redirect
import time
logger = logging.getLogger(__name__)
class Webauthn(ResponseMicroService):
def __init__(self, config, *args, **kwargs):
super().__init__(*args, **kwargs)
self.redirect_url = config["redirect_url"]
self.api_url = config["api_url"]
self.exclude = config["exclude"]
self.user_id = config["user_identificator"]
self.conflict_compatibility = config["conflict_compatibility"]
self.included_requesters = config["included_requesters"] or []
self.excluded_requesters = config["excluded_requesters"] or []
self.signing_key = RSAKey(key=rsa_load(config["private_key"]), use="sig", alg="RS256")
self.endpoint = "/process"
self.id_to_attr = config.get("id_to_attr", None)
logger.info("Webauthn is active")
def _handle_webauthn_response(self, context):
saved_state = context.state[self.name]
internal_response = InternalData.from_dict(saved_state)
message = {"user_id": internal_response["attributes"][self.user_id][0], "nonce": internal_response['nonce'],
"time": str(int(time.time()))}
message_json = json.dumps(message)
jws = JWS(message_json, alg=self.signing_key.alg).sign_compact([self.signing_key])
request = self.api_url + "/" + jws
response = requests.get(request)
response_dict = json.loads(response.text)
if response_dict["result"] != "okay" or not hmac.compare_digest(response_dict["nonce"], internal_response["nonce"]):
raise Exception("Authentication was unsuccessful.")
if "authn_context_class_ref" in context.state:
internal_response["auth_info"]["auth_class_ref"] = context.state["authn_context_class_ref"]
return super().process(context, internal_response)
def process(self, context, internal_response):
client_mfa_requested = False
client_sfa_requested = False
if "authn_context_class_ref" in context.state and "mfa" in context.state["authn_context_class_ref"]:
client_mfa_requested = True
if "authn_context_class_ref" in context.state and (
"sfa" in context.state["authn_context_class_ref"] or
"PasswordProtectedTransport" in context.state["authn_context_class_ref"]):
client_sfa_requested = True
config_mfa_requested = True
internal_dict = internal_response.to_dict()
if self.exclude and internal_dict.get("requester") in self.excluded_requesters:
config_mfa_requested = False
if not self.exclude and not (internal_dict.get("requester") in self.included_requesters):
config_mfa_requested = False
if not client_mfa_requested and not config_mfa_requested:
return super().process(context, internal_response)
if client_sfa_requested and config_mfa_requested:
context.state["conflict"] = True
else:
context.state["conflict"] = False
if not self.conflict_compatibility and context.state["conflict"]:
raise Exception("CONFLICT - SP and the Request are in a conflict - authentication could not take place.")
user_id = internal_response["attributes"][self.user_id][0]
letters = string.ascii_lowercase
actual_time = str(int(time.time()))
rand = random.SystemRandom()
random_string = actual_time + ''.join(rand.choice(letters) for i in range(54))
internal_response['nonce'] = random_string
context.state[self.name] = internal_response.to_dict()
message = {"user_id": user_id, "nonce": random_string, "time": actual_time}
message_json = json.dumps(message)
jws = JWS(message_json, alg=self.signing_key.alg).sign_compact([self.signing_key])
return Redirect("%s/%s" % (self.redirect_url + "/" + jws, ""))
def register_endpoints(self):
return [("^webauthn%s$" % self.endpoint, self._handle_webauthn_response)]
|
the-stack_106_18353
|
from __future__ import print_function
from builtins import input
from builtins import str
from builtins import range
from builtins import object
from future.utils import string_types
from influxdb import InfluxDBClient
from influxdb.exceptions import InfluxDBServerError, InfluxDBClientError
from ast import literal_eval
import getpass
import logging
import requests.exceptions
import sys
class StatsProcessorState(object):
def __init__(self):
self.influxdb_points = None
self.points_written = None
self.reset()
def reset(self):
self.influxdb_points = []
self.points_written = 0
# influxdb_plugin state
g_state = StatsProcessorState()
# InfluxDBClient interface
g_client = None
LOG = logging.getLogger(__name__)
# Number of points to queue up before writing it to the database.
MAX_POINTS_PER_WRITE = 100
# separator used to concatenate stat keys with sub-keys derived from stats
# whose value is a dict or list.
SUB_KEY_SEPARATOR = "."
def start(argv):
"""
Instantiate an InfluxDBClient. The expected inputs are the host/address and
port of the InfluxDB and the name of the database to use. If the database
does not exist then it will be created. If the fourth arg is "auth" then it
will prompt the user for the InfluxDB's username and password.
"""
influxdb_host = argv[0]
influxdb_port = int(argv[1])
influxdb_name = argv[2]
influxdb_ssl = False
influxdb_verifyssl = False
influxdb_username = "root"
influxdb_password = "root"
if len(argv) > 3:
if argv[3] == "auth":
influxdb_username = input("InfluxDB username: ")
influxdb_password = getpass.getpass("Password: ")
else:
influxdb_username = argv[3]
influxdb_password = argv[4]
influxdb_ssl = literal_eval(argv[5])
influxdb_verifyssl = literal_eval(argv[6])
LOG.info(
"Connecting to: %s@%s:%d database:%s ssl=%s verify_ssl=%s.",
influxdb_username,
influxdb_host,
influxdb_port,
influxdb_name,
influxdb_ssl,
influxdb_verifyssl,
)
global g_client
g_client = InfluxDBClient(
host=influxdb_host,
port=influxdb_port,
database=influxdb_name,
username=influxdb_username,
password=influxdb_password,
ssl=influxdb_ssl,
verify_ssl=influxdb_verifyssl
)
create_database = True
try:
databases = g_client.get_list_database()
except (requests.exceptions.ConnectionError, InfluxDBClientError) as exc:
print(
"Failed to connect to InfluxDB server at %s:%s "
"database: %s.\nERROR: %s"
% (influxdb_host, str(influxdb_port), influxdb_name, str(exc)),
file=sys.stderr,
)
sys.exit(1)
for database in databases:
if database["name"] == influxdb_name:
create_database = False
break
if create_database is True:
LOG.info("Creating database: %s.", influxdb_name)
g_client.create_database(influxdb_name)
def begin_process(cluster):
LOG.debug("Begin processing %s stats.", cluster)
def process_stat(cluster, stat):
"""
Convert Isilon stat query result to InfluxDB point and send to the
InfluxDB service. Organize the measurements by cluster and node via tags.
"""
# Process stat(s) and then write points if list is large enough.
tags = {"cluster": cluster}
if stat.devid != 0:
tags["node"] = stat.devid
influxdb_points = _influxdb_points_from_stat(stat.time, tags, stat.key, stat.value)
    if not influxdb_points:
        # covers both an empty list and the None returned for an empty-string stat
        return
for influxdb_point in influxdb_points:
if len(influxdb_point["fields"]) > 0:
g_state.influxdb_points.append(influxdb_point)
num_points = len(g_state.influxdb_points)
if num_points > MAX_POINTS_PER_WRITE:
g_state.points_written += _write_points(
g_state.influxdb_points, num_points
)
g_state.influxdb_points = []
def end_process(cluster):
# send left over points to influxdb
num_points = len(g_state.influxdb_points)
if num_points > 0:
g_state.points_written += _write_points(g_state.influxdb_points, num_points)
LOG.debug(
"Done processing %s stats, wrote %d points.", cluster, g_state.points_written
)
g_state.reset()
def _add_field(fields, field_name, field_value, field_value_type):
if field_value_type == int:
# convert integers to float because InfluxDB only supports 64 bit
# signed integers, so doing this prevents an "out of range" error when
# inserting values that are unsigned 64 bit integers.
# Note that it is not clear if the PAPI is smart enough to always
# encode 64 bit unsigned integers as type 'long' even when the actual
        # value fits into a 64 bit signed integer and because InfluxDB
# wants a measurement to always be of the same type, the safest thing
# to do is convert integers to float.
field_value = float(field_value)
fields.append((field_name, field_value))
def _process_stat_dict(stat_value, fields, tags, prefix=""):
"""
Add (field_name, field_value) tuples to the fields list for any
non-string or non-"id" items in the stat_value dict so that they can be
used for the "fields" parameter of the InfluxDB point.
    Any string values, and any integer values whose key ends in "id", are turned into tags instead.
"""
for key, value in stat_value.items():
value_type = type(value)
field_name = prefix + key
if isinstance(value, string_types) or (key[-2:] == "id" and value_type == int):
tags[field_name] = value
elif value_type == list:
list_prefix = field_name + SUB_KEY_SEPARATOR
_process_stat_list(value, fields, tags, list_prefix)
elif value_type == dict:
dict_prefix = field_name + SUB_KEY_SEPARATOR
_process_stat_dict(value, fields, tags, dict_prefix)
else:
_add_field(fields, field_name, value, value_type)
def _process_stat_list(stat_value, fields, tags, prefix=""):
"""
Add (field_name, field_value) tuples to the fields list for any
non-string or non-"id" items in the stat_value dict so that they can be
used for the "fields" parameter of the InfluxDB point.
"""
field_name = prefix + "value"
for index in range(0, len(stat_value)):
list_value = stat_value[index]
value_type = type(list_value)
if value_type == dict:
_process_stat_dict(list_value, fields, tags, prefix)
else:
item_name = field_name + SUB_KEY_SEPARATOR + str(index)
if value_type == list:
# AFAIK there are no instances of a list that contains a list
# but just in case one is added in the future, deal with it.
item_name += SUB_KEY_SEPARATOR
_process_stat_list(list_value, fields, tags, item_name)
else:
_add_field(fields, item_name, list_value, value_type)
def _influxdb_points_from_stat(stat_time, tags, stat_key, stat_value):
"""
Create InfluxDB points/measurements from the stat query result.
"""
points = []
fields = []
stat_value_type = type(stat_value)
if stat_value_type == list:
for stat in stat_value:
(fields, point_tags) = _influxdb_point_from_stat(
stat_time, tags, stat_key, stat
)
points.append(
_build_influxdb_point(stat_time, point_tags, stat_key, fields)
)
elif stat_value_type == dict:
point_tags = tags.copy()
_process_stat_dict(stat_value, fields, point_tags)
points.append(_build_influxdb_point(stat_time, point_tags, stat_key, fields))
else:
if stat_value == "":
return None # InfluxDB does not like empty string stats
_add_field(fields, "value", stat_value, stat_value_type)
points.append(_build_influxdb_point(stat_time, tags.copy(), stat_key, fields))
return points
def _influxdb_point_from_stat(stat_time, tags, stat_key, stat_value):
"""
Create InfluxDB points/measurements from the stat query result.
"""
point_tags = tags.copy()
fields = []
stat_value_type = type(stat_value)
if stat_value_type == dict:
_process_stat_dict(stat_value, fields, point_tags)
elif stat_value_type == list:
_process_stat_list(stat_value, fields, point_tags)
else:
if stat_value == "":
            # InfluxDB does not like empty-string stats; return no fields so the
            # caller drops this point (points with empty fields are filtered out)
            return ([], point_tags)
_add_field(fields, "value", stat_value, stat_value_type)
return (fields, point_tags)
def _build_influxdb_point(unix_ts_secs, tags, measurement, fields):
"""
Build the json for an InfluxDB data point.
"""
timestamp_ns = unix_ts_secs * 1000000000 # convert to nanoseconds
point_json = {
"measurement": measurement,
"tags": tags,
"time": timestamp_ns,
"fields": {},
}
for field_name, field_value in fields:
point_json["fields"][field_name] = field_value
return point_json
def _get_point_names(points):
names = ""
for point in points:
names += point["measurement"]
names += " "
return names
def _write_points(points, num_points):
"""
Write the points to the InfluxDB in groups that are MAX_POINTS_PER_WRITE in
size.
"""
LOG.debug("Writing points %d", num_points)
write_index = 0
points_written = 0
while write_index < num_points:
max_write_index = write_index + MAX_POINTS_PER_WRITE
write_points = points[write_index:max_write_index]
try:
g_client.write_points(write_points)
points_written += len(write_points)
except InfluxDBServerError as svr_exc:
LOG.error(
"InfluxDBServerError: %s\nFailed to write points: %s",
str(svr_exc),
_get_point_names(write_points),
)
except InfluxDBClientError as client_exc:
LOG.error(
"InfluxDBClientError writing points: %s\n" "Error: %s",
_get_point_names(write_points),
str(client_exc),
)
except requests.exceptions.ConnectionError as req_exc:
LOG.error(
"ConnectionError exception caught writing points: %s\n" "Error: %s",
_get_point_names(write_points),
str(req_exc),
)
write_index += MAX_POINTS_PER_WRITE
return points_written
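# Editor's illustrative sketch of how the plugin hooks above are called in sequence.
# The namedtuple stands in for the real stat-query result object, of which only
# .devid, .time, .key and .value are used; the host, port, database name and stat
# values are placeholders.
def _example_run():
    from collections import namedtuple
    Stat = namedtuple("Stat", ["devid", "time", "key", "value"])

    start(["localhost", "8086", "isi_data_insights"])
    begin_process("cluster-1")
    process_stat("cluster-1", Stat(devid=1, time=1600000000,
                                   key="node.cpu.user.avg", value=12.5))
    end_process("cluster-1")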
|
the-stack_106_18355
|
import tkinter as tk
import tk_tools
def add_row():
row = [1, 2, 3]
label_grid.add_row(row)
def remove_row():
label_grid.remove_row(0)
if __name__ == '__main__':
root = tk.Tk()
label_grid = tk_tools.LabelGrid(root, 3, ['Column0', 'Column1', 'Column2'])
label_grid.grid(row=0, column=0)
add_row_btn = tk.Button(text='Add Row', command=add_row)
add_row_btn.grid(row=1, column=0, sticky='EW')
remove_row_btn = tk.Button(text='Remove Row', command=remove_row)
remove_row_btn.grid(row=2, column=0, sticky='EW')
root.mainloop()
|
the-stack_106_18357
|
from __future__ import absolute_import
from __future__ import print_function
import sys
import requests
import myoperator
import Parameters, DictServer, XMLParser
from PannzerFunctions import Cleaner
class Runner:
def __init__(self, glob, operator_name=None, CHUNK=100, liveData=None, MAXRES=64000, PACKETSIZE=100000):
"""
glob = object handle containing spreadsheets,dictionaries,parameters
CHUNK = number of entries (query sequences, blocks) in buffer. Buffer is used for lazy dictionary lookup
liveData = name of status file (number of processed queries). None implies no output
"""
self.pythonversion=int(sys.version_info[0])
print("# Python version: ",self.pythonversion, file=sys.stderr)
self.glob=glob
self.MAXRES=MAXRES # maximum size of SANSparallel query string
self.liveData=liveData
self.sentquery=0
self.CHUNK=CHUNK
self.PACKETSIZE=PACKETSIZE # guard against long FASTA sequences
self.colnames=None
self.block_column_index=0
# initialize operator
self.operator_name=operator_name
if not self.operator_name: self.operator_name=self.glob.param['input_OPERATOR']
        [self.myoperator]=self.glob.use_operators([self.operator_name])
self.do_lazy=(len(self.glob.dictlist)>1 or (len(self.glob.dictlist)==1 and self.glob.dictlist[0]!='GOIDELIC')) # glob.dictlist defined in operators' __init__
self.linewise=False
if isinstance(self.myoperator, myoperator.RowOperator):
self.blockwise=False
elif isinstance(self.myoperator, myoperator.BlockOperator):
self.blockwise=True
elif isinstance(self.myoperator, myoperator.TextOperator):
self.linewise=True
else:
sys.stderr.write("# Invalid operator %s. Exiting!\n" %operator_name)
sys.exit()
# parameter nicknames
self.REMOTE=self.glob.param['CONN_REMOTE']
self.PORTNO=self.glob.param['CONN_PORTNO']
self.H=self.glob.param['SANS_H']
self.HX=self.glob.param['SANS_HX']
self.R=self.glob.param['SANS_R']
self.VOTELIST_SIZE=self.glob.param['SANS_VOTELIST_SIZE']
self.SANSPROTOCOL=self.glob.param['SANS_PROTOCOL']
self.SSEQ=self.glob.param['SANS_SSEQ']
self.RANGES=self.glob.param['SANS_RANGES']
self.SANSHOST=self.glob.param['CONN_SANSHOST']
self.SANSPORT=self.glob.param['CONN_SANSPORT']
self.DICTHOST=self.glob.param['CONN_HOSTNAME']
self.DICTPORT=self.glob.param['CONN_PORTNO']
def lazyRunner(self, infile, output_files=['--'], input_format="FASTA", colnames=None, queryspecies="auto", block_column_index=0, block_column_name=None):
"""
Main function to process data streams with SANSPANZ operators.
infile = input file name; None implies STDIN
output_files = output file names for each spreadsheet [data, DE predictions, GO predictions, annotations]; None implies no output
input_format = format of input file: "FASTA" = sequences in FASTA format, "tab" = tab-separated values
colnames = column names of input data stream. None implies column header is first non-comment line of input; automatic if input_format is FASTA
queryspecies = "auto" implies parsing from Uniprot entry's OS= tag, None implies input data has isquery/species columns, must be supplied if input_format is "FASTA"
"""
self.have_colnames=False
if colnames:
self.colnames=colnames.split()
self.colmap=self.glob.sheets[0].use_columns(self.colnames)
self.have_colnames=True
print("# Received colnames:",self.colnames,self.colmap, file=sys.stderr)
self.blocking_initialized=(block_column_name is None) # block_column_index is overwritten in first process_chunk if block_column_name is defined
self.block_column_name=block_column_name
# load small dictionaries: GOIDELIC, including rootcount
if "GOIDELIC" in self.glob.dictlist:
if self.REMOTE:
tmp=DictServer.DICTquery("GOIDELIC",self.pythonversion,REMOTE=self.REMOTE,HOSTNAME=self.DICTHOST,PORTNO=self.DICTPORT).split("\n")
self.glob.load_goidelic_data(tmp)
else:
fn=self.glob.param['DATA_DIR']+'/'+self.glob.param['DATA_GOIDELIC']
self.glob.load_goidelic(fn)
            print('# loaded GOIDELIC remote=',self.REMOTE,len(self.glob.GOcounts),len(self.glob.GOparents),len(self.glob.ontology),len(self.glob.godesc),len(self.glob.EC),len(self.glob.KEGG), file=sys.stderr)
# chunk header is known if FASTA or colnames argument is defined; otherwise it must be captured from input data stream
if input_format=="FASTA" and not self.linewise:
# get colnames from XMLparser with empty message
self.xml=XMLParser.XMLParser(queryspecies)
self.colnames=self.xml.stream("",True,False,self.SSEQ,self.RANGES)[0].rstrip().split("\t")
self.colmap=self.glob.sheets[0].use_columns(self.colnames)
self.have_colnames=True
# open IO channels
data_in=self.open_IO_channels(infile,output_files)
for sheet in self.glob.sheets: sheet.output(header=True)
# fill buffer
self.olduid='?'
self.iquery=0
packet=''
while True:
line=data_in.readline()
if not line: break
if line[0]=='#': continue # ignore comment lines
# shortcut: process line by line
if self.linewise:
self.myoperator.process(line)
continue
# CHUNK counter
if self.test_newentry(input_format,line):
if (self.iquery % self.CHUNK == 0) or (input_format=="FASTA" and not self.linewise and len(packet)>self.PACKETSIZE):
#if self.iquery % self.CHUNK == 0:
self.process_chunk(input_format,packet)
packet=''
self.iquery+=1
# append line to packet
packet+=line
# last buffer
self.process_chunk(input_format,packet) # linewise has empty packet, returns immediately
self.myoperator.finalise()
# close IO channels
self.close_IO_channels()
def process_chunk(self,input_format,packet):
if len(packet)<1: return
# split to lines (removes \n)
if input_format=="FASTA":
lines=self.SANSquery(packet)[0].split("\n")
elif input_format=="tab":
lines=packet.split("\n")
else:
sys.stderr.write("ERROR: unknown input_format %s\n" %input_format)
return
# add input colnames if not defined in operator
if not self.have_colnames:
self.colnames=lines[0].split("\t") # comments were removed on data input
self.colmap=self.glob.sheets[0].use_columns(self.colnames)
startrow=1 # data starts after header row
self.have_colnames=True
else:
startrow=0 # no header row
if (not self.blocking_initialized) and self.block_column_name: # overwrite self.block_column_index
print("find %s in " %self.block_column_name,self.colnames, file=sys.stderr)
self.block_column_index=self.colnames.index(self.block_column_name)
self.blocking_initialized=True
sys.stderr.write("# block_column_index = %i name = %s\n" %(self.block_column_index,self.block_column_name))
# capture dictionary keys from input data
if self.do_lazy: self.load_private_online_dictionaries(lines[startrow:])
# send one block or chunk-of-rows to operator
for sheet in self.glob.sheets: sheet.empty_block()
olduid='?'
for line in lines[startrow:]:
if not line: continue
row=line.split("\t")
if self.blockwise:
if row[self.block_column_index] != olduid:
olduid=row[self.block_column_index]
self.myoperator.process(self.glob.sheets[0].block)
# output result
for sheet in self.glob.sheets:
sheet.output(result=True)
sheet.empty_block()
datarow=[]
self.glob.sheets[0].append_row(datarow) # empty, n.d.
datarow=self.glob.sheets[0].block[-1]
try:
for i in range(0,len(self.colmap)):
ix=self.colmap[i]
datarow[ix]=row[i]
except:
sys.stderr.write("# Input error: %s\n" %line)
self.glob.sheets[0].block.pop() # remove last entry
continue
# last block
if self.blockwise:
self.myoperator.process(self.glob.sheets[0].block)
else:
for row in self.glob.sheets[0].block: self.myoperator.process(row)
# output result
for sheet in self.glob.sheets: sheet.output(result=True)
# update liveData
if self.liveData:
fh=open(self.liveData,"w")
fh.write("%i\n" %self.iquery)
fh.close()
def load_private_online_dictionaries(self,lines):
uacc=[]
uspecies=[]
udesc=[]
uword=[]
if "GODICT" in self.glob.dictlist:
try:
spid_col=self.colnames.index('spid')
except:
spid_col=self.colnames.index('qpid')
uspid=self.catch_unique(spid_col,lines)
# grab accession: spid is db|accession|pid
x={}
for spid in uspid:
try:
acc=spid.split("|")[1]
x[acc]=1
except:
sys.stderr.write("# Warning: no accession number from %s\n" %spid)
uacc=x.keys()
if "LINEAGE" in self.glob.dictlist or "TAXID" in self.glob.dictlist:
species_col=self.colnames.index('species') # use spreadsheet's colnames
uspecies=self.catch_unique(species_col,lines)
if "DESCCOUNT" in self.glob.dictlist or "WORDCOUNT" in self.glob.dictlist:
desc_col=self.colnames.index('desc')
x={}
tmp=self.catch_unique(desc_col,lines)
# call Cleaner function
for desc in tmp: x[Cleaner(desc.upper())]=1 ## operators.Cleaner !!!
udesc=x.keys()
if "WORDCOUNT" in self.glob.dictlist:
x={}
for desc in udesc:
for word in desc.split(): x[word.upper()]=1
uword=x.keys()
# compose DictServer message
msg=""
for key in uspecies: msg+="LINEAGE"+"\t"+key.upper()+"\n"
for key in uspecies: msg+="TAXID"+"\t"+key.upper()+"\n"
for key in uacc: msg+="GODICT"+"\t"+key.upper()+"\n"
for key in uword: msg+="WORDCOUNT"+"\t"+key.upper()+"\n"
for key in udesc: msg+="DESCCOUNT"+"\t"+key.upper()+"\n"
if "WORDCOUNT" in self.glob.dictlist: msg+="NWORDTOTAL\n"
if "DESCCOUNT" in self.glob.dictlist: msg+="NPROT\n"
if "ECWEIGHT" in self.glob.dictlist: # evidence code weights for Blast2GO
for key in uacc: msg+="ECWEIGHT"+"\t"+key.upper()+"\n"
# send request to DictServer
tmp=DictServer.DICTquery(msg,self.pythonversion,REMOTE=self.REMOTE,HOSTNAME=self.DICTHOST,PORTNO=self.PORTNO).split("\n")
# initialize dictionaries
self.glob.GOdict={}
self.glob.GOdict_weights={}
self.glob.desccounts={}
self.glob.lineage={}
self.glob.taxid={}
self.glob.wordcounts={}
# load values to dictionaries (keys occur in chunk)
for row in tmp:
if not row: continue
(table,key,value)=row.split("\t")
if table == "GODICT":
self.glob.GOdict[key]=value
elif table == "DESCCOUNT":
self.glob.desccounts[key]=int(value)
elif table == "LINEAGE":
self.glob.lineage[key]=value
elif table == "WORDCOUNT":
self.glob.wordcounts[key]=int(value)
elif table == "TAXID":
self.glob.taxid[key]=value
elif table == "NPROT":
self.glob.nprot=int(value)
elif table == "NWORDTOTAL":
self.glob.nwordtotal=int(value)
elif table == "ECWEIGHT":
self.glob.GOdict_weights[key]=value
else:
sys.stderr.write("# unknown table: %s\n" %str(row))
sys.stderr.write("# Dictionary sizes: GOdict %i, lineage %i, taxid %i, desccounts %i, wordcounts %i\n" %(len(self.glob.GOdict), len(self.glob.lineage), len(self.glob.taxid), len(self.glob.desccounts), len(self.glob.wordcounts)))
def catch_unique(self,target_col,lines):
"Returns list of unique keys in target_col of lines. Lines has rows of tab-separated data. Keys are uppercase."
tmp={}
for line in lines:
if not line: continue # skip empty line
try:
key=line.split("\t")[target_col].upper()
tmp[key]=1
except:
sys.stderr.write("# Warning: column %i not found on line %s\n" %(target_col,line))
return(tmp.keys())
def SANSquery(self,message,SANSURL="http://ekhidna2.biocenter.helsinki.fi/cgi-bin/sans/sans.cgi"):
# send chunk of query sequences to SANSparallel server
sys.stderr.write("# Calling SANSparallel, message size is %i bytes\n" %len(message))
if self.REMOTE:
values={'query': message, 'mode': 'raw', 'db': 'uniprot', 'H': '100', 'protocol': '0'}
# try 3 times
itried=0
while itried<2:
try :
r=requests.post(SANSURL,data=values)
break
except requests.exceptions.RequestException as rerr :
print("Error,", str(rerr), file=sys.stderr)
itried+=1
#sys.exit(1)
print("# Result size is %i bytes" % len(r.text), file=sys.stderr)
if r.text == "" :
print("Error: calling SANSparallel remotely gave empty result", file=sys.stderr)
sys.exit(1)
#tmp=r.text
if self.pythonversion == 2:
tmp=r.text.encode('ascii','ignore')
else:
tmp=r.text
#print("# SANS result type is ",type(tmp),file=sys.stderr)
else: # local server
# extract hdr,seq from FASTA
qbuffer=''
hdr=''
seq=''
for line in message.split("\n"):
if not line: continue
line=line.replace("\"","")
if line[0]=='>':
self.sentquery+=1
qbuffer+="%i %i %i 20 2 11 1.0 %i %i %i \"%s\" \"%s\" </QUERY>\n" %(self.sentquery, self.H, self.HX, self.R, self.VOTELIST_SIZE, self.SANSPROTOCOL,seq[0:self.MAXRES],hdr[0:self.MAXRES])
hdr=line.strip()
seq=''
else:
seq+=line.strip().upper().replace(" ","")
# last entry
qbuffer+="%i %i %i 20 2 11 1.0 %i %i %i \"%s\" \"%s\" </QUERY>\n" %(self.sentquery, self.H, self.HX, self.R, self.VOTELIST_SIZE, self.SANSPROTOCOL,seq[0:self.MAXRES],hdr[0:self.MAXRES])
#sys.stderr.write("# qbuffer %s" %qbuffer)
# send request to SANSparallel server in proper format
tmp=DictServer.Generic_client(qbuffer,self.pythonversion,HOSTNAME=self.SANSHOST,PORTNO=self.SANSPORT)
#print("SANS result type is ",type(tmp), file=sys.stderr)
#sys.stderr.write(tmp)
sys.stderr.write("# SANSparallel returned %i bytes\n" %len(tmp))
# format conversion to tabular bytestream
data=self.xml.stream(tmp.split("\n"),header=False,bracket=False,output_sseq=self.SSEQ,output_ranges=self.RANGES)
return(data[0],data[1]) # SANS tabular, metadata
def test_newentry(self,input_format,line):
if input_format=="FASTA":
try:
if line[0]==">":
return(True)
else:
return(False)
except:
return(False)
if input_format=="tab":
row=line.split("\t")
try:
uid=row[self.block_column_index]
if uid != self.olduid:
self.olduid=uid
return(True)
else:
return(False)
except:
return(False)
return(False)
def open_IO_channels(self,infile,OUT_ARRAY):
"""
In SANSPANZ, the following sheets are used:
FileOut = name of data spreadsheet's output file (default = no output)
OUT_DE = name of cluster_data spreadsheet's output file (default = no output)
OUT_GO = name of goclass_data spreadsheet's output file (default = no output)
OUT_ANNO = name of anno_data spreadsheet's output file (default = no output)
"""
data_in=sys.stdin
if not infile == "--": data_in=open(infile,"r")
# Redirect output to files; default = sys.stdout
for i in range(0,self.glob.nsheet):
self.glob.sheets[i].connection=None
try:
x=OUT_ARRAY[i]
except:
x=self.operator_name+'.out_'+str(i)
sys.stderr.write('# set output file %i to %s\n' %(i,x))
if x == "--":
self.glob.sheets[i].fh=sys.stdout
elif x:
self.glob.sheets[i].fh=open(x,"w")
else:
self.glob.sheets[i].fh=None
return(data_in)
def close_IO_channels(self):
# close output files
for i in range(0,self.glob.nsheet):
if self.glob.sheets[i].fh: self.glob.sheets[i].fh.close()
if __name__=="__main__":
# glob is object handle containing spreadsheets,dictionaries,parameters
glob=Parameters.WorkSpace(configFile=None)
# liveData is update every CHUNK queries
    z = Runner(glob, operator_name=glob.param['input_OPERATOR'], CHUNK=glob.param['input_CHUNK'], liveData=glob.param['input_LIVEDATA'])
# input_format='FASTA'|'tab'
# if colnames as argument then data has no header rows
# queryspecies "auto" implies queries are Uniprot entries, None implies queryspecies is defined in input data and input_format is "tab", otherwise given by --queryspecies option on command line
z.lazyRunner(glob.param['input_FILE'], ['--','DE.out','GO.out','anno.out'], input_format="FASTA", colnames=None, queryspecies=glob.param['input_QUERYSPECIES'])
|
the-stack_106_18360
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Contains the base Layer class, from which all layers inherit."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import inspect # Necessary supplement to tf_inspect to deal with variadic args.
import itertools
import json
import threading
import numpy as np
from six.moves import zip # pylint: disable=redefined-builtin
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.autograph.impl import api as autograph
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import values as distribute_values
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import function
from tensorflow.python.framework import auto_control_deps
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.keras.engine import node as node_module
from tensorflow.python.keras.mixed_precision.experimental import autocast_variable
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.keras.saving.saved_model import save as saved_model
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import tf_utils
# A module that only depends on `keras.layers` import these from here.
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.keras.utils.generic_utils import to_snake_case # pylint: disable=unused-import
from tensorflow.python.keras.utils.tf_utils import is_tensor_or_tensor_list # pylint: disable=unused-import
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.training.tracking import layer_utils as trackable_layer_utils
from tensorflow.python.training.tracking import object_identity
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import serialization
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
# Prefix that is added to the TF op layer names.
_TF_OP_LAYER_NAME_PREFIX = 'tf_op_layer_'
@keras_export('keras.layers.Layer')
class Layer(module.Module):
"""Base layer class.
This is the class from which all layers inherit.
A layer is a class implementing common neural networks operations, such
as convolution, batch norm, etc. These operations require managing weights,
losses, updates, and inter-layer connectivity.
Users will just instantiate a layer and then treat it as a callable.
We recommend that descendants of `Layer` implement the following methods:
* `__init__()`: Save configuration in member variables
* `build()`: Called once from `__call__`, when we know the shapes of inputs
and `dtype`. Should have the calls to `add_weight()`, and then
call the super's `build()` (which sets `self.built = True`, which is
nice in case the user wants to call `build()` manually before the
first `__call__`).
* `call()`: Called in `__call__` after making sure `build()` has been called
once. Should actually perform the logic of applying the layer to the
input tensors (which should be passed in as the first argument).
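  Example: a minimal subclassed layer following this pattern (an illustrative
  sketch; the name `SimpleDense` and the sizes used are arbitrary):
  ```python
  import tensorflow as tf

  class SimpleDense(tf.keras.layers.Layer):

    def __init__(self, units=32):
      super(SimpleDense, self).__init__()
      self.units = units

    def build(self, input_shape):
      # Weight creation happens here, once the input shape is known.
      self.w = self.add_weight(shape=(int(input_shape[-1]), self.units),
                               initializer='glorot_uniform',
                               trainable=True, name='w')
      self.b = self.add_weight(shape=(self.units,), initializer='zeros',
                               trainable=True, name='b')
      super(SimpleDense, self).build(input_shape)  # sets self.built = True

    def call(self, inputs):
      # The layer's forward computation.
      return tf.matmul(inputs, self.w) + self.b

  layer = SimpleDense(4)
  y = layer(tf.ones((2, 3)))  # builds on first call, then runs call()
  assert len(layer.trainable_weights) == 2
  ```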
Arguments:
trainable: Boolean, whether the layer's variables should be trainable.
name: String name of the layer.
dtype: Default dtype of the layer's weights (default of `None` means use the
type of the first input).
dynamic: Set this to `True` if your layer should only be run eagerly, and
should not be used to generate a static computation graph.
This would be the case for a Tree-RNN or a recursive network,
for example, or generally for any layer that manipulates tensors
using Python control flow. If `False`, we assume that the layer can
safely be used to generate a static computation graph.
Read-only properties:
name: The name of the layer (string).
dtype: Default dtype of the layer's weights (default of `None` means use the
type of the first input).
updates: List of update ops of this layer.
losses: List of losses added by this layer.
trainable_weights: List of variables to be included in backprop.
non_trainable_weights: List of variables that should not be
included in backprop.
weights: The concatenation of the lists trainable_weights and
non_trainable_weights (in this order).
Mutable properties:
trainable: Whether the layer should be trained (boolean).
input_spec: Optional (list of) `InputSpec` object(s) specifying the
constraints on inputs that can be accepted by the layer.
"""
# See tf.Module for the usage of this property.
# The key for _obj_reference_counts_dict is a Trackable, which could be a
# variable or layer etc. tf.Module._flatten will fail to flatten the key
# since it is trying to convert Trackable to a string. This attribute can be
# ignored even after the fix of nest lib, since the trackable object should
# already been available as individual attributes. _obj_reference_counts_dict
# just contains a copy of them.
_TF_MODULE_IGNORED_PROPERTIES = frozenset(itertools.chain(
('_obj_reference_counts_dict',),
module.Module._TF_MODULE_IGNORED_PROPERTIES
))
@trackable.no_automatic_dependency_tracking
def __init__(self, trainable=True, name=None, dtype=None, dynamic=False,
**kwargs):
# These properties should be set by the user via keyword arguments.
# note that 'dtype', 'input_shape' and 'batch_input_shape'
# are only applicable to input layers: do not pass these keywords
# to non-input layers.
allowed_kwargs = {
'input_shape',
'batch_input_shape',
'batch_size',
'weights',
'activity_regularizer',
}
# Validate optional keyword arguments.
generic_utils.validate_kwargs(kwargs, allowed_kwargs)
# Mutable properties
# Indicates whether the layer's weights are updated during training
# and whether the layer's updates are run during training.
self._trainable = trainable
# A stateful layer is a layer whose updates are run during inference too,
# for instance stateful RNNs.
self.stateful = False
# Indicates whether `build` needs to be called upon layer call, to create
# the layer's weights.
self.built = False
# Provides information about which inputs are compatible with the layer.
self.input_spec = None
self.supports_masking = False
self._init_set_name(name)
self._activity_regularizer = kwargs.pop('activity_regularizer', None)
self._maybe_create_attribute('_trainable_weights', [])
self._maybe_create_attribute('_non_trainable_weights', [])
self._updates = []
# Object to store all thread local layer properties.
self._thread_local = threading.local()
# A list of zero-argument lambdas which return Tensors, used for variable
# regularizers.
self._callable_losses = []
# A list of symbolic Tensors containing activity regularizers and losses
# manually added through `add_loss` in graph-building mode.
self._losses = []
# A list of metric instances corresponding to the symbolic metric tensors
# added using the `add_metric` API.
self._metrics = []
self._set_dtype_and_policy(dtype)
self._call_convention = (base_layer_utils
.CallConvention.EXPLICIT_INPUTS_ARGUMENT)
# Dependencies tracked via attribute assignment.
self._maybe_create_attribute('_layers', [])
# These lists will be filled via successive calls
# to self._add_inbound_node().
self._inbound_nodes = []
self._outbound_nodes = []
call_fn_args = self._call_fn_args
self._expects_training_arg = ('training' in call_fn_args or
self._call_accepts_kwargs)
self._expects_mask_arg = ('mask' in call_fn_args or
self._call_accepts_kwargs)
# Whether the `call` method can be used to build a TF graph without issues.
self._dynamic = dynamic
# Manage input shape information if passed.
if 'input_shape' in kwargs or 'batch_input_shape' in kwargs:
# In this case we will later create an input layer
# to insert before the current layer
if 'batch_input_shape' in kwargs:
batch_input_shape = tuple(kwargs['batch_input_shape'])
elif 'input_shape' in kwargs:
if 'batch_size' in kwargs:
batch_size = kwargs['batch_size']
else:
batch_size = None
batch_input_shape = (batch_size,) + tuple(kwargs['input_shape'])
self._batch_input_shape = batch_input_shape
# Manage initial weight values if passed.
if 'weights' in kwargs:
self._initial_weights = kwargs['weights']
else:
self._initial_weights = None
def build(self, input_shape):
"""Creates the variables of the layer (optional, for subclass implementers).
This is a method that implementers of subclasses of `Layer` or `Model`
can override if they need a state-creation step in-between
layer instantiation and layer call.
This is typically used to create the weights of `Layer` subclasses.
Arguments:
input_shape: Instance of `TensorShape`, or list of instances of
`TensorShape` if the layer expects a list of inputs
(one instance per input).
"""
self.built = True
@doc_controls.for_subclass_implementers
def call(self, inputs, **kwargs): # pylint: disable=unused-argument
"""This is where the layer's logic lives.
Arguments:
inputs: Input tensor, or list/tuple of input tensors.
**kwargs: Additional keyword arguments.
Returns:
A tensor or list/tuple of tensors.
"""
return inputs
@doc_controls.for_subclass_implementers
def add_weight(self,
name=None,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
constraint=None,
partitioner=None,
use_resource=None,
synchronization=tf_variables.VariableSynchronization.AUTO,
aggregation=tf_variables.VariableAggregation.NONE,
**kwargs):
"""Adds a new variable to the layer.
Arguments:
name: Variable name.
shape: Variable shape. Defaults to scalar if unspecified.
dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
initializer: Initializer instance (callable).
regularizer: Regularizer instance (callable).
trainable: Boolean, whether the variable should be part of the layer's
"trainable_variables" (e.g. variables, biases)
or "non_trainable_variables" (e.g. BatchNorm mean and variance).
Note that `trainable` cannot be `True` if `synchronization`
is set to `ON_READ`.
constraint: Constraint instance (callable).
partitioner: Partitioner to be passed to the `Trackable` API.
use_resource: Whether to use `ResourceVariable`.
synchronization: Indicates when a distributed a variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
**kwargs: Additional keyword arguments. Accepted values are `getter` and
`collections`.
Returns:
The created variable. Usually either a `Variable` or `ResourceVariable`
instance. If `partitioner` is not `None`, a `PartitionedVariable`
instance is returned.
Raises:
RuntimeError: If called with partitioned variable regularization and
eager execution is enabled.
ValueError: When giving unsupported dtype and no initializer or when
trainable has been set to True with synchronization set as `ON_READ`.
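  Example: typical use inside a layer's `build` method (a sketch; it assumes
  an enclosing layer class with a `units` attribute, and the names and the
  L2 factor are arbitrary):
  ```python
  def build(self, input_shape):
    self.kernel = self.add_weight(
        name='kernel',
        shape=(int(input_shape[-1]), self.units),
        initializer='glorot_uniform',
        regularizer=tf.keras.regularizers.l2(1e-4),
        trainable=True)
    # A non-trainable scalar, e.g. for bookkeeping.
    self.seen = self.add_weight(
        name='seen', shape=(), initializer='zeros', trainable=False)
  ```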
"""
if shape is None:
shape = ()
# Validate optional keyword arguments.
for kwarg in kwargs:
if kwarg not in ['getter', 'collections', 'experimental_autocast']:
raise TypeError('Unknown keyword argument:', kwarg)
getter = kwargs.pop('getter', base_layer_utils.make_variable)
collections_arg = kwargs.pop('collections', None)
# 'experimental_autocast' can be set to False by the caller to indicate an
# AutoCastVariable should never be created.
autocast = kwargs.pop('experimental_autocast', True)
if dtype is None:
dtype = self.dtype or backend.floatx()
dtype = dtypes.as_dtype(dtype)
if self._dtype is None:
self._dtype = dtype.base_dtype.name
initializer = initializers.get(initializer)
regularizer = regularizers.get(regularizer)
constraint = constraints.get(constraint)
if synchronization == tf_variables.VariableSynchronization.ON_READ:
if trainable:
raise ValueError(
'Synchronization value can be set to '
'VariableSynchronization.ON_READ only for non-trainable variables. '
'You have specified trainable=True and '
'synchronization=VariableSynchronization.ON_READ.')
else:
# Set trainable to be false when variable is to be synced on read.
trainable = False
elif trainable is None:
trainable = True
# Initialize variable when no initializer provided
if initializer is None:
# If dtype is DT_FLOAT, provide a uniform unit scaling initializer
if dtype.is_floating:
initializer = initializers.glorot_uniform()
# If dtype is DT_INT/DT_UINT, provide a default value `zero`
# If dtype is DT_BOOL, provide a default value `FALSE`
elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
initializer = initializers.zeros()
# NOTE: Do we need to support handling DT_STRING and DT_COMPLEX here?
else:
raise ValueError('An initializer for variable %s of type %s is required'
' for layer %s' % (name, dtype.base_dtype, self.name))
if autocast and self._mixed_precision_policy.should_cast_variables:
# Wrap 'getter' with a version that returns an AutoCastVariable.
old_getter = getter
def getter(*args, **kwargs): # pylint: disable=function-redefined
variable = old_getter(*args, **kwargs)
if isinstance(variable, distribute_values.DistributedVariable):
return autocast_variable.AutoCastDistributedVariable(variable)
else:
return autocast_variable.AutoCastVariable(variable)
variable = self._add_variable_with_custom_getter(
name=name,
shape=shape,
# TODO(allenl): a `make_variable` equivalent should be added as a
# `Trackable` method.
getter=getter,
# Manage errors in Layer rather than Trackable.
overwrite=True,
initializer=initializer,
dtype=dtype,
constraint=constraint,
trainable=trainable,
partitioner=partitioner,
use_resource=use_resource,
collections=collections_arg,
synchronization=synchronization,
aggregation=aggregation)
backend.track_variable(variable)
if regularizer is not None:
# TODO(fchollet): in the future, this should be handled at the
# level of variable creation, and weight regularization losses
# should be variable attributes.
name_in_scope = variable.name[:variable.name.find(':')]
self._handle_weight_regularization(name_in_scope,
variable,
regularizer)
if trainable:
self._trainable_weights.append(variable)
else:
self._non_trainable_weights.append(variable)
return variable
@base_layer_utils.default
def get_config(self):
"""Returns the config of the layer.
A layer config is a Python dictionary (serializable)
containing the configuration of a layer.
The same layer can be reinstantiated later
(without its trained weights) from this configuration.
The config of a layer does not include connectivity
information, nor the layer class name. These are handled
by `Network` (one layer of abstraction above).
Returns:
Python dictionary.
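  Example: a subclass with an extra constructor argument (an illustrative
  sketch; the `units` argument is arbitrary):
  ```python
  class SimpleDense(tf.keras.layers.Layer):
    def __init__(self, units=32, **kwargs):
      super(SimpleDense, self).__init__(**kwargs)
      self.units = units
    def get_config(self):
      config = super(SimpleDense, self).get_config()
      config.update({'units': self.units})
      return config
  # Round trip: an equivalent (untrained) layer is rebuilt from the config.
  layer = SimpleDense(16)
  clone = SimpleDense.from_config(layer.get_config())
  ```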
"""
all_args = tf_inspect.getfullargspec(self.__init__).args
config = {'name': self.name, 'trainable': self.trainable}
if hasattr(self, '_batch_input_shape'):
config['batch_input_shape'] = self._batch_input_shape
if hasattr(self, 'dtype'):
config['dtype'] = self.dtype
if hasattr(self, 'dynamic'):
# Only include `dynamic` in the `config` if it is `True`
if self.dynamic:
config['dynamic'] = self.dynamic
elif 'dynamic' in all_args:
all_args.remove('dynamic')
expected_args = config.keys()
# Finds all arguments in the `__init__` that are not in the config:
extra_args = [arg for arg in all_args if arg not in expected_args]
# Check that either the only argument in the `__init__` is `self`,
# or that `get_config` has been overridden:
if len(extra_args) > 1 and hasattr(self.get_config, '_is_default'):
raise NotImplementedError('Layers with arguments in `__init__` must '
'override `get_config`.')
# TODO(reedwm): Handle serializing self._mixed_precision_policy.
return config
@classmethod
def from_config(cls, config):
"""Creates a layer from its config.
This method is the reverse of `get_config`,
capable of instantiating the same layer from the config
dictionary. It does not handle layer connectivity
(handled by Network), nor weights (handled by `set_weights`).
Arguments:
config: A Python dictionary, typically the
output of get_config.
Returns:
A layer instance.
"""
return cls(**config)
def compute_output_shape(self, input_shape):
"""Computes the output shape of the layer.
If the layer has not been built, this method will call `build` on the
layer. This assumes that the layer will later be used with inputs that
match the input shape provided here.
Arguments:
input_shape: Shape tuple (tuple of integers)
or list of shape tuples (one per output tensor of the layer).
Shape tuples can include None for free dimensions,
instead of an integer.
Returns:
An output shape tuple.
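  Example: an override for a layer that halves the last axis (a sketch; the
  halving rule is invented and assumes the last dimension is statically known):
  ```python
  def compute_output_shape(self, input_shape):
    input_shape = tf.TensorShape(input_shape).as_list()
    return tf.TensorShape(input_shape[:-1] + [input_shape[-1] // 2])
  ```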
"""
if context.executing_eagerly():
# In this case we build the model first in order to do shape inference.
# This is acceptable because the framework only calls
# `compute_output_shape` on shape values that the layer would later be
# built for. It would however cause issues in case a user attempts to
# use `compute_output_shape` manually with shapes that are incompatible
# with the shape the Layer will be called on (these users will have to
# implement `compute_output_shape` themselves).
self._maybe_build(input_shape)
with context.graph_mode():
graph = func_graph.FuncGraph('graph')
with graph.as_default():
input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)
inputs = nest.map_structure(
base_layer_utils.generate_placeholders_from_shape, input_shape)
try:
if self._expects_training_arg:
outputs = self(inputs, training=False)
else:
outputs = self(inputs)
except TypeError:
raise NotImplementedError('We could not automatically infer '
'the static shape of the layer\'s output.'
' Please implement the '
'`compute_output_shape` method on your '
'layer (%s).' % self.__class__.__name__)
return nest.map_structure(lambda t: t.shape, outputs)
raise NotImplementedError
@doc_controls.for_subclass_implementers
def compute_output_signature(self, input_signature):
"""Compute the output tensor signature of the layer based on the inputs.
Unlike a TensorShape object, a TensorSpec object contains both shape
and dtype information for a tensor. This method allows layers to provide
output dtype information if it is different from the input dtype.
For any layer that doesn't implement this function,
the framework will fall back to use `compute_output_shape`, and will
assume that the output dtype matches the input dtype.
Args:
input_signature: Single TensorSpec or nested structure of TensorSpec
objects, describing a candidate input for the layer.
Returns:
Single TensorSpec or nested structure of TensorSpec objects, describing
how the layer would transform the provided input.
Raises:
TypeError: If input_signature contains a non-TensorSpec object.
"""
def check_type_return_shape(s):
if not isinstance(s, tensor_spec.TensorSpec):
raise TypeError(
'Only TensorSpec signature types are supported, '
'but saw signature entry: {}.'.format(s))
return s.shape
input_shape = nest.map_structure(check_type_return_shape, input_signature)
output_shape = self.compute_output_shape(input_shape)
if self._mixed_precision_policy.should_cast_variables:
# If using mixed precision, and weights are cast to input dtype, we should
# not infer the dtype from self.dtype
dtype = None
else:
dtype = self.dtype
if dtype is None:
input_dtypes = [s.dtype for s in nest.flatten(input_signature)]
# Default behavior when self.dtype is None, is to use the first input's
# dtype.
dtype = input_dtypes[0]
return nest.map_structure(
lambda s: tensor_spec.TensorSpec(dtype=dtype, shape=s),
output_shape)
@base_layer_utils.default
def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument
"""Computes an output mask tensor.
Arguments:
inputs: Tensor or list of tensors.
mask: Tensor or list of tensors.
Returns:
None or a tensor (or list of tensors,
one per output tensor of the layer).
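  Example: a layer that sums over the time axis and therefore stops
  propagating the incoming mask (an illustrative sketch):
  ```python
  class SumOverTime(tf.keras.layers.Layer):
    def __init__(self):
      super(SumOverTime, self).__init__()
      self.supports_masking = True
    def call(self, inputs, mask=None):
      if mask is not None:
        inputs = inputs * tf.cast(mask, inputs.dtype)[..., tf.newaxis]
      return tf.reduce_sum(inputs, axis=1)
    def compute_mask(self, inputs, mask=None):
      # The time dimension is consumed, so no mask is carried forward.
      return None
  ```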
"""
if not self.supports_masking:
if any(m is not None for m in nest.flatten(mask)):
raise TypeError('Layer ' + self.name + ' does not support masking, '
'but was passed an input_mask: ' + str(mask))
# masking not explicitly supported: return None as mask.
return None
# if masking is explicitly supported, by default
# carry over the input mask
return mask
def __call__(self, inputs, *args, **kwargs):
"""Wraps `call`, applying pre- and post-processing steps.
Arguments:
inputs: input tensor(s).
*args: additional positional arguments to be passed to `self.call`.
**kwargs: additional keyword arguments to be passed to `self.call`.
Returns:
Output tensor(s).
Note:
- The following optional keyword arguments are reserved for specific uses:
* `training`: Boolean scalar tensor of Python boolean indicating
whether the `call` is meant for training or inference.
* `mask`: Boolean input mask.
- If the layer's `call` method takes a `mask` argument (as some Keras
layers do), its default value will be set to the mask generated
for `inputs` by the previous layer (if `inputs` did come from
a layer that generated a corresponding mask, i.e. if it came from
a Keras layer with masking support).
Raises:
ValueError: if the layer's `call` method returns None (an invalid value).
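  Example: passing the reserved `training` argument (an added usage sketch):
  ```python
  layer = tf.keras.layers.Dropout(0.5)
  x = tf.ones((2, 4))
  y_train = layer(x, training=True)   # dropout applied
  y_infer = layer(x, training=False)  # dropout bypassed
  ```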
"""
call_context = base_layer_utils.call_context()
input_list = nest.flatten(inputs)
# We will attempt to build a TF graph if & only if all inputs are symbolic.
# This is always the case in graph mode. It can also be the case in eager
# mode when all inputs can be traced back to `keras.Input()` (when building
# models using the functional API).
build_graph = tf_utils.are_all_symbolic_tensors(input_list)
# Accept NumPy and scalar inputs by converting to Tensors.
if any(isinstance(x, (np.ndarray, float, int)) for x in input_list):
def _convert_non_tensor(x):
# Don't call `ops.convert_to_tensor` on all `inputs` because
# `SparseTensors` can't be converted to `Tensor`.
if isinstance(x, (np.ndarray, float, int)):
return ops.convert_to_tensor(x)
return x
inputs = nest.map_structure(_convert_non_tensor, inputs)
input_list = nest.flatten(inputs)
# Handle `mask` propagation from previous layer to current layer. Masks can
# be propagated explicitly via the `mask` argument, or implicitly via
# setting the `_keras_mask` attribute on the inputs to a Layer. Masks passed
# explicitly take priority.
input_masks = self._collect_input_masks(inputs, args, kwargs)
if (self._expects_mask_arg and input_masks is not None and
not self._call_arg_was_passed('mask', args, kwargs)):
kwargs['mask'] = input_masks
# If `training` argument was not explicitly passed, propagate `training`
# value from this layer's calling layer.
training_arg_passed_by_framework = False
# Priority 1: `training` was explicitly passed.
if self._call_arg_was_passed('training', args, kwargs):
training_value = self._get_call_arg_value('training', args, kwargs)
if not self._expects_training_arg:
kwargs.pop('training')
else:
training_value = None
# Priority 2: `training` was passed to a parent layer.
if call_context.training is not None:
training_value = call_context.training
# Priority 3a: `learning_phase()` has been set.
elif backend.global_learning_phase_is_set():
training_value = backend.learning_phase()
# Priority 3b: Pass the `learning_phase()` if in the Keras FuncGraph.
elif build_graph:
with backend.get_graph().as_default():
if base_layer_utils.is_in_keras_graph():
training_value = backend.learning_phase()
if self._expects_training_arg and training_value is not None:
kwargs['training'] = training_value
training_arg_passed_by_framework = True
# Only create Keras history if at least one tensor originates from a
# `keras.Input`. Otherwise this Layer may be being used outside the Keras
# framework.
if build_graph and base_layer_utils.needs_keras_history(inputs):
base_layer_utils.create_keras_history(inputs)
# Clear eager losses on top level model call.
# We are clearing the losses only on the top level model call and not on
# every layer/model call because layer/model may be reused.
if (base_layer_utils.is_in_eager_or_tf_function() and
not call_context.in_call):
self._clear_losses()
with call_context.enter(self, inputs, build_graph, training_value):
# Check input assumptions set after layer building, e.g. input shape.
if build_graph:
# Symbolic execution on symbolic tensors. We will attempt to build
# the corresponding TF subgraph inside `backend.get_graph()`
input_spec.assert_input_compatibility(self.input_spec, inputs,
self.name)
graph = backend.get_graph()
with graph.as_default(), backend.name_scope(self._name_scope()):
# Build layer if applicable (if the `build` method has been
# overridden).
self._maybe_build(inputs)
# Wrapping `call` function in autograph to allow for dynamic control
# dependencies in call. We are limiting this to subclassed layers as
# autograph is strictly needed only for subclassed layers and models.
# tf_convert will respect the value of autograph setting in the
# enclosing tf.function, if any.
if base_layer_utils.is_subclassed(self):
call_fn = autograph.tf_convert(
self.call, ag_ctx.control_status_ctx())
else:
call_fn = self.call
if not self.dynamic:
try:
with base_layer_utils.autocast_context_manager(
input_list,
self._mixed_precision_policy.should_cast_variables):
# Add auto_control_deps in V2 when they are not already added by
# a `tf.function`.
if (ops.executing_eagerly_outside_functions() and
not base_layer_utils.is_in_eager_or_tf_function()):
with auto_control_deps.AutomaticControlDependencies() as acd:
outputs = call_fn(inputs, *args, **kwargs)
# Wrap Tensors in `outputs` in `tf.identity` to avoid
# circular dependencies.
outputs = base_layer_utils.mark_as_return(outputs, acd)
else:
outputs = call_fn(inputs, *args, **kwargs)
except TypeError as e:
exception_str = str(e)
exception_msg = 'Tensor objects are only iterable when eager'
if exception_msg in exception_str:
raise TypeError('You are attempting to use Python control '
'flow in a layer that was not declared to be '
'dynamic. Pass `dynamic=True` to the class '
'constructor.\nEncountered error:\n"""\n' +
exception_str + '\n"""')
raise
else:
# We will use static shape inference to return symbolic tensors
# matching the specifications of the layer outputs.
# Since `self.dynamic` is True, we will never attempt to
# run the underlying TF graph (which is disconnected).
# TODO(fchollet): consider py_func as an alternative, which
# would enable us to run the underlying graph if needed.
outputs = self._symbolic_call(inputs)
if outputs is None:
raise ValueError('A layer\'s `call` method should return a '
'Tensor or a list of Tensors, not None '
'(layer: ' + self.name + ').')
if base_layer_utils.have_all_keras_metadata(inputs):
if training_arg_passed_by_framework:
kwargs.pop('training')
inputs, outputs = self._set_connectivity_metadata_(
inputs, outputs, args, kwargs)
self._handle_activity_regularization(inputs, outputs)
self._set_mask_metadata(inputs, outputs, input_masks)
if hasattr(self, '_set_inputs') and not self.inputs:
# Subclassed network: explicitly set metadata normally set by
# a call to self._set_inputs().
# TODO(b/120997007): This should be done in Eager as well, but
# causes garbage collection issues because of the placeholders
# created on the default Keras graph.
self._set_inputs(inputs, outputs)
else:
# Eager execution on data tensors.
with backend.name_scope(self._name_scope()):
self._maybe_build(inputs)
with base_layer_utils.autocast_context_manager(
input_list, self._mixed_precision_policy.should_cast_variables):
outputs = self.call(inputs, *args, **kwargs)
self._handle_activity_regularization(inputs, outputs)
self._set_mask_metadata(inputs, outputs, input_masks)
return outputs
@property
def dtype(self):
return self._dtype
@property
def name(self):
return self._name
@property
def dynamic(self):
return self._dynamic
@property
def trainable(self):
return self._trainable
@trainable.setter
def trainable(self, value):
self._trainable = value
for layer in getattr(self, '_layers', []):
layer.trainable = value
@property
def activity_regularizer(self):
"""Optional regularizer function for the output of this layer."""
return self._activity_regularizer
@activity_regularizer.setter
def activity_regularizer(self, regularizer):
"""Optional regularizer function for the output of this layer."""
self._activity_regularizer = regularizer
@property
def input_spec(self):
return self._input_spec
@input_spec.setter
# Must be decorated to prevent tracking, since the input_spec can be nested
# InputSpec objects.
@trackable.no_automatic_dependency_tracking
def input_spec(self, value):
for v in nest.flatten(value):
if v is not None and not isinstance(v, InputSpec):
raise TypeError('Layer input_spec must be an instance of InputSpec. '
'Got: {}'.format(v))
self._input_spec = value
@property
def trainable_weights(self):
if self.trainable:
nested = self._gather_children_attribute('trainable_weights')
return self._trainable_weights + nested
else:
return []
@property
def non_trainable_weights(self):
if self.trainable:
nested = self._gather_children_attribute('non_trainable_weights')
return self._non_trainable_weights + nested
else:
nested = self._gather_children_attribute('weights')
return self._trainable_weights + self._non_trainable_weights + nested
@property
def weights(self):
"""Returns the list of all layer variables/weights.
Returns:
A list of variables.
"""
return self.trainable_weights + self.non_trainable_weights
@property
def updates(self):
if not self.trainable and not self.stateful:
return []
with backend.get_graph().as_default():
updates = []
for u in self._updates:
if callable(u):
try:
u = u()
except ValueError as e:
if 'Trying to capture a tensor from an inner function' in str(e):
base_layer_utils.check_graph_consistency(
method='add_update', force_raise=True)
raise
base_layer_utils.check_graph_consistency(u, method='add_update')
updates.append(u)
return updates + self._gather_children_attribute('updates')
@property
def losses(self):
"""Losses which are associated with this `Layer`.
Variable regularization tensors are created when this property is accessed,
so it is eager safe: accessing `losses` under a `tf.GradientTape` will
propagate gradients back to the corresponding variables.
Returns:
A list of tensors.
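  Example: including tracked losses in a custom training step (a sketch;
  `model`, `x`, `y` and `loss_fn` are placeholders):
  ```python
  with tf.GradientTape() as tape:
    predictions = model(x, training=True)
    # Regularization / activity losses collected from the layers.
    loss = loss_fn(y, predictions) + sum(model.losses)
  grads = tape.gradient(loss, model.trainable_weights)
  ```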
"""
collected_losses = []
# If any eager losses are present, we assume the model to be part of an
# eager training loop (either a custom one or the one used when
# `run_eagerly=True`), and so we always return just the eager losses in that
# case.
if self._eager_losses:
collected_losses.extend(self._eager_losses)
else:
collected_losses.extend(self._losses)
for regularizer in self._callable_losses:
loss_tensor = regularizer()
if loss_tensor is not None:
collected_losses.append(loss_tensor)
return collected_losses + self._gather_children_attribute('losses')
@doc_controls.for_subclass_implementers
def add_loss(self, losses, inputs=None):
"""Add loss tensor(s), potentially dependent on layer inputs.
Some losses (for instance, activity regularization losses) may be dependent
on the inputs passed when calling a layer. Hence, when reusing the same
layer on different inputs `a` and `b`, some entries in `layer.losses` may
be dependent on `a` and some on `b`. This method automatically keeps track
of dependencies.
This method can be used inside a subclassed layer or model's `call`
function, in which case `losses` should be a Tensor or list of Tensors.
Example:
```python
class MyLayer(tf.keras.layers.Layer):
def call(inputs, self):
self.add_loss(tf.abs(tf.reduce_mean(inputs)), inputs=True)
return inputs
```
This method can also be called directly on a Functional Model during
construction. In this case, any loss Tensors passed to this Model must
be symbolic and be able to be traced back to the model's `Input`s. These
losses become part of the model's topology and are tracked in `get_config`.
Example:
```python
inputs = tf.keras.Input(shape=(10,))
x = tf.keras.layers.Dense(10)(inputs)
outputs = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs, outputs)
# Activity regularization.
model.add_loss(tf.abs(tf.reduce_mean(x)))
```
If this is not the case for your loss (if, for example, your loss references
a `Variable` of one of the model's layers), you can wrap your loss in a
zero-argument lambda. These losses are not tracked as part of the model's
topology since they can't be serialized.
Example:
```python
inputs = tf.keras.Input(shape=(10,))
x = tf.keras.layers.Dense(10)(inputs)
outputs = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs, outputs)
# Weight regularization.
model.add_loss(lambda: tf.reduce_mean(x.kernel))
```
The `get_losses_for` method allows you to retrieve the losses relevant to a
specific set of inputs.
Arguments:
losses: Loss tensor, or list/tuple of tensors. Rather than tensors, losses
may also be zero-argument callables which create a loss tensor.
inputs: Ignored when executing eagerly. If anything other than None is
passed, it signals the losses are conditional on some of the layer's
inputs, and thus they should only be run where these inputs are
available. This is the case for activity regularization losses, for
instance. If `None` is passed, the losses are assumed
to be unconditional, and will apply across all dataflows of the layer
(e.g. weight regularization losses).
"""
def _tag_unconditional(loss):
if callable(loss):
loss = loss()
if loss is None:
return None # Will be filtered out when computing the .losses property
if not tensor_util.is_tensor(loss):
loss = ops.convert_to_tensor(loss, dtype=backend.floatx())
loss._unconditional_loss = (inputs is None) # pylint: disable=protected-access
return loss
losses = nest.flatten(losses)
callable_losses = []
eager_losses = []
symbolic_losses = []
for loss in losses:
if callable(loss):
callable_losses.append(functools.partial(_tag_unconditional, loss))
continue
if loss is None:
continue
if not tensor_util.is_tensor(loss):
loss = ops.convert_to_tensor(loss, dtype=backend.floatx())
# TF Functions should take the eager path.
if (tf_utils.is_symbolic_tensor(loss) and
not base_layer_utils.is_in_tf_function()):
symbolic_losses.append(_tag_unconditional(loss))
base_layer_utils.check_graph_consistency(loss, method='add_loss')
elif tensor_util.is_tensor(loss):
eager_losses.append(_tag_unconditional(loss))
self._callable_losses += callable_losses
in_call_context = base_layer_utils.call_context().in_call
if eager_losses and not in_call_context:
raise ValueError(
'Expected a symbolic Tensor or a callable for the loss value. '
'Please wrap your loss computation in a zero argument `lambda`.')
self._eager_losses += eager_losses
if in_call_context:
for symbolic_loss in symbolic_losses:
self._losses.append(symbolic_loss)
else:
for symbolic_loss in symbolic_losses:
if getattr(self, '_is_graph_network', False):
new_layers = base_layer_utils.create_keras_history(symbolic_loss)
# Losses must be keyed on inputs no matter what in order to
# be supported in DistributionStrategy.
add_loss_layer = AddLoss(unconditional=False)
add_loss_layer(symbolic_loss)
new_layers.append(add_loss_layer)
self._insert_layers(new_layers)
else:
# Possible a loss was added in a Layer's `build`.
self._losses.append(symbolic_loss)
@trackable.no_automatic_dependency_tracking
def _clear_losses(self):
"""Used every step in eager to reset losses."""
self._eager_losses = []
if hasattr(self, '_layers'):
for layer in trackable_layer_utils.filter_empty_layer_containers(
self._layers):
layer._clear_losses()
@property
def metrics(self):
return self._metrics + self._gather_children_attribute('metrics')
@doc_controls.for_subclass_implementers
def add_metric(self, value, aggregation=None, name=None):
"""Adds metric tensor to the layer.
Args:
value: Metric tensor.
aggregation: Sample-wise metric reduction function. If `aggregation=None`,
it indicates that the metric tensor provided has been aggregated
already. E.g., `bin_acc = BinaryAccuracy(name='acc')` followed by
`model.add_metric(bin_acc(y_true, y_pred))`. If aggregation='mean', the
given metric tensor will be sample-wise reduced using `mean` function.
e.g., `model.add_metric(tf.reduce_sum(outputs), name='output_mean',
aggregation='mean')`.
name: String metric name.
Raises:
ValueError: If `aggregation` is anything other than None or `mean`.
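  Example: tracking the mean activation of a layer (a sketch of a `call`
  override; the metric name is arbitrary):
  ```python
  def call(self, inputs):
    self.add_metric(tf.reduce_mean(inputs), name='mean_activation',
                    aggregation='mean')
    return inputs
  ```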
"""
if aggregation is not None and aggregation != 'mean':
raise ValueError(
'We currently support only `mean` sample-wise metric aggregation. '
'You provided aggregation=`%s`' % aggregation)
from_metric_obj = hasattr(value, '_metric_obj')
is_symbolic = tf_utils.is_symbolic_tensor(value)
in_call_context = base_layer_utils.call_context().in_call
if name is None and not from_metric_obj:
# Eg. `self.add_metric(math_ops.reduce_sum(x), aggregation='mean')`
# In eager mode, we use metric name to lookup a metric. Without a name,
# a new Mean metric wrapper will be created on every model/layer call.
# So, we raise an error when no name is provided.
# We will do the same for symbolic mode for consistency although a name
# will be generated if no name is provided.
# We will not raise this error in the following use case for the sake of
# consistency, as the name is provided in the metric constructor.
# mean = metrics.Mean(name='my_metric')
# model.add_metric(mean(outputs))
raise ValueError('Please provide a name for your metric like '
'`self.add_metric(tf.reduce_sum(inputs), '
'name=\'mean_activation\', aggregation=\'mean\')`')
elif from_metric_obj:
name = value._metric_obj.name
if in_call_context:
# TF Function path should take the eager path.
if is_symbolic and not base_layer_utils.is_in_tf_function():
self._symbolic_add_metric(value, aggregation, name)
else:
self._eager_add_metric(value, aggregation, name)
else:
if not is_symbolic:
raise ValueError('Expected a symbolic Tensor for the metric value, '
'received: ' + str(value))
# Possible a metric was added in a Layer's `build`.
if not getattr(self, '_is_graph_network', False):
with backend.get_graph().as_default():
self._symbolic_add_metric(value, aggregation, name)
return
if from_metric_obj:
raise ValueError('Using the result of calling a `Metric` object '
'when calling `add_metric` on a Functional '
'Model is not supported. Please pass the '
'Tensor to monitor directly.')
# Insert layers into the Keras Graph Network.
new_layers = base_layer_utils.create_keras_history(value)
add_metric_layer = AddMetric(aggregation, name)
add_metric_layer(value)
new_layers.append(add_metric_layer)
self._insert_layers(new_layers)
@deprecation.deprecated_args(None, '`inputs` is now automatically inferred',
'inputs')
@doc_controls.for_subclass_implementers
def add_update(self, updates, inputs=None):
"""Add update op(s), potentially dependent on layer inputs.
Weight updates (for instance, the updates of the moving mean and variance
in a BatchNormalization layer) may be dependent on the inputs passed
when calling a layer. Hence, when reusing the same layer on
different inputs `a` and `b`, some entries in `layer.updates` may be
dependent on `a` and some on `b`. This method automatically keeps track
of dependencies.
The `get_updates_for` method allows you to retrieve the updates relevant to a
specific set of inputs.
This call is ignored when eager execution is enabled (in that case, variable
updates are run on the fly and thus do not need to be tracked for later
execution).
Arguments:
updates: Update op, or list/tuple of update ops, or zero-arg callable
that returns an update op. A zero-arg callable should be passed in
order to disable running the updates by setting `trainable=False`
on this Layer, when executing in Eager mode.
inputs: Deprecated, will be automatically inferred.
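  Example: registering a state update from `call` (a sketch; the counter
  variable is invented):
  ```python
  class CallCounter(tf.keras.layers.Layer):
    def build(self, input_shape):
      self.calls = self.add_weight(
          name='calls', shape=(), initializer='zeros', trainable=False)
    def call(self, inputs):
      # A zero-arg callable lets the update be skipped when the layer is
      # frozen (trainable=False) while executing eagerly.
      self.add_update(lambda: self.calls.assign_add(1.))
      return inputs
  ```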
"""
if ds_context.has_strategy() and ds_context.in_cross_replica_context():
# Updates don't need to be run in a cross-replica context.
if (ops.executing_eagerly_outside_functions() and
not base_layer_utils.is_in_keras_graph()):
raise RuntimeError( # pylint: disable=g-doc-exception
'`add_update` was called in a cross-replica context. This is not '
'expected. If you require this feature, please file an issue.')
return
updates = generic_utils.to_list(updates)
call_context = base_layer_utils.call_context()
# All updates can be run immediately in Eager or in a tf.function.
if base_layer_utils.is_in_eager_or_tf_function():
if not call_context.frozen:
for update in updates:
if callable(update):
update()
return
if call_context.in_call:
relevant_inputs = call_context.inputs
else:
inbound_nodes = getattr(self, '_inbound_nodes', [])
relevant_inputs = [node.input_tensors for node in inbound_nodes]
def process_update(x):
"""Standardize update ops.
Arguments:
x: Tensor, op, or callable.
Returns:
An update op.
"""
if callable(x):
update = lambda: process_update(x())
if not ops.executing_eagerly_outside_functions():
# In V1 mode, call the callable right away and process. This is needed
# for TPU strategy.
return update()
elif isinstance(x, ops.Operation):
update = x
elif hasattr(x, 'op'):
update = x.op
else:
update = ops.convert_to_tensor(x)
reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, [update])
update._unconditional_update = update not in reachable
return update
updates = [process_update(x) for x in updates]
# Non-callable Updates are run automatically inside `call` in V2, so
# they do not need to be tracked later.
if ops.executing_eagerly_outside_functions() and call_context.in_call:
updates = [u for u in updates if callable(u)]
self._updates += updates
def set_weights(self, weights):
"""Sets the weights of the layer, from Numpy arrays.
Arguments:
weights: a list of Numpy arrays. The number of arrays and their shapes
must match the number and shapes of the layer's weights (i.e. it should
match the output of `get_weights`).
Raises:
ValueError: If the provided weights list does not match the
layer's specifications.
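  Example (a sketch; the shapes correspond to a `Dense(2)` layer built on
  3-dimensional inputs):
  ```python
  import numpy as np
  layer = tf.keras.layers.Dense(2)
  layer.build((None, 3))                      # kernel (3, 2), bias (2,)
  layer.set_weights([np.zeros((3, 2), np.float32),
                     np.ones((2,), np.float32)])
  assert [w.shape for w in layer.get_weights()] == [(3, 2), (2,)]
  ```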
"""
params = self.weights
if len(params) != len(weights):
raise ValueError('You called `set_weights(weights)` on layer "' +
self.name + '" with a weight list of length ' +
str(len(weights)) + ', but the layer was expecting ' +
str(len(params)) + ' weights. Provided weights: ' +
str(weights)[:50] + '...')
if not params:
return
weight_value_tuples = []
for p, w in zip(params, weights):
ref_shape = p.shape
if not ref_shape.is_compatible_with(w.shape):
raise ValueError('Layer weight shape ' + str(ref_shape) +
' not compatible with '
'provided weight shape ' + str(w.shape))
weight_value_tuples.append((p, w))
backend.batch_set_value(weight_value_tuples)
def get_weights(self):
"""Returns the current weights of the layer.
Returns:
Weights values as a list of numpy arrays.
"""
params = self.weights
return backend.batch_get_value(params)
def get_updates_for(self, inputs):
"""Retrieves updates relevant to a specific set of inputs.
Arguments:
inputs: Input tensor or list/tuple of input tensors.
Returns:
List of update ops of the layer that depend on `inputs`.
"""
if inputs is None:
# Requesting unconditional updates.
return [u for u in self.updates if u._unconditional_update]
# Requesting input-conditional updates.
updates = [u for u in self.updates if not u._unconditional_update]
inputs = nest.flatten(inputs)
reachable = tf_utils.get_reachable_from_inputs(inputs, updates)
return [u for u in updates if u in reachable]
def get_losses_for(self, inputs):
"""Retrieves losses relevant to a specific set of inputs.
Arguments:
inputs: Input tensor or list/tuple of input tensors.
Returns:
List of loss tensors of the layer that depend on `inputs`.
"""
if inputs is None:
# Requesting unconditional losses.
return [l for l in self.losses if l._unconditional_loss]
# Requesting input-conditional losses.
losses = [l for l in self.losses if not l._unconditional_loss]
inputs = nest.flatten(inputs)
reachable = tf_utils.get_reachable_from_inputs(inputs, losses)
return [l for l in losses if l in reachable]
def get_input_mask_at(self, node_index):
"""Retrieves the input mask tensor(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A mask tensor
(or list of tensors if the layer has multiple inputs).
"""
inputs = self.get_input_at(node_index)
if isinstance(inputs, list):
return [getattr(x, '_keras_mask', None) for x in inputs]
else:
return getattr(inputs, '_keras_mask', None)
def get_output_mask_at(self, node_index):
"""Retrieves the output mask tensor(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A mask tensor
(or list of tensors if the layer has multiple outputs).
"""
output = self.get_output_at(node_index)
if isinstance(output, list):
return [getattr(x, '_keras_mask', None) for x in output]
else:
return getattr(output, '_keras_mask', None)
@property
def input_mask(self):
"""Retrieves the input mask tensor(s) of a layer.
Only applicable if the layer has exactly one inbound node,
i.e. if it is connected to one incoming layer.
Returns:
Input mask tensor (potentially None) or list of input
mask tensors.
Raises:
AttributeError: if the layer is connected to
more than one incoming layer.
"""
inputs = self.input
if isinstance(inputs, list):
return [getattr(x, '_keras_mask', None) for x in inputs]
else:
return getattr(inputs, '_keras_mask', None)
@property
def output_mask(self):
"""Retrieves the output mask tensor(s) of a layer.
Only applicable if the layer has exactly one inbound node,
i.e. if it is connected to one incoming layer.
Returns:
Output mask tensor (potentially None) or list of output
mask tensors.
Raises:
AttributeError: if the layer is connected to
more than one incoming layer.
"""
output = self.output
if isinstance(output, list):
return [getattr(x, '_keras_mask', None) for x in output]
else:
return getattr(output, '_keras_mask', None)
def get_input_shape_at(self, node_index):
"""Retrieves the input shape(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A shape tuple
(or list of shape tuples if the layer has multiple inputs).
Raises:
RuntimeError: If called in Eager mode.
"""
return self._get_node_attribute_at_index(node_index, 'input_shapes',
'input shape')
def get_output_shape_at(self, node_index):
"""Retrieves the output shape(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A shape tuple
(or list of shape tuples if the layer has multiple outputs).
Raises:
RuntimeError: If called in Eager mode.
"""
return self._get_node_attribute_at_index(node_index, 'output_shapes',
'output shape')
def get_input_at(self, node_index):
"""Retrieves the input tensor(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A tensor (or list of tensors if the layer has multiple inputs).
Raises:
RuntimeError: If called in Eager mode.
"""
return self._get_node_attribute_at_index(node_index, 'input_tensors',
'input')
def get_output_at(self, node_index):
"""Retrieves the output tensor(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A tensor (or list of tensors if the layer has multiple outputs).
Raises:
RuntimeError: If called in Eager mode.
"""
return self._get_node_attribute_at_index(node_index, 'output_tensors',
'output')
@property
def input(self):
"""Retrieves the input tensor(s) of a layer.
Only applicable if the layer has exactly one input,
i.e. if it is connected to one incoming layer.
Returns:
Input tensor or list of input tensors.
Raises:
RuntimeError: If called in Eager mode.
AttributeError: If no inbound nodes are found.
"""
if not self._inbound_nodes:
raise AttributeError('Layer ' + self.name +
' is not connected, no input to return.')
return self._get_node_attribute_at_index(0, 'input_tensors', 'input')
@property
def output(self):
"""Retrieves the output tensor(s) of a layer.
Only applicable if the layer has exactly one output,
i.e. if it is connected to one incoming layer.
Returns:
Output tensor or list of output tensors.
Raises:
AttributeError: if the layer is connected to more than one incoming
layer.
RuntimeError: if called in Eager mode.
"""
if not self._inbound_nodes:
raise AttributeError('Layer ' + self.name + ' has no inbound nodes.')
return self._get_node_attribute_at_index(0, 'output_tensors', 'output')
@property
def input_shape(self):
"""Retrieves the input shape(s) of a layer.
Only applicable if the layer has exactly one input,
i.e. if it is connected to one incoming layer, or if all inputs
have the same shape.
Returns:
Input shape, as an integer shape tuple
(or list of shape tuples, one tuple per input tensor).
Raises:
AttributeError: if the layer has no defined input_shape.
RuntimeError: if called in Eager mode.
"""
if not self._inbound_nodes:
raise AttributeError('The layer has never been called '
'and thus has no defined input shape.')
all_input_shapes = set(
[str(node.input_shapes) for node in self._inbound_nodes])
if len(all_input_shapes) == 1:
return self._inbound_nodes[0].input_shapes
else:
raise AttributeError('The layer "' + str(self.name) +
'" has multiple inbound nodes, '
'with different input shapes. Hence '
'the notion of "input shape" is '
'ill-defined for the layer. '
'Use `get_input_shape_at(node_index)` '
'instead.')
def count_params(self):
"""Count the total number of scalars composing the weights.
Returns:
An integer count.
Raises:
ValueError: if the layer isn't yet built
(in which case its weights aren't yet defined).
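  Example (an added sketch):
  ```python
  layer = tf.keras.layers.Dense(10)
  layer.build((None, 4))
  assert layer.count_params() == 50  # kernel 4*10 + bias 10
  ```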
"""
if not self.built:
if self.__class__.__name__ == 'Sequential':
with tf_utils.maybe_init_scope(self):
self.build() # pylint: disable=no-value-for-parameter
else:
raise ValueError('You tried to call `count_params` on ' + self.name +
', but the layer isn\'t built. '
'You can build it manually via: `' + self.name +
'.build(batch_input_shape)`.')
return int(sum(np.prod(w.shape.as_list()) for w in self.weights))
@property
def output_shape(self):
"""Retrieves the output shape(s) of a layer.
Only applicable if the layer has one output,
or if all outputs have the same shape.
Returns:
Output shape, as an integer shape tuple
(or list of shape tuples, one tuple per output tensor).
Raises:
AttributeError: if the layer has no defined output shape.
RuntimeError: if called in Eager mode.
"""
if not self._inbound_nodes:
raise AttributeError('The layer has never been called '
'and thus has no defined output shape.')
all_output_shapes = set(
[str(node.output_shapes) for node in self._inbound_nodes])
if len(all_output_shapes) == 1:
return self._inbound_nodes[0].output_shapes
else:
raise AttributeError('The layer "%s"'
' has multiple inbound nodes, '
'with different output shapes. Hence '
'the notion of "output shape" is '
'ill-defined for the layer. '
'Use `get_output_shape_at(node_index)` '
'instead.' % self.name)
@property
@doc_controls.do_not_doc_inheritable
def inbound_nodes(self):
"""Deprecated, do NOT use! Only for compatibility with external Keras."""
return self._inbound_nodes
@property
@doc_controls.do_not_doc_inheritable
def outbound_nodes(self):
"""Deprecated, do NOT use! Only for compatibility with external Keras."""
return self._outbound_nodes
##############################################################################
# Methods & attributes below are public aliases of other methods. #
##############################################################################
@deprecation.deprecated(
date=None, instructions='Please use `layer.__call__` method instead.')
@doc_controls.do_not_doc_inheritable
def apply(self, inputs, *args, **kwargs):
"""Deprecated, do NOT use!
This is an alias of `self.__call__`.
Arguments:
inputs: Input tensor(s).
*args: additional positional arguments to be passed to `self.call`.
**kwargs: additional keyword arguments to be passed to `self.call`.
Returns:
Output tensor(s).
"""
return self.__call__(inputs, *args, **kwargs)
@deprecation.deprecated(
date=None, instructions='Please use `layer.add_weight` method instead.')
@doc_controls.do_not_doc_inheritable
def add_variable(self, *args, **kwargs):
"""Deprecated, do NOT use! Alias for `add_weight`."""
return self.add_weight(*args, **kwargs)
@property
def variables(self):
"""Returns the list of all layer variables/weights.
Alias of `self.weights`.
Returns:
A list of variables.
"""
return self.weights
@property
def trainable_variables(self):
return self.trainable_weights
@property
def non_trainable_variables(self):
return self.non_trainable_weights
##############################################################################
# Methods & attributes below are all private and only used by the framework. #
##############################################################################
def _set_dtype_and_policy(self, dtype):
"""Sets self._dtype and self._mixed_precision_policy."""
if dtype:
if isinstance(dtype, policy.Policy):
self._mixed_precision_policy = dtype
self._dtype = self._mixed_precision_policy.default_variable_dtype
else:
# If a non-policy dtype is passed, no casting should be done. So we use
# the "infer" policy, which does no casting.
self._mixed_precision_policy = policy.Policy('infer')
self._dtype = dtypes.as_dtype(dtype).name
else:
self._mixed_precision_policy = policy.global_policy()
# If the global policy has not been set, it will be an "infer" policy
# without a default variable dtype, and so self._dtype will be None. In
# that case, self._dtype will be set when the layer is built or called.
self._dtype = self._mixed_precision_policy.default_variable_dtype
def _name_scope(self):
return self.name
def _init_set_name(self, name, zero_based=True):
if not name:
self._name = backend.unique_object_name(
generic_utils.to_snake_case(self.__class__.__name__),
zero_based=zero_based)
else:
self._name = name
def _get_existing_metric(self, name=None):
match = [m for m in self._metrics if m.name == name]
if not match:
return
if len(match) > 1:
raise ValueError(
'Please provide different names for the metrics you have added. '
'We found {} metrics with the name: "{}"'.format(len(match), name))
return match[0]
def _eager_add_metric(self, value, aggregation=None, name=None):
# If the given metric is available in `metrics` list we just update state
# on it, otherwise we create a new metric instance and
# add it to the `metrics` list.
metric_obj = getattr(value, '_metric_obj', None)
if metric_obj:
name = metric_obj.name
match = self._get_existing_metric(name)
if match:
# Tensors that come from a Metric object already updated the Metric state.
if not metric_obj:
match(value)
return
if not metric_obj:
assert aggregation is not None
metric_obj, _ = base_layer_utils.create_mean_metric(value, name)
self._metrics.append(metric_obj)
def _symbolic_add_metric(self, value, aggregation=None, name=None):
base_layer_utils.check_graph_consistency(value, method='add_metric')
match = self._get_existing_metric(name)
if aggregation is None:
# Iterate over the metrics and check if the given metric exists already.
# This can happen when a metric instance is created in subclassed model
# layer `__init__` and we have tracked that instance already in
# model.__setattr__.
if match:
result_tensor = value
metric_obj = match
elif hasattr(value, '_metric_obj'):
# We track the instance using the metadata on the result tensor.
result_tensor = value
metric_obj = result_tensor._metric_obj
self._metrics.append(metric_obj)
else:
raise ValueError(
'We do not support adding an aggregated metric result tensor that '
'is not the output of a `tf.keras.metrics.Metric` metric instance. '
'Without having access to the metric instance we cannot reset the '
'state of a metric after every epoch during training. You can '
'create a `tf.keras.metrics.Metric` instance and pass the result '
'here or pass an un-aggregated result with `aggregation` parameter '
'set as `mean`. For example: `self.add_metric(tf.reduce_sum(inputs)'
', name=\'mean_activation\', aggregation=\'mean\')`')
else:
# If a non-aggregated tensor is given as input (ie. `aggregation` is
# explicitly set to `mean`), we wrap the tensor in `Mean` metric.
if match:
result_tensor = match(value)
metric_obj = match
else:
metric_obj, result_tensor = base_layer_utils.create_mean_metric(
value, name)
self._metrics.append(metric_obj)
def _handle_weight_regularization(self, name, variable, regularizer):
"""Create lambdas which compute regularization losses."""
def _loss_for_variable(v):
"""Creates a regularization loss `Tensor` for variable `v`."""
with backend.name_scope(name + '/Regularizer'):
regularization = regularizer(v)
return regularization
if isinstance(variable, tf_variables.PartitionedVariable):
for v in variable:
self.add_loss(functools.partial(_loss_for_variable, v))
else:
self.add_loss(functools.partial(_loss_for_variable, variable))
def _handle_activity_regularization(self, inputs, outputs):
# Apply activity regularization.
# Note that it should be applied every time the layer creates a new
# output, since it is output-specific.
if self._activity_regularizer:
output_list = nest.flatten(outputs)
with backend.name_scope('ActivityRegularizer'):
for output in output_list:
activity_loss = self._activity_regularizer(output)
batch_size = math_ops.cast(
array_ops.shape(output)[0], activity_loss.dtype)
# Make activity regularization strength batch-agnostic.
mean_activity_loss = activity_loss / batch_size
base_layer_utils.check_graph_consistency(
mean_activity_loss, method='activity_regularizer')
self.add_loss(mean_activity_loss, inputs=inputs)
def _set_mask_metadata(self, inputs, outputs, previous_mask):
flat_outputs = nest.flatten(outputs)
mask_already_computed = (
getattr(self, '_compute_output_and_mask_jointly', False) or
all(getattr(x, '_keras_mask', None) is not None for x in flat_outputs))
# Only compute the mask if the Layer explicitly supports masking or has
# overridden `compute_mask`.
should_compute_mask = (
hasattr(self, 'compute_mask') and
(self.supports_masking or
not getattr(self.compute_mask, '_is_default', False)))
if mask_already_computed:
flat_masks = [getattr(x, '_keras_mask', None) for x in flat_outputs]
elif not should_compute_mask:
flat_masks = [None for _ in flat_outputs]
else:
output_masks = self.compute_mask(inputs, previous_mask)
# `compute_mask` can return a single `None` even when a Layer
# has multiple outputs.
if output_masks is None:
flat_masks = [None for _ in flat_outputs]
else:
flat_masks = nest.flatten(output_masks)
for output, mask in zip(flat_outputs, flat_masks):
try:
output._keras_mask = mask
except AttributeError:
# C Type such as np.ndarray.
pass
if tf_utils.are_all_symbolic_tensors(flat_outputs):
for output in flat_outputs:
if getattr(output, '_keras_mask', None) is not None:
# Do not track masks for `TensorFlowOpLayer` construction.
output._keras_mask._keras_history_checked = True
def _collect_input_masks(self, inputs, args, kwargs):
"""Checks if `mask` argument was passed, else gathers mask from inputs."""
if self._call_arg_was_passed('mask', args, kwargs):
return self._get_call_arg_value('mask', args, kwargs)
if not self._should_compute_mask:
return None
input_masks = nest.map_structure(lambda t: getattr(t, '_keras_mask', None),
inputs)
if generic_utils.is_all_none(input_masks):
return None
return input_masks
def _call_arg_was_passed(self, arg_name, args, kwargs):
if arg_name in kwargs:
return True
# Ignore `inputs` arg.
if arg_name in dict(zip(self._call_fn_args[1:], args)):
return True
return False
def _get_call_arg_value(self, arg_name, args, kwargs):
if arg_name in kwargs:
return kwargs[arg_name]
# Ignore `inputs` arg.
args_dict = dict(zip(self._call_fn_args[1:], args))
return args_dict[arg_name]
def _set_connectivity_metadata_(self, inputs, outputs, args, kwargs):
call_convention = getattr(
self, '_call_convention',
base_layer_utils.CallConvention.EXPLICIT_INPUTS_ARGUMENT)
if args:
if call_convention == (base_layer_utils
.CallConvention.EXPLICIT_INPUTS_ARGUMENT):
raise TypeError(
'This layer ("{}") takes an `inputs` argument in `call()`, '
'and only the `inputs` argument may be specified as a positional '
'argument. Pass everything else as a keyword argument '
'(those arguments will not be tracked '
'as inputs to the layer).'.format(self.name))
elif call_convention == (base_layer_utils
.CallConvention.SINGLE_POSITIONAL_ARGUMENT):
raise TypeError(
'This layer ("{}") takes a single positional argument in `call()`,'
' which is by convention the `inputs` argument, '
'and only this argument may be specified as a positional argument. '
'Pass everything else as a keyword argument '
'(those arguments will not be tracked '
'as inputs to the layer).'.format(self.name))
# If the layer returns tensors from its inputs, unmodified,
# we copy them to avoid loss of tensor metadata.
output_ls = nest.flatten(outputs)
inputs_ls = nest.flatten(inputs)
output_ls_copy = []
for x in output_ls:
if x in inputs_ls:
with backend.name_scope(self.name):
x = array_ops.identity(x)
output_ls_copy.append(x)
outputs = nest.pack_sequence_as(outputs, output_ls_copy)
inputs, kwargs = self._inputs_from_call_args(
call_args=(inputs,) + args, call_kwargs=kwargs)
# Add an inbound node to the layer, so it can keep track of this call.
# This updates the layer history of the output tensor(s).
kwargs.pop('mask', None) # `mask` should not be serialized.
self._add_inbound_node(
input_tensors=inputs, output_tensors=outputs, arguments=kwargs)
return inputs, outputs
def _inputs_from_call_args(self, call_args, call_kwargs):
"""Get Layer inputs from __call__ *args and **kwargs.
Args:
call_args: The positional arguments passed to __call__.
call_kwargs: The keyword argument dict passed to __call__.
Returns:
A tuple of (inputs, non_input_kwargs). These may be the same objects as
were passed in (call_args and call_kwargs).
"""
call_convention = getattr(
self, '_call_convention',
base_layer_utils.CallConvention.EXPLICIT_INPUTS_ARGUMENT)
if (call_convention in (
base_layer_utils.CallConvention.EXPLICIT_INPUTS_ARGUMENT,
base_layer_utils.CallConvention.SINGLE_POSITIONAL_ARGUMENT)):
assert len(call_args) == 1 # TypeError raised earlier in __call__.
return call_args[0], call_kwargs
else:
call_arg_spec = tf_inspect.getfullargspec(self.call)
# There is no explicit "inputs" argument expected or provided to
# call(). Arguments which have default values are considered non-inputs,
# and arguments without are considered inputs.
if call_arg_spec.defaults:
if call_arg_spec.varargs is not None:
raise TypeError(
'Layers may not accept both positional arguments and '
'arguments with default values (unable to determine which '
'are inputs to the layer). '
'Issue occurred with layer "%s"' % (self.name))
keyword_arg_names = set(
call_arg_spec.args[-len(call_arg_spec.defaults):])
else:
keyword_arg_names = set()
# Training is never an input argument name, to allow signatures like
# call(x, training).
keyword_arg_names.add('training')
_, unwrapped_call = tf_decorator.unwrap(self.call)
bound_args = inspect.getcallargs(
unwrapped_call, *call_args, **call_kwargs)
if call_arg_spec.varkw is not None:
var_kwargs = bound_args.pop(call_arg_spec.varkw)
bound_args.update(var_kwargs)
keyword_arg_names = keyword_arg_names.union(var_kwargs.keys())
all_args = call_arg_spec.args
if all_args and bound_args[all_args[0]] is self:
# Ignore the 'self' argument of methods
bound_args.pop(call_arg_spec.args[0])
all_args = all_args[1:]
non_input_arg_values = {}
input_arg_values = []
remaining_args_are_keyword = False
for argument_name in all_args:
if argument_name in keyword_arg_names:
remaining_args_are_keyword = True
else:
if remaining_args_are_keyword:
raise TypeError(
'Found a positional argument in a layer call after a non-input '
'argument. All arguments after "training" must be keyword '
'arguments, and are not tracked as inputs to the layer. '
'Issue occurred with layer "%s"' % (self.name))
if remaining_args_are_keyword:
non_input_arg_values[argument_name] = bound_args[argument_name]
else:
input_arg_values.append(bound_args[argument_name])
if call_arg_spec.varargs is not None:
input_arg_values.extend(bound_args[call_arg_spec.varargs])
return input_arg_values, non_input_arg_values
def _add_inbound_node(self,
input_tensors,
output_tensors,
arguments=None):
"""Internal method to create an inbound node for the layer.
Arguments:
input_tensors: list of input tensors.
output_tensors: list of output tensors.
arguments: dictionary of keyword arguments that were passed to the
`call` method of the layer at the call that created the node.
"""
inbound_layers = nest.map_structure(lambda t: t._keras_history.layer,
input_tensors)
node_indices = nest.map_structure(lambda t: t._keras_history.node_index,
input_tensors)
tensor_indices = nest.map_structure(lambda t: t._keras_history.tensor_index,
input_tensors)
# Create node, add it to inbound nodes.
node_module.Node(
self,
inbound_layers=inbound_layers,
node_indices=node_indices,
tensor_indices=tensor_indices,
input_tensors=input_tensors,
output_tensors=output_tensors,
arguments=arguments)
# Update tensor history metadata.
# The metadata attribute consists of
# 1) a layer instance
# 2) a node index for the layer
# 3) a tensor index for the node.
# This allows layer reuse (multiple nodes per layer) and multi-output
# or multi-input layers (e.g. a layer can return multiple tensors,
# and each can be sent to a different layer).
for i, tensor in enumerate(nest.flatten(output_tensors)):
tensor._keras_history = KerasHistory(self,
len(self._inbound_nodes) - 1, i) # pylint: disable=protected-access
def _get_node_attribute_at_index(self, node_index, attr, attr_name):
"""Private utility to retrieves an attribute (e.g. inputs) from a node.
This is used to implement the methods:
- get_input_shape_at
- get_output_shape_at
- get_input_at
etc...
Arguments:
node_index: Integer index of the node from which
to retrieve the attribute.
attr: Exact node attribute name.
attr_name: Human-readable attribute name, for error messages.
Returns:
The layer's attribute `attr` at the node of index `node_index`.
Raises:
RuntimeError: If the layer has no inbound nodes, or if called in Eager
mode.
ValueError: If the index provided does not match any node.
"""
if not self._inbound_nodes:
raise RuntimeError('The layer has never been called '
'and thus has no defined ' + attr_name + '.')
if not len(self._inbound_nodes) > node_index:
raise ValueError('Asked to get ' + attr_name + ' at node ' +
str(node_index) + ', but the layer has only ' +
str(len(self._inbound_nodes)) + ' inbound nodes.')
values = getattr(self._inbound_nodes[node_index], attr)
if isinstance(values, list) and len(values) == 1:
return values[0]
else:
return values
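# Illustration (added note, not part of the original source): the public accessors
# such as get_output_at / get_input_shape_at are thin wrappers over this helper.
# For a hypothetical shared layer called on two different inputs, node 0 and
# node 1 hold the tensors of the first and second call respectively:
#   shared = Dense(4)                 # hypothetical layer and inputs x0, x1
#   y0, y1 = shared(x0), shared(x1)
#   shared.get_output_at(0)           # -> y0 (attr 'output_tensors', node_index 0)
#   shared.get_output_at(1)           # -> y1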
def _maybe_build(self, inputs):
# Check input assumptions set before layer building, e.g. input rank.
if not self.built:
input_spec.assert_input_compatibility(
self.input_spec, inputs, self.name)
input_list = nest.flatten(inputs)
if input_list and self._dtype is None:
try:
self._dtype = input_list[0].dtype.base_dtype.name
except AttributeError:
pass
input_shapes = None
if all(hasattr(x, 'shape') for x in input_list):
input_shapes = nest.map_structure(lambda x: x.shape, inputs)
# Only call `build` if the user has manually overridden the build method.
if not hasattr(self.build, '_is_default'):
# Any setup work performed only once should happen in an `init_scope`
# to avoid creating symbolic Tensors that will later pollute any eager
# operations.
with tf_utils.maybe_init_scope(self):
self.build(input_shapes)
# We must set self.built since user defined build functions are not
# constrained to set self.built.
self.built = True
# Optionally load weight values specified at layer instantiation.
if getattr(self, '_initial_weights', None) is not None:
self.set_weights(self._initial_weights)
self._initial_weights = None
def _symbolic_call(self, inputs):
input_shapes = nest.map_structure(lambda x: x.shape, inputs)
output_shapes = self.compute_output_shape(input_shapes)
def _make_placeholder_like(shape):
ph = backend.placeholder(shape=shape, dtype=self.dtype)
ph._keras_mask = None
return ph
return nest.map_structure(_make_placeholder_like, output_shapes)
def _get_trainable_state(self):
"""Get the `trainable` state of each sublayer.
Returns:
A dict mapping all sublayers to their `trainable` value.
"""
layers = trackable_layer_utils.filter_empty_layer_containers(self._layers)
# Keep track of each top-level layers' `trainable` as well as the
# state of all of its sublayers.
trainable_state = {self: self.trainable}
for layer in layers:
trainable_state.update(layer._get_trainable_state())
return trainable_state
def _set_trainable_state(self, trainable_state):
"""Set `trainable` state for each sublayer."""
layers = trackable_layer_utils.filter_empty_layer_containers(self._layers)
if self in trainable_state:
self.trainable = trainable_state[self]
for layer in layers:
layer._set_trainable_state(trainable_state)
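# Illustration (added note, not part of the original source): together these two
# helpers let a caller snapshot and later restore the per-layer `trainable`
# flags of a model, e.g.
#   state = model._get_trainable_state()    # {layer: trainable} for every sublayer
#   ...freeze / unfreeze layers as needed...
#   model._set_trainable_state(state)       # restore the exact previous flags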
@property
def _obj_reference_counts(self):
"""A dictionary counting the number of attributes referencing an object."""
self._maybe_create_attribute('_obj_reference_counts_dict',
object_identity.ObjectIdentityDictionary())
return self._obj_reference_counts_dict
def _maybe_create_attribute(self, name, default_value):
"""Create the attribute with the default value if it hasn't been created.
This is useful for fields that are used for tracking purposes, such as
_trainable_weights or _layers. Note that a user could create a layer subclass
and assign an internal field before invoking Layer.__init__(), so
__setattr__() needs to create the tracking fields and __init__() must not
override them.
Args:
name: String, the name of the attribute.
default_value: Object, the default value of the attribute.
"""
if not hasattr(self, name):
super(Layer, self).__setattr__(name, default_value)
def __delattr__(self, name):
# For any super.__delattr__() call, we will directly use the implementation
# in Trackable and skip the behavior in AutoTrackable. The Layer originally
# used Trackable as its base class; the change to using Module as the base
# class forced us to have AutoTrackable in the class hierarchy. Skipping
# the __delattr__ and __setattr__ in AutoTrackable will keep the status quo.
existing_value = getattr(self, name, None)
# If this value is replacing an existing object assigned to an attribute, we
# should clean it out to avoid leaking memory. First we check if there are
# other attributes referencing it.
reference_counts = self._obj_reference_counts
if existing_value not in reference_counts:
super(tracking.AutoTrackable, self).__delattr__(name)
return
reference_count = reference_counts[existing_value]
if reference_count > 1:
# There are other remaining references. We can't remove this object from
# _layers etc.
reference_counts[existing_value] = reference_count - 1
super(tracking.AutoTrackable, self).__delattr__(name)
return
else:
# This is the last remaining reference.
del reference_counts[existing_value]
super(tracking.AutoTrackable, self).__delattr__(name)
if (isinstance(existing_value, Layer)
or trackable_layer_utils.has_weights(existing_value)):
super(tracking.AutoTrackable, self).__setattr__(
'_layers',
[l for l in self._layers if l is not existing_value])
if isinstance(existing_value, tf_variables.Variable):
super(tracking.AutoTrackable, self).__setattr__(
'_trainable_weights',
[w for w in self._trainable_weights if w is not existing_value])
super(tracking.AutoTrackable, self).__setattr__(
'_non_trainable_weights',
[w for w in self._non_trainable_weights if w is not existing_value])
def __setattr__(self, name, value):
if (name == '_self_setattr_tracking' or
not getattr(self, '_self_setattr_tracking', True) or
getattr(self, '_is_graph_network', False) or
# Exclude @property.setters from tracking
hasattr(self.__class__, name)):
try:
super(tracking.AutoTrackable, self).__setattr__(name, value)
except AttributeError:
raise AttributeError(
('Can\'t set the attribute "{}", likely because it conflicts with '
'an existing read-only @property of the object. Please choose a '
'different name.').format(name))
return
# Keep track of trackable objects, for the needs of `Network.save_weights`.
value = data_structures.sticky_attribute_assignment(
trackable=self, value=value, name=name)
reference_counts = self._obj_reference_counts
reference_counts[value] = reference_counts.get(value, 0) + 1
# Clean out the old attribute, which clears _layers and _trainable_weights
# if necessary.
try:
self.__delattr__(name)
except AttributeError:
pass
# TODO(scottzhu): Need to track Module object as well for weight tracking.
# Be careful about metric if it becomes a Module in future.
# Append value to self._layers if relevant
if (isinstance(value, Layer) or
trackable_layer_utils.has_weights(value)):
self._maybe_create_attribute('_layers', [])
# We need to check object identity to avoid de-duplicating empty
# container types which compare equal.
if not any((layer is value for layer in self._layers)):
self._layers.append(value)
if hasattr(value, '_use_resource_variables'):
# Legacy layers (V1 tf.layers) must always use
# resource variables.
value._use_resource_variables = True
# Append value to list of trainable / non-trainable weights if relevant
# TODO(b/125122625): This won't pick up on any variables added to a
# list/dict after creation.
for val in nest.flatten(value):
# TODO(b/126450014): Remove `_UnreadVariable` check here when assign ops
# no longer return True for isinstance Variable checks.
if (isinstance(val, tf_variables.Variable) and
not isinstance(val, resource_variable_ops._UnreadVariable)): # pylint: disable=protected-access
# Users may add extra weights/variables
# simply by assigning them to attributes (invalid for graph networks)
self._maybe_create_attribute('_trainable_weights', [])
self._maybe_create_attribute('_non_trainable_weights', [])
if val not in self._trainable_weights + self._non_trainable_weights:
if val.trainable:
self._trainable_weights.append(val)
else:
self._non_trainable_weights.append(val)
backend.track_variable(val)
# Skip the auto trackable from tf.Module to keep status quo. See the comment
# at __delattr__.
super(tracking.AutoTrackable, self).__setattr__(name, value)
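# Illustration (added note, not part of the original source): because of the
# __setattr__ hook above, plain attribute assignment inside a hypothetical
# subclass is enough for tracking:
#   self.dense = Dense(4)           # appended to self._layers
#   self.scale = tf.Variable(1.0)   # trainable, so appended to _trainable_weights
#                                   # and registered via backend.track_variable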
def _gather_children_attribute(self, attribute):
assert attribute in {
'weights', 'trainable_weights', 'non_trainable_weights', 'updates',
'losses', 'metrics'
}
if hasattr(self, '_layers'):
nested_layers = trackable_layer_utils.filter_empty_layer_containers(
self._layers)
return list(
itertools.chain.from_iterable(
getattr(layer, attribute) for layer in nested_layers))
return []
# This is a hack so that the is_layer (within
# training/trackable/layer_utils.py) check doesn't get the weights attr.
# TODO(b/110718070): Remove when fixed.
def _is_layer(self):
return True
@property
@tracking.cached_per_instance
def _call_fn_args(self):
all_args = tf_inspect.getfullargspec(self.call).args
# Scrub `self` that appears if a decorator was applied.
if all_args and all_args[0] == 'self':
return all_args[1:]
return all_args
@property
@tracking.cached_per_instance
def _call_accepts_kwargs(self):
return tf_inspect.getfullargspec(self.call).varkw is not None
@property
@tracking.cached_per_instance
def _should_compute_mask(self):
return ('mask' in self._call_fn_args or
getattr(self, 'compute_mask', None) is not None)
@property
def _object_identifier(self):
"""String stored in object identifier field in the SavedModel proto.
Returns:
A string with the object identifier, which is used at load time.
"""
return '_tf_keras_layer'
@property
def _eager_losses(self):
# A list of loss values containing activity regularizers and losses
# manually added through `add_loss` during eager execution. It is cleared
# after every batch.
# Because we plan on eventually allowing a same model instance to be trained
# in eager mode or graph mode alternatively, we need to keep track of
# eager losses and symbolic losses via separate attributes.
if not hasattr(self._thread_local, '_eager_losses'):
self._thread_local._eager_losses = []
return self._thread_local._eager_losses
@_eager_losses.setter
def _eager_losses(self, losses):
self._thread_local._eager_losses = losses
@property
def _tracking_metadata(self):
"""String stored in metadata field in the SavedModel proto.
Returns:
A serialized JSON storing information necessary for recreating this layer.
"""
# TODO(kathywu): Add support for metrics serialization.
# TODO(kathywu): Synchronize with the keras spec (go/keras-json-spec) once
# the python config serialization has caught up.
# Create a dictionary containing python layer state attributes. Any new
# attribute that impacts the layer execution in some way should be added to
# this dict.
# Unlike a model's JSON configuration, which only
# contains class_name and each layer's get_config() object, this stores more
# information to accurately recreate the layer.
# For backwards compatibility, any changes to this list should be additive.
# Modifying or removing attributes may only be done with a sufficient
# explanation.
metadata = dict(
class_name=type(self).__name__,
name=self.name,
trainable=self.trainable,
expects_training_arg=self._expects_training_arg,
dtype=self.dtype,
batch_input_shape=getattr(self, '_batch_input_shape', None))
try:
# Store the config dictionary, which is only used by the revived object
# to return the original config when revived_obj.get_config() is called.
# It is not important for recreating the revived object.
metadata['config'] = self.get_config()
except NotImplementedError:
# in the case of a subclassed model, the get_config() method will throw
# a NotImplementedError.
pass
if self.input_spec is not None:
# Layer's input_spec has already been type-checked in the property setter.
metadata['input_spec'] = nest.map_structure(
lambda x: None if x is None else serialize_keras_object(x),
self.input_spec)
else:
metadata['input_spec'] = None
if (self.activity_regularizer is not None and
hasattr(self.activity_regularizer, 'get_config')):
metadata['activity_regularizer'] = serialize_keras_object(
self.activity_regularizer)
else:
metadata['activity_regularizer'] = None
return json.dumps(metadata, default=serialization.get_json_type)
def _list_extra_dependencies_for_serialization(self, serialization_cache):
"""Lists extra dependencies to serialize to SavedModel.
By overriding this method, extra dependencies can be attached to the
serialized Layer. For example, this is used to save the list of `variables`
and `trainable_variables`, which are python properties in a Layer object,
but are represented as a static list in the SavedModel.
Args:
serialization_cache: A dictionary shared between all objects in the same
object graph. This object is passed to both
`_list_extra_dependencies_for_serialization` and
`_list_functions_for_serialization`.
Returns:
A dictionary mapping attribute names to trackable objects. The entire list
of attributes is listed in the `saved_model._LayerAttributes` class.
"""
return (saved_model.serialize_all_attributes(self, serialization_cache)
.objects_to_serialize)
def _list_functions_for_serialization(self, serialization_cache):
"""Lists the functions to include when serializing a Layer.
Args:
serialization_cache: Dictionary passed to all objects in the same object
graph during serialization.
Returns:
A dictionary mapping attribute names to `Function` or
`ConcreteFunction`. The entire list of attributes is listed in the
`saved_model._LayerAttributes` class.
"""
# Create a dictionary containing the layer's call and loss functions.
fns = (saved_model.serialize_all_attributes(self, serialization_cache)
.functions_to_serialize)
# The parent AutoTrackable class saves all user-defined tf.functions, and
# returns them in _list_functions_for_serialization(). Add these functions
# to the dict.
fns.update(super(Layer, self)._list_functions_for_serialization(
serialization_cache))
return fns
class TensorFlowOpLayer(Layer):
"""Wraps a TensorFlow Operation in a Layer.
This class is used internally by the Functional API. When a user
uses a raw TensorFlow Operation on symbolic tensors originating
from an `Input` Layer, the resultant operation will be wrapped
with this Layer object in order to make the operation compatible
with the Keras API.
This Layer will create a new, identical operation (except for inputs
and outputs) every time it is called. If `run_eagerly` is `True`,
the op creation and calculation will happen inside an Eager function.
Instances of this Layer are created when `autolambda` is called, which
is whenever a Layer's `__call__` encounters symbolic inputs that do
not have Keras metadata, or when a Network's `__init__` encounters
outputs that do not have Keras metadata.
Attributes:
node_def: String, the serialized NodeDef of the Op this layer will wrap.
constants: Dict of NumPy arrays, the values of any Tensors needed for this
Operation that do not originate from a Keras `Input` Layer. Since all
placeholders must come from Keras `Input` Layers, these Tensors must be
treated as constant in the Functional API.
name: String, the name of the Layer.
trainable: Bool, whether this Layer is trainable. Currently Variables are
not supported, and so this parameter has no effect.
dtype: The default dtype of this Layer. Inherited from `Layer` and has no
effect on this class, however is used in `get_config`.
"""
def __init__(self,
node_def,
constants=None,
name=None,
trainable=True,
dtype=None):
super(TensorFlowOpLayer, self).__init__(
name=_TF_OP_LAYER_NAME_PREFIX + name, trainable=trainable, dtype=dtype)
self.node_def = node_def_pb2.NodeDef.FromString(node_def)
self.constants = constants or {}
# Layer uses original op unless it is called on new inputs.
# This means `built` is not set in `__call__`.
self.built = True
def call(self, inputs):
if context.executing_eagerly():
return self._defun_call(inputs)
return self._make_op(inputs)
def _make_node_def(self, graph):
node_def = node_def_pb2.NodeDef()
node_def.CopyFrom(self.node_def)
node_def.name = graph.unique_name(node_def.name)
return node_def
def _make_op(self, inputs):
inputs = nest.flatten(inputs)
graph = inputs[0].graph
node_def = self._make_node_def(graph)
with graph.as_default():
for index, constant in self.constants.items():
# Recreate constant in graph to add distribution context.
value = tensor_util.constant_value(constant)
if value is not None:
constant = constant_op.constant(value, name=node_def.input[index])
inputs.insert(index, constant)
# Check for case where first input should be a list of Tensors.
if 'N' in node_def.attr:
num_tensors = node_def.attr['N'].i
inputs = [inputs[:num_tensors]] + inputs[num_tensors:]
c_op = ops._create_c_op(graph, node_def, inputs, control_inputs=[])
op = graph._create_op_from_tf_operation(c_op)
op._control_flow_post_processing()
# Record the gradient because custom-made ops don't go through the
# code-gen'd eager call path
op_type = compat.as_str(op.op_def.name)
attr_names = [compat.as_str(attr.name) for attr in op.op_def.attr]
attrs = []
for attr_name in attr_names:
attrs.append(attr_name)
attrs.append(op.get_attr(attr_name))
attrs = tuple(attrs)
execute.record_gradient(op_type, op.inputs, attrs, op.outputs, op.name)
if len(op.outputs) == 1:
return op.outputs[0]
return op.outputs
@function.defun
def _defun_call(self, inputs):
"""Wraps the op creation method in an Eager function for `run_eagerly`."""
return self._make_op(inputs)
def get_config(self):
config = super(TensorFlowOpLayer, self).get_config()
config.update({
'node_def': self.node_def.SerializeToString(),
'constants': {
i: backend.get_value(c) for i, c in self.constants.items()
}
})
return config
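# Illustration (added sketch, not part of the original source): this wrapper is
# what lets a raw TensorFlow op appear inside a Functional model; the op below is
# wrapped in a TensorFlowOpLayer when the Network is built (names are assumptions):
#   inputs = tf.keras.Input(shape=(3,))
#   outputs = tf.abs(inputs)              # raw op on a symbolic Keras tensor
#   model = tf.keras.Model(inputs, outputs)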
class AddLoss(Layer):
"""Adds its inputs as a loss.
Attributes:
unconditional: Whether or not the loss should be conditioned on the inputs.
"""
def __init__(self, unconditional, **kwargs):
super(AddLoss, self).__init__(**kwargs)
self.unconditional = unconditional
def call(self, inputs):
self.add_loss(inputs, inputs=(not self.unconditional))
return inputs
def get_config(self):
config = super(AddLoss, self).get_config()
config.update({'unconditional': self.unconditional})
return config
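# Illustration (added sketch, not part of the original source): AddLoss is the
# layer inserted on the user's behalf when a symbolic tensor is passed to
# add_loss in a Functional model, e.g. (hypothetical tensor `hidden`):
#   model.add_loss(tf.reduce_mean(hidden))   # recorded via an AddLoss node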
class AddMetric(Layer):
"""Adds its inputs as a metric.
Attributes:
aggregation: 'mean' or None. How the inputs should be aggregated.
metric_name: The name to use for this metric.
"""
def __init__(self, aggregation=None, metric_name=None, **kwargs):
super(AddMetric, self).__init__(**kwargs)
self.aggregation = aggregation
self.metric_name = metric_name
def call(self, inputs):
self.add_metric(inputs, self.aggregation, self.metric_name)
return inputs
def get_config(self):
config = super(AddMetric, self).get_config()
config.update({
'aggregation': self.aggregation,
'metric_name': self.metric_name
})
return config
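# Illustration (added sketch, not part of the original source): the metric
# counterpart of AddLoss above; a symbolic call such as (hypothetical `hidden`)
#   model.add_metric(tf.reduce_mean(hidden), name='mean_hidden', aggregation='mean')
# is recorded by inserting an AddMetric node into the graph.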
class KerasHistory(
collections.namedtuple('KerasHistory',
['layer', 'node_index', 'tensor_index'])):
"""Tracks the Layer call that created a Tensor, for Keras Graph Networks.
During construction of Keras Graph Networks, this metadata is added to
each Tensor produced as the output of a Layer, starting with an
`InputLayer`. This allows Keras to track how each Tensor was produced, and
this information is later retraced by the `keras.engine.Network` class to
reconstruct the Keras Graph Network.
Attributes:
layer: The Layer that produced the Tensor.
node_index: The specific call to the Layer that produced this Tensor. Layers
can be called multiple times in order to share weights. A new node is
created every time a Layer is called.
tensor_index: The output index for this Tensor. Always zero if the Layer
that produced this Tensor only has one output. Nested structures of
Tensors are deterministically assigned an index via `nest.flatten`.
"""
# Added to maintain memory and performance characteristics of `namedtuple`
# while subclassing.
__slots__ = ()
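# Illustration (added note, not part of the original source): every symbolic
# Keras tensor carries one of these tuples, e.g. for some functional output:
#   layer, node_index, tensor_index = some_output._keras_history
#   layer         -> the Layer instance that produced some_output
#   node_index    -> which call of that layer created it (0 for the first call)
#   tensor_index  -> position of some_output among that call's flattened outputs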
# Avoid breaking users who directly import this symbol from this file.
# TODO(fchollet): remove this.
InputSpec = input_spec.InputSpec # pylint:disable=invalid-name
|
the-stack_106_18361
|
# coding=utf-8
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# The class for a temperature and humidity sensor (Grove - Temperature and Humidity)
from cisco_deviot.thing import Property, PropertyType
from cisco_grovepi.sensor import Sensor
class Thermometer(Sensor):
def __init__(self, tid, name, pin):
Sensor.__init__(self, tid, name, pin, "temperature")
self.add_property(Property(name="temperature", type=PropertyType.INT, value=0, unit="°C", range=[0, 100]))
self.add_property(Property(name="humidity", type=PropertyType.INT, value=0, unit="%", range=[0, 100]))
def update_state(self):
data = Sensor.analog_read(self, "dht")
if data is not None:
self.update_property(temperature=data[0], humidity=data[1])
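# A minimal usage sketch (added, not part of the original module). It assumes a
# GrovePi board with a DHT sensor wired to digital pin 4; the pin number, tid and
# name below are made-up values, and only the constructor and update_state()
# defined above are relied upon.
if __name__ == "__main__":
    import time

    sensor = Thermometer(tid="thermo-1", name="living-room", pin=4)
    while True:
        sensor.update_state()  # reads the DHT sensor and pushes temperature/humidity
        time.sleep(5)          # poll every 5 seconds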
|
the-stack_106_18362
|
import torch
from torch import nn
from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel
import torch.backends.cudnn as cudnn
# from torchvision import models
import numpy as np
from FastAutoAugment.networks.resnet import ResNet
from FastAutoAugment.networks.pyramidnet import PyramidNet
from FastAutoAugment.networks.shakeshake.shake_resnet import ShakeResNet
from FastAutoAugment.networks.wideresnet import WideResNet
from FastAutoAugment.networks.shakeshake.shake_resnext import ShakeResNeXt
from FastAutoAugment.networks.efficientnet_pytorch import EfficientNet, RoutingFn
from FastAutoAugment.tf_port.tpu_bn import TpuBatchNormalization
def get_model(conf, num_class=10, local_rank=-1):
name = conf['type']
if name == 'resnet50':
model = ResNet(dataset='imagenet', depth=50, num_classes=num_class, bottleneck=True)
elif name == 'resnet200':
model = ResNet(dataset='imagenet', depth=200, num_classes=num_class, bottleneck=True)
elif name == 'wresnet40_2':
model = WideResNet(40, 2, dropout_rate=0.0, num_classes=num_class)
elif name == 'wresnet28_10':
model = WideResNet(28, 10, dropout_rate=0.0, num_classes=num_class)
elif name == 'shakeshake26_2x32d':
model = ShakeResNet(26, 32, num_class)
elif name == 'shakeshake26_2x64d':
model = ShakeResNet(26, 64, num_class)
elif name == 'shakeshake26_2x96d':
model = ShakeResNet(26, 96, num_class)
elif name == 'shakeshake26_2x112d':
model = ShakeResNet(26, 112, num_class)
elif name == 'shakeshake26_2x96d_next':
model = ShakeResNeXt(26, 96, 4, num_class)
elif name == 'pyramid':
model = PyramidNet('cifar10', depth=conf['depth'], alpha=conf['alpha'], num_classes=num_class, bottleneck=conf['bottleneck'])
elif 'efficientnet' in name:
model = EfficientNet.from_name(name, condconv_num_expert=conf['condconv_num_expert'], norm_layer=None) # TpuBatchNormalization
if local_rank >= 0:
model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
def kernel_initializer(module):
def get_fan_in_out(module):
num_input_fmaps = module.weight.size(1)
num_output_fmaps = module.weight.size(0)
receptive_field_size = 1
if module.weight.dim() > 2:
receptive_field_size = module.weight[0][0].numel()
fan_in = num_input_fmaps * receptive_field_size
fan_out = num_output_fmaps * receptive_field_size
return fan_in, fan_out
if isinstance(module, torch.nn.Conv2d):
# https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py#L58
fan_in, fan_out = get_fan_in_out(module)
torch.nn.init.normal_(module.weight, mean=0.0, std=np.sqrt(2.0 / fan_out))
if module.bias is not None:
torch.nn.init.constant_(module.bias, val=0.)
elif isinstance(module, RoutingFn):
torch.nn.init.xavier_uniform_(module.weight)
torch.nn.init.constant_(module.bias, val=0.)
elif isinstance(module, torch.nn.Linear):
# https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py#L82
fan_in, fan_out = get_fan_in_out(module)
delta = 1.0 / np.sqrt(fan_out)
torch.nn.init.uniform_(module.weight, a=-delta, b=delta)
if module.bias is not None:
torch.nn.init.constant_(module.bias, val=0.)
model.apply(kernel_initializer)
else:
raise NameError('no model named, %s' % name)
if local_rank >= 0:
device = torch.device('cuda', local_rank)
model = model.to(device)
model = DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank)
else:
model = model.cuda()
# model = DataParallel(model)
cudnn.benchmark = True
return model
def num_class(dataset):
return {
'cifar10': 10,
'reduced_cifar10': 10,
'cifar10.1': 10,
'cifar100': 100,
'svhn': 10,
'reduced_svhn': 10,
'imagenet': 1000,
'reduced_imagenet': 120,
}[dataset]
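# A minimal usage sketch (added, not part of the original module). It assumes a
# CUDA-capable machine, since get_model() moves the model to the GPU when
# local_rank < 0; the config dict is a made-up example using one of the names
# handled above.
if __name__ == "__main__":
    conf = {'type': 'wresnet28_10'}
    model = get_model(conf, num_class=num_class('cifar10'))
    x = torch.randn(2, 3, 32, 32).cuda()   # CIFAR-sized dummy batch
    print(model(x).shape)                  # expected: torch.Size([2, 10])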
|
the-stack_106_18363
|
import pytest
from abi.ERC20 import ERC20
@pytest.fixture(scope="module")
def burner(UniswapLPBurner, alice, receiver):
yield UniswapLPBurner.deploy(receiver, receiver, alice, alice, {"from": alice})
UNISWAP_LP_DAI_ETH = "0xa478c2975ab1ea89e8196811f51a7b7ade33eb11"
UNISWAP_LP_ETH_USDT = "0x0d4a11d5eeaac28ec3f61d100daf4d40471f1852"
UNISWAP_LP_USDC_ETH = "0xb4e16d0168e52d35cacd2c6185b44281ec28c9dc"
UNISWAP_LP_WBTC_ETH = "0xbb2b8038a1640196fbe3e38816f3e67cba72d940"
SUSHI_LP_DAI_ETH = "0xc3d03e4f041fd4cd388c549ee2a29a9e5075882f"
SUSHI_LP_ETH_USDT = "0x06da0fd433c1a5d7a4faa01111c044910a184553"
SUSHI_LP_SUSHI_ETH = "0x795065dcc9f64b5614c407a6efdc400da6221fb0"
SUSHI_LP_USDC_ETH = "0x397ff1542f962076d0bfe58ea045ffa2d347aca0"
SUSHI_LP_WBTC_ETH = "0xceff51756c56ceffca006cd410b03ffc46dd3a58"
SUSHI_LP_YFI_ETH = "0x088ee5007c98a9677165d78dd2109ae4a3d04d0c"
TOKENS = [
UNISWAP_LP_DAI_ETH,
UNISWAP_LP_ETH_USDT,
UNISWAP_LP_USDC_ETH,
UNISWAP_LP_WBTC_ETH,
SUSHI_LP_DAI_ETH,
SUSHI_LP_ETH_USDT,
SUSHI_LP_SUSHI_ETH,
SUSHI_LP_USDC_ETH,
SUSHI_LP_WBTC_ETH,
SUSHI_LP_YFI_ETH,
]
@pytest.mark.parametrize("token", TOKENS)
def test_burn(MintableTestToken, DAI, WETH, alice, receiver, burner, token):
coin = MintableTestToken.from_abi("testToken", token, abi=ERC20)
amount = 10 ** coin.decimals()
mint_success = False
fails = 0
while not mint_success and fails < 5:
try:
coin._mint_for_testing(alice, amount, {"from": alice})
mint_success = True
except Exception:
fails += 1
amount //= 2  # halve while keeping the mint amount an integer
coin.approve(burner, 2 ** 256 - 1, {"from": alice})
burner.burn(coin, {"from": alice})
assert coin.balanceOf(alice) == 0
assert coin.balanceOf(burner) == 0
assert coin.balanceOf(receiver) == 0
assert WETH.balanceOf(alice) == 0
assert WETH.balanceOf(burner) == 0
assert WETH.balanceOf(receiver) > 0
|
the-stack_106_18364
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
import unittest
from random import choice
import datetime
import frappe
from frappe.custom.doctype.custom_field.custom_field import create_custom_field
from frappe.utils import random_string
from frappe.utils.testutils import clear_custom_fields
class TestDB(unittest.TestCase):
def test_get_value(self):
self.assertEqual(frappe.db.get_value("User", {"name": ["=", "Administrator"]}), "Administrator")
self.assertEqual(frappe.db.get_value("User", {"name": ["like", "Admin%"]}), "Administrator")
self.assertNotEqual(frappe.db.get_value("User", {"name": ["!=", "Guest"]}), "Guest")
self.assertEqual(frappe.db.get_value("User", {"name": ["<", "Adn"]}), "Administrator")
self.assertEqual(frappe.db.get_value("User", {"name": ["<=", "Administrator"]}), "Administrator")
self.assertEqual(frappe.db.sql("""SELECT name FROM `tabUser` WHERE name > 's' ORDER BY MODIFIED DESC""")[0][0],
frappe.db.get_value("User", {"name": [">", "s"]}))
self.assertEqual(frappe.db.sql("""SELECT name FROM `tabUser` WHERE name >= 't' ORDER BY MODIFIED DESC""")[0][0],
frappe.db.get_value("User", {"name": [">=", "t"]}))
def test_set_value(self):
todo1 = frappe.get_doc(dict(doctype='ToDo', description = 'test_set_value 1')).insert()
todo2 = frappe.get_doc(dict(doctype='ToDo', description = 'test_set_value 2')).insert()
frappe.db.set_value('ToDo', todo1.name, 'description', 'test_set_value change 1')
self.assertEqual(frappe.db.get_value('ToDo', todo1.name, 'description'), 'test_set_value change 1')
# multiple set-value
frappe.db.set_value('ToDo', dict(description=('like', '%test_set_value%')),
'description', 'change 2')
self.assertEqual(frappe.db.get_value('ToDo', todo1.name, 'description'), 'change 2')
self.assertEqual(frappe.db.get_value('ToDo', todo2.name, 'description'), 'change 2')
def test_escape(self):
frappe.db.escape("香港濟生堂製藥有限公司 - IT".encode("utf-8"))
def test_get_single_value(self):
#setup
values_dict = {
"Float": 1.5,
"Int": 1,
"Percent": 55.5,
"Currency": 12.5,
"Data": "Test",
"Date": datetime.datetime.now().date(),
"Datetime": datetime.datetime.now(),
"Time": datetime.timedelta(hours=9, minutes=45, seconds=10)
}
test_inputs = [{
"fieldtype": fieldtype,
"value": value} for fieldtype, value in values_dict.items()]
for fieldtype in values_dict.keys():
create_custom_field("Print Settings", {
"fieldname": f"test_{fieldtype.lower()}",
"label": f"Test {fieldtype}",
"fieldtype": fieldtype,
})
#test
for inp in test_inputs:
fieldname = f"test_{inp['fieldtype'].lower()}"
frappe.db.set_value("Print Settings", "Print Settings", fieldname, inp["value"])
self.assertEqual(frappe.db.get_single_value("Print Settings", fieldname), inp["value"])
#teardown
clear_custom_fields("Print Settings")
def test_log_touched_tables(self):
frappe.flags.in_migrate = True
frappe.flags.touched_tables = set()
frappe.db.set_value('System Settings', 'System Settings', 'backup_limit', 5)
self.assertIn('tabSingles', frappe.flags.touched_tables)
frappe.flags.touched_tables = set()
todo = frappe.get_doc({'doctype': 'ToDo', 'description': 'Random Description'})
todo.save()
self.assertIn('tabToDo', frappe.flags.touched_tables)
frappe.flags.touched_tables = set()
todo.description = "Another Description"
todo.save()
self.assertIn('tabToDo', frappe.flags.touched_tables)
if frappe.db.db_type != "postgres":
frappe.flags.touched_tables = set()
frappe.db.sql("UPDATE tabToDo SET description = 'Updated Description'")
self.assertNotIn('tabToDo SET', frappe.flags.touched_tables)
self.assertIn('tabToDo', frappe.flags.touched_tables)
frappe.flags.touched_tables = set()
todo.delete()
self.assertIn('tabToDo', frappe.flags.touched_tables)
frappe.flags.touched_tables = set()
create_custom_field('ToDo', {'label': 'ToDo Custom Field'})
self.assertIn('tabToDo', frappe.flags.touched_tables)
self.assertIn('tabCustom Field', frappe.flags.touched_tables)
frappe.flags.in_migrate = False
frappe.flags.touched_tables.clear()
def test_db_keywords_as_fields(self):
"""Tests if DB keywords work as docfield names. If they're wrapped with grave accents."""
# Using random.choices, picked out a list of 40 keywords for testing
all_keywords = {
"mariadb": ["CHARACTER", "DELAYED", "LINES", "EXISTS", "YEAR_MONTH", "LOCALTIME", "BOTH", "MEDIUMINT",
"LEFT", "BINARY", "DEFAULT", "KILL", "WRITE", "SQL_SMALL_RESULT", "CURRENT_TIME", "CROSS", "INHERITS",
"SELECT", "TABLE", "ALTER", "CURRENT_TIMESTAMP", "XOR", "CASE", "ALL", "WHERE", "INT", "TO", "SOME",
"DAY_MINUTE", "ERRORS", "OPTIMIZE", "REPLACE", "HIGH_PRIORITY", "VARBINARY", "HELP", "IS",
"CHAR", "DESCRIBE", "KEY"],
"postgres": ["WORK", "LANCOMPILER", "REAL", "HAVING", "REPEATABLE", "DATA", "USING", "BIT", "DEALLOCATE",
"SERIALIZABLE", "CURSOR", "INHERITS", "ARRAY", "TRUE", "IGNORE", "PARAMETER_MODE", "ROW", "CHECKPOINT",
"SHOW", "BY", "SIZE", "SCALE", "UNENCRYPTED", "WITH", "AND", "CONVERT", "FIRST", "SCOPE", "WRITE", "INTERVAL",
"CHARACTER_SET_SCHEMA", "ADD", "SCROLL", "NULL", "WHEN", "TRANSACTION_ACTIVE",
"INT", "FORTRAN", "STABLE"]
}
created_docs = []
# edit by rushabh: added [:1]
# don't run every keyword! - if one works, they all do
fields = all_keywords[frappe.conf.db_type][:1]
test_doctype = "ToDo"
def add_custom_field(field):
create_custom_field(test_doctype, {
"fieldname": field.lower(),
"label": field.title(),
"fieldtype": 'Data',
})
# Create custom fields for test_doctype
for field in fields:
add_custom_field(field)
# Create documents under that doctype and query them via ORM
for _ in range(10):
docfields = { key.lower(): random_string(10) for key in fields }
doc = frappe.get_doc({"doctype": test_doctype, "description": random_string(20), **docfields})
doc.insert()
created_docs.append(doc.name)
random_field = choice(fields).lower()
random_doc = choice(created_docs)
random_value = random_string(20)
# Testing read
self.assertEqual(list(frappe.get_all("ToDo", fields=[random_field], limit=1)[0])[0], random_field)
self.assertEqual(list(frappe.get_all("ToDo", fields=[f"`{random_field}` as total"], limit=1)[0])[0], "total")
# Testing read for distinct and sql functions
self.assertEqual(list(
frappe.get_all("ToDo",
fields=[f"`{random_field}` as total"],
distinct=True,
limit=1,
)[0]
)[0], "total")
self.assertEqual(list(
frappe.get_all("ToDo",
fields=[f"`{random_field}`"],
distinct=True,
limit=1,
)[0]
)[0], random_field)
self.assertEqual(list(
frappe.get_all("ToDo",
fields=[f"count(`{random_field}`)"],
limit=1
)[0]
)[0], "count" if frappe.conf.db_type == "postgres" else f"count(`{random_field}`)")
# Testing update
frappe.db.set_value(test_doctype, random_doc, random_field, random_value)
self.assertEqual(frappe.db.get_value(test_doctype, random_doc, random_field), random_value)
# Cleanup - delete records and remove custom fields
for doc in created_docs:
frappe.delete_doc(test_doctype, doc)
clear_custom_fields(test_doctype)
|
the-stack_106_18368
|
import discord
import asyncio
from packages.get_server_status import get_server_status, format_dict
from packages.get_news import get_news, format_news
from packages.format_op import format_op
from unidecode import unidecode
from packages.player_info import get_player_info, format_player_info
from packages.live import get_lives, format_lives
from packages.prefix import change_prefix
from packages.quote import get_quote
from packages.get_op_info import get_op_info
from time import sleep
import os
from dotenv import load_dotenv
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
client = discord.Client()
@client.event
async def on_ready():
print('now online - hello, world')
print(client.user.name)
print(client.user.id)
print('-'*20)
@client.event
async def on_message(message):
global prefix
found_prefix = False
# read the per-guild prefix from the store, if it exists yet
if os.path.exists('log.txt'):
with open(r'log.txt', 'r') as log:
for line in log.readlines():
if str(message.guild.id) in line:
prefix = line.split(' ')[1].replace('\n', '')
found_prefix = True
if not found_prefix:
prefix = '?'
if message.content.lower().startswith(f'{prefix.lower()}'):
if message.content.lower().startswith(f'{prefix.lower()}help'):
msg = discord.Embed(title='Commands', description="Here's a list of all commands!", colour=discord.Color.from_rgb(244, 175, 44))
msg.set_thumbnail(url='https://i.imgur.com/Vgrtz5u.png')
msg.add_field(name=f'{prefix}pcstats', value='shows info regarding the status of the PC servers')
msg.add_field(name=f'{prefix}ps4stats', value='shows info regarding the status of the PS4 servers')
msg.add_field(name=f'{prefix}xboxstats', value='shows info regarding the status of the Xbox One servers')
msg.add_field(name=f'{prefix}agent (agent name)', value="shows agent's general info")
msg.add_field(name=f'{prefix}player (name) (platform)', value="tracks player's info (default platform is PC)")
msg.add_field(name=f'{prefix}live', value=f"shows live streams on twitch ({prefix}lives also works)")
msg.add_field(name=f'{prefix}prefix (new prefix)', value=f"changes the server's command prefix")
msg.add_field(name=f'{prefix}quote', value=f"generates a very wise quote.")
await message.channel.send(embed=msg)
elif 'stats' in message.content.lower():
await message.channel.send("Working on it...")
if message.content.lower().startswith(f'{prefix.lower()}pcstats'):
status = get_server_status()
info = format_dict(status["pc"])
await message.channel.send(embed=info)
elif message.content.lower().startswith(f'{prefix.lower()}ps4stats'):
status = get_server_status()
info = format_dict(status["ps4"])
await message.channel.send(embed=info)
elif message.content.lower().startswith(f'{prefix.lower()}xboxstats'):
status = get_server_status()
info = format_dict(status["xbox"])
await message.channel.send(embed=info)
elif message.content.lower().startswith(f'{prefix.lower()}news'):
await message.channel.send("Working on it...")
news = get_news()
emb = format_news(news)
await message.channel.send(embed=emb)
elif message.content.lower().startswith(f'{prefix.lower()}agent'):
try:
op = unidecode(message.content.lower().split(' ')[1])
except:
msg = discord.Embed(title='Error', description=f"Please give me a valid agent", colour=discord.Color.from_rgb(255, 0, 0))
await message.channel.send(embed=msg)
else:
await message.channel.send("Working on it...")
try:
emb = format_op(get_op_info(op))
await message.channel.send(embed=emb)
except:
msg = discord.Embed(title='Error', description=f"Failed to find {op}'s information! Please check agent's name and try again", colour=discord.Color.from_rgb(255, 0, 0))
await message.channel.send(embed=msg)
elif message.content.lower().startswith(f'{prefix.lower()}player'):
try:
player = unidecode(message.content.split(' ')[1])
except:
msg = discord.Embed(title='Error', description=f"Please give me a valid player name and/or platform", colour=discord.Color.from_rgb(255, 0, 0))
await message.channel.send(embed=msg)
else:
try:
if len(message.content.split(' ')) == 2:
platform = 'pc'
else:
platform = unidecode(message.content.lower().split(' ')[2]).lower()
await message.channel.send("Working on it...")
player_info = format_player_info(get_player_info(platform, player))
await message.channel.send(embed=player_info["overview"])
await message.channel.send(embed=player_info["ranked"])
await message.channel.send(embed=player_info["season"])
except:
msg = discord.Embed(title='Error', description=f"Failed to find {player}'s information! Please check player's name and platform and try again", colour=discord.Color.from_rgb(255, 0, 0))
await message.channel.send(embed=msg)
elif message.content.lower().startswith(f'{prefix.lower()}lives') or message.content.lower().startswith(f'{prefix.lower()}live'):
await message.channel.send("Working on it...")
lives = get_lives()[:6]
msg = format_lives(lives)
await message.channel.send(embed=msg)
elif message.content.lower().startswith(f'{prefix.lower()}prefix'):
try:
new_prefix = message.content.split(' ')[1]
except:
msg = discord.Embed(title='Error', description=f"Please give me a valid prefix", colour=discord.Color.from_rgb(255, 0, 0))
await message.channel.send(embed=msg)
else:
log = open(r'./log.txt', 'a')
log.write(f'{message.guild.id}: {new_prefix}\n')
log.close()
change_prefix(new_prefix)
msg = discord.Embed(title='Prefix', description=f"Sucessfully updated prefix to {new_prefix}", colour=discord.Color.from_rgb(244, 175, 44))
await message.channel.send(embed=msg)
elif message.content.lower().startswith(f'{prefix.lower()}quote'):
await message.channel.send(get_quote())
elif message.content.lower().startswith(f'{prefix.lower()}clear'):
msg_list = message.content.split(' ')
if len(msg_list) == 2:
try:
deleted = await message.channel.purge(limit=int(msg_list[1]) + 1)
except:
msg = discord.Embed(title='Error', description=f"Please give me a valid number of messages or upgrade my permissions!", colour=discord.Color.from_rgb(255, 0, 0))
await message.channel.send(embed=msg)
else:
msg = discord.Embed(title='Clear', description=f"Sucessfully deleted {len(deleted)} messages", colour=discord.Color.from_rgb(244, 175, 44))
await message.channel.send(embed=msg)
else:
try:
deleted = await message.channel.purge(limit=100)
msg = discord.Embed(title='Clear', description=f"Sucessfully deleted {len(deleted)} messages", colour=discord.Color.from_rgb(244, 175, 44))
await message.channel.send(embed=msg)
except:
msg = discord.Embed(title='Error', description=f"Please upgrade my permissions!", colour=discord.Color.from_rgb(255, 0, 0))
await message.channel.send(embed=msg)
client.run(TOKEN)
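# Note on the prefix store (added, not part of the original file): the loop at the
# top of on_message and the "prefix" command both assume log.txt keeps one
# "<guild_id>: <prefix>" pair per line, for example (made-up IDs):
#   123456789012345678: !
#   987654321098765432: $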
|
the-stack_106_18370
|
"""
IDE: PyCharm
Project: social-media-bot
Author: Robin
Filename: analyze_tweets.py
Date: 25.01.2020
"""
import json
import os
from collections import defaultdict
from datetime import datetime
import dotenv
import numpy as np
import pandas as pd
import spacy
from matplotlib import pyplot
from term_document_matrix import TermDocumentMatrix, filter_top_phrases
nlp = spacy.load('en_core_web_sm')
def text_analysis(tweets_filepath):
matrix = TermDocumentMatrix(nlp)
with open(tweets_filepath, 'r', encoding='utf8') as json_file:
tweet_set = json.load(json_file)
for tweet in tweet_set["tweets"]:
text = tweet["text"].strip()
id = tweet["tweet_id"]
matrix.add_doc(id, text)
phrases = matrix.get_most_frequent_phrases(2, 3)
top_phrases = filter_top_phrases(phrases, 20)
objects = [x[0] for x in top_phrases]
y_pos = np.arange(len(objects))
performance = [x[1] for x in top_phrases]
pyplot.bar(y_pos, performance, align='center', alpha=0.5)
pyplot.xticks(y_pos, objects, rotation=45)
pyplot.ylabel('Document Frequency')
pyplot.title('Terms')
pyplot.show()
def general_stats(tweets_filepath):
with open(tweets_filepath, 'r', encoding='utf8') as json_file:
tweets = json.load(json_file)["tweets"]
# preprocessing (categorize top5 mentions, timestamps to dates
mention_count = defaultdict(int)
for tweet in tweets:
tweet["date"] = datetime.strptime(datetime.strptime(tweet["date"], "%c").strftime('%d.%m.%y'), '%d.%m.%y')
for mention in tweet['mentions']:
mention_count[mention] += 1
# only top 5 tags
topx = 5
mention_items = list([k, mention_count[k]] for k in mention_count.keys())
mention_items.sort(key=lambda x: x[1], reverse=True)
mention_items = mention_items[:topx]
mention_tags = [k for (k, v) in mention_items]
# create mention columns
for tweet in tweets:
for tag in mention_tags:
if tag in tweet['mentions']:
tweet[tag] = 1
else:
tweet[tag] = 0
series = pd.DataFrame.from_records(tweets, index="tweet_id", exclude=['hashtags', 'url', 'text', 'mentions'])
grouped = series.groupby('date').sum()
grouped.plot()
pyplot.show()
for mention_tag in mention_tags:
grouped = series.query(mention_tag + ">0").groupby('date').count()
grouped.plot(y=[mention_tag])
pyplot.show()
if __name__ == '__main__':
dotenv.load_dotenv()
tweets_file = os.getenv('TWITTER_DATA')
general_stats(tweets_file)
text_analysis(tweets_file)
|
the-stack_106_18371
|
"""project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls import include
urlpatterns = [
path("", include("django.contrib.auth.urls")),
path("", include("users.urls")),
path("", include("landing.urls")),
path("dashboard/", include("dashboard.urls")),
path("api/", include("workouts.urls")),
path("admin/", admin.site.urls),
]
|
the-stack_106_18372
|
# -*- coding: utf-8 -*-
# Copyright 2018-2019 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""time_input unit test."""
from tests import testutil
import streamlit as st
from parameterized import parameterized
from datetime import datetime
from datetime import time
class TimeInputTest(testutil.DeltaGeneratorTestCase):
"""Test ability to marshall time_input protos."""
def test_just_label(self):
"""Test that it can be called with no value."""
st.time_input("the label")
c = self.get_delta_from_queue().new_element.time_input
self.assertEqual(c.label, "the label")
self.assertLessEqual(
datetime.strptime(c.default, "%H:%M").time(), datetime.now().time()
)
@parameterized.expand(
[(time(8, 45), "08:45"), (datetime(2019, 7, 6, 21, 15), "21:15")]
)
def test_value_types(self, arg_value, proto_value):
"""Test that it supports different types of values."""
st.time_input("the label", arg_value)
c = self.get_delta_from_queue().new_element.time_input
self.assertEqual(c.label, "the label")
self.assertEqual(c.default, proto_value)
|
the-stack_106_18373
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
import os
import time
import netrender
from netrender.utils import *
from bpy.props import PointerProperty, StringProperty, BoolProperty, EnumProperty, IntProperty, CollectionProperty
VERSION = b"0.5"
PATH_PREFIX = "/tmp/"
LAST_ADDRESS_TEST = 0
ADDRESS_TEST_TIMEOUT = 30
def base_poll(cls, context):
rd = context.scene.render
return (rd.engine in cls.COMPAT_ENGINES)
def init_file():
if netrender.init_file != bpy.data.filepath:
netrender.init_file = bpy.data.filepath
netrender.init_data = True
netrender.valid_address = False
def init_data(netsettings):
init_file()
if netrender.init_data:
netrender.init_data = False
netsettings.active_slave_index = 0
while(len(netsettings.slaves) > 0):
netsettings.slaves.remove(0)
netsettings.active_blacklisted_slave_index = 0
while(len(netsettings.slaves_blacklist) > 0):
netsettings.slaves_blacklist.remove(0)
netsettings.active_job_index = 0
while(len(netsettings.jobs) > 0):
netsettings.jobs.remove(0)
def verify_address(netsettings, force=False):
global LAST_ADDRESS_TEST
init_file()
if force or LAST_ADDRESS_TEST + ADDRESS_TEST_TIMEOUT < time.time():
LAST_ADDRESS_TEST = time.time()
try:
conn = clientConnection(netsettings, scan = False, timeout = 1)
except:
conn = None
if conn:
netrender.valid_address = True
conn.close()
else:
netrender.valid_address = False
return netrender.valid_address
class NeedValidAddress():
@classmethod
def poll(cls, context):
return super().poll(context) and verify_address(context.scene.network_render)
class NetRenderButtonsPanel():
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
bl_context = "render"
# COMPAT_ENGINES must be defined in each subclass, external engines can add themselves here
@classmethod
def poll(cls, context):
rd = context.scene.render
return rd.engine == 'NET_RENDER'
# Setting panel, use in the scene for now.
class RENDER_PT_network_settings(NetRenderButtonsPanel, bpy.types.Panel):
bl_label = "Network Settings"
COMPAT_ENGINES = {'NET_RENDER'}
@classmethod
def poll(cls, context):
return super().poll(context)
def draw(self, context):
layout = self.layout
netsettings = context.scene.network_render
verify_address(netsettings)
layout.prop(netsettings, "mode", expand=True)
if netsettings.mode in {'RENDER_MASTER', 'RENDER_SLAVE'}:
layout.operator("render.netclientstart", icon='PLAY')
layout.prop(netsettings, "path")
row = layout.row()
split = layout.split(factor=0.5)
col = split.column()
col.prop(netsettings, "server_address", text="Address")
col = split.column()
row = col.row()
row.prop(netsettings, "server_port", text="Port")
row.prop(netsettings, "use_ssl", text="SSL")
if netsettings.mode != "RENDER_MASTER":
layout.operator("render.netclientscan", icon='FILE_REFRESH', text="")
if not netrender.valid_address:
layout.label(text="No master at specified address")
if netsettings.use_ssl and netsettings.mode == "RENDER_MASTER":
layout.prop(netsettings, "cert_path", text="Certificate")
layout.prop(netsettings, "key_path", text="Key")
layout.operator("render.netclientweb", icon='QUESTION')
class RENDER_PT_network_slave_settings(NetRenderButtonsPanel, bpy.types.Panel):
bl_label = "Slave Settings"
COMPAT_ENGINES = {'NET_RENDER'}
@classmethod
def poll(cls, context):
scene = context.scene
return super().poll(context) and scene.network_render.mode == "RENDER_SLAVE"
def draw(self, context):
layout = self.layout
rd = context.scene.render
netsettings = context.scene.network_render
layout.prop(netsettings, "slave_tags", text="Tags")
layout.prop(netsettings, "slave_render")
layout.prop(netsettings, "slave_bake")
layout.prop(netsettings, "use_slave_clear")
layout.prop(netsettings, "use_slave_thumb")
layout.prop(netsettings, "use_slave_output_log")
layout.label(text="Threads:")
layout.prop(rd, "threads_mode", expand=True)
col = layout.column()
col.enabled = rd.threads_mode == 'FIXED'
col.prop(rd, "threads")
class RENDER_PT_network_master_settings(NetRenderButtonsPanel, bpy.types.Panel):
bl_label = "Master Settings"
COMPAT_ENGINES = {'NET_RENDER'}
@classmethod
def poll(cls, context):
scene = context.scene
return super().poll(context) and scene.network_render.mode == "RENDER_MASTER"
def draw(self, context):
layout = self.layout
netsettings = context.scene.network_render
layout.prop(netsettings, "use_master_broadcast")
layout.prop(netsettings, "use_master_force_upload")
layout.prop(netsettings, "use_master_clear")
class RENDER_PT_network_job(NetRenderButtonsPanel, bpy.types.Panel):
bl_label = "Job Settings"
COMPAT_ENGINES = {'NET_RENDER'}
@classmethod
def poll(cls, context):
scene = context.scene
return super().poll(context) and scene.network_render.mode == "RENDER_CLIENT"
def draw(self, context):
layout = self.layout
netsettings = context.scene.network_render
verify_address(netsettings)
if netsettings.server_address != "[default]":
layout.operator("render.netclientanim", icon='RENDER_ANIMATION')
layout.operator("render.netclientsend", icon='FILE_BLEND')
layout.operator("render.netclientsendbake", icon='PHYSICS')
layout.operator("render.netclientsendframe", icon='RENDER_STILL')
if netsettings.job_id:
row = layout.row()
row.operator("render.render", text="Get Image", icon='RENDER_STILL')
row.operator("render.render", text="Get Animation", icon='RENDER_ANIMATION').animation = True
layout.prop(netsettings, "job_type", text="Type")
layout.prop(netsettings, "job_name", text="Name")
layout.prop(netsettings, "job_category", text="Category")
layout.prop(netsettings, "job_tags", text="Tags")
layout.prop(netsettings, "job_render_engine", text="Engine")
if netsettings.job_render_engine == "OTHER":
layout.prop(netsettings, "job_render_engine_other", text="Other Engine")
row = layout.row()
row.prop(netsettings, "priority")
row.prop(netsettings, "chunks")
if netsettings.job_type == "JOB_BLENDER":
layout.prop(netsettings, "save_before_job")
class RENDER_PT_network_job_vcs(NetRenderButtonsPanel, bpy.types.Panel):
bl_label = "VCS Job Settings"
COMPAT_ENGINES = {'NET_RENDER'}
@classmethod
def poll(cls, context):
scene = context.scene
return (super().poll(context)
and scene.network_render.mode == "RENDER_CLIENT"
and scene.network_render.job_type == "JOB_VCS")
def draw(self, context):
layout = self.layout
netsettings = context.scene.network_render
layout.operator("render.netclientvcsguess", icon='FILE_REFRESH', text="")
layout.prop(netsettings, "vcs_system")
layout.prop(netsettings, "vcs_revision")
layout.prop(netsettings, "vcs_rpath")
layout.prop(netsettings, "vcs_wpath")
class RENDER_PT_network_slaves(NeedValidAddress, NetRenderButtonsPanel, bpy.types.Panel):
bl_label = "Slaves Status"
COMPAT_ENGINES = {'NET_RENDER'}
@classmethod
def poll(cls, context):
netsettings = context.scene.network_render
return super().poll(context) and netsettings.mode == "RENDER_CLIENT"
def draw(self, context):
layout = self.layout
netsettings = context.scene.network_render
row = layout.row()
row.template_list("UI_UL_list", "net_render_slaves", netsettings, "slaves",
netsettings, "active_slave_index", rows=2)
sub = row.column(align=True)
sub.operator("render.netclientslaves", icon='FILE_REFRESH', text="")
sub.operator("render.netclientblacklistslave", icon='ZOOMOUT', text="")
if len(netrender.slaves) > netsettings.active_slave_index >= 0:
layout.separator()
slave = netrender.slaves[netsettings.active_slave_index]
layout.label(text="Name: " + slave.name)
layout.label(text="Address: " + slave.address[0])
layout.label(text="Seen: " + time.ctime(slave.last_seen))
layout.label(text="Stats: " + slave.stats)
class RENDER_PT_network_slaves_blacklist(NeedValidAddress, NetRenderButtonsPanel, bpy.types.Panel):
bl_label = "Slaves Blacklist"
COMPAT_ENGINES = {'NET_RENDER'}
@classmethod
def poll(cls, context):
netsettings = context.scene.network_render
return super().poll(context) and netsettings.mode == "RENDER_CLIENT"
def draw(self, context):
layout = self.layout
netsettings = context.scene.network_render
row = layout.row()
row.template_list("UI_UL_list", "net_render_slaves_blacklist", netsettings, "slaves_blacklist",
netsettings, "active_blacklisted_slave_index", rows=2)
sub = row.column(align=True)
sub.operator("render.netclientwhitelistslave", icon='ZOOMOUT', text="")
if len(netrender.blacklist) > netsettings.active_blacklisted_slave_index >= 0:
layout.separator()
slave = netrender.blacklist[netsettings.active_blacklisted_slave_index]
layout.label(text="Name: " + slave.name)
layout.label(text="Address: " + slave.address[0])
layout.label(text="Seen: " + time.ctime(slave.last_seen))
layout.label(text="Stats: " + slave.stats)
class RENDER_PT_network_jobs(NeedValidAddress, NetRenderButtonsPanel, bpy.types.Panel):
bl_label = "Jobs"
COMPAT_ENGINES = {'NET_RENDER'}
@classmethod
def poll(cls, context):
netsettings = context.scene.network_render
return super().poll(context) and netsettings.mode == "RENDER_CLIENT"
def draw(self, context):
layout = self.layout
netsettings = context.scene.network_render
row = layout.row()
row.template_list("UI_UL_list", "net_render", netsettings, "jobs", netsettings, "active_job_index", rows=2)
sub = row.column(align=True)
sub.operator("render.netclientstatus", icon='FILE_REFRESH', text="")
sub.operator("render.netclientcancel", icon='ZOOMOUT', text="")
sub.operator("render.netclientcancelall", icon='PANEL_CLOSE', text="")
sub.operator("render.netclientdownload", icon='RENDER_ANIMATION', text="")
if len(netrender.jobs) > netsettings.active_job_index >= 0:
layout.separator()
job = netrender.jobs[netsettings.active_job_index]
layout.label(text="Name: %s" % job.name)
layout.label(text="Length: %04i" % len(job))
layout.label(text="Done: %04i" % job.results[netrender.model.FRAME_DONE])
layout.label(text="Error: %04i" % job.results[netrender.model.FRAME_ERROR])
import bl_ui.properties_render as properties_render
class RENDER_PT_network_output(NeedValidAddress, NetRenderButtonsPanel, bpy.types.Panel):
bl_label = "Output"
COMPAT_ENGINES = {'NET_RENDER'}
@classmethod
def poll(cls, context):
netsettings = context.scene.network_render
return super().poll(context) and netsettings.mode == "RENDER_CLIENT"
draw = properties_render.RENDER_PT_output.draw
class NetRenderSlave(bpy.types.PropertyGroup):
@classmethod
def register(NetRenderSlave):
NetRenderSlave.name = StringProperty(
name="Name of the slave",
description="",
maxlen = 64,
default = "")
class NetRenderJob(bpy.types.PropertyGroup):
@classmethod
def register(NetRenderJob):
NetRenderJob.name = StringProperty(
name="Name of the job",
description="",
maxlen = 128,
default = "")
class NetRenderSettings(bpy.types.PropertyGroup):
@classmethod
def register(NetRenderSettings):
def address_update_callback(self, context):
netsettings = context.scene.network_render
verify_address(netsettings, True)
NetRenderSettings.server_address = StringProperty(
name="Server address",
description="IP or name of the master render server",
maxlen = 128,
default = "[default]",
update = address_update_callback)
NetRenderSettings.server_port = IntProperty(
name="Server port",
description="port of the master render server",
default = 8000,
min=1,
max=65535)
NetRenderSettings.use_master_broadcast = BoolProperty(
name="Broadcast",
description="broadcast master server address on local network",
default = True)
NetRenderSettings.use_ssl = BoolProperty(
name="use ssl",
description="use ssl encryption for communication",
default = False)
NetRenderSettings.cert_path = StringProperty(
name="CertPath",
description="Path to ssl certificate",
maxlen = 128,
default = "",
subtype='FILE_PATH')
NetRenderSettings.key_path = StringProperty(
name="key",
description="Path to ssl key file",
maxlen = 128,
default = "",
subtype='FILE_PATH')
NetRenderSettings.use_slave_clear = BoolProperty(
name="Clear on exit",
description="delete downloaded files on exit",
default = True)
NetRenderSettings.use_slave_thumb = BoolProperty(
name="Generate thumbnails",
description="Generate thumbnails on slaves instead of master",
default = False)
NetRenderSettings.slave_tags = StringProperty(
name="Tags",
description="Tags to associate with the slave (semi-colon separated)",
maxlen = 256,
default = "")
NetRenderSettings.use_slave_output_log = BoolProperty(
name="Output render log on console",
description="Output render text log to console as well as sending it to the master",
default = True)
NetRenderSettings.slave_render = BoolProperty(
name="Render on slave",
description="Use slave for render jobs",
default = True)
NetRenderSettings.slave_bake = BoolProperty(
name="Bake on slave",
description="Use slave for baking jobs",
default = True)
NetRenderSettings.use_master_clear = BoolProperty(
name="Clear on exit",
description="Delete saved files on exit",
default = False)
NetRenderSettings.use_master_force_upload = BoolProperty(
name="Force Dependency Upload",
description="Force client to upload dependency files to master",
default = False)
default_path = os.environ.get("TEMP")
if not default_path:
if os.name == 'nt':
default_path = "c:/tmp/"
else:
default_path = "/tmp/"
elif not default_path.endswith(os.sep):
default_path += os.sep
NetRenderSettings.path = StringProperty(
name="Path",
description="Path for temporary files",
maxlen = 128,
default = default_path,
subtype='FILE_PATH')
NetRenderSettings.job_type = EnumProperty(
items=(
("JOB_BLENDER", "Blender", "Standard Blender Job"),
("JOB_PROCESS", "Process", "Custom Process Job"),
("JOB_VCS", "VCS", "Version Control System Managed Job"),
),
name="Job Type",
description="Type of render job",
default="JOB_BLENDER")
NetRenderSettings.job_name = StringProperty(
name="Job name",
description="Name of the job",
maxlen = 128,
default = "[default]")
NetRenderSettings.job_category = StringProperty(
name="Job category",
description="Category of the job",
maxlen = 128,
default = "")
NetRenderSettings.job_tags = StringProperty(
name="Tags",
description="Tags to associate with the job (semi-colon separated)",
maxlen = 256,
default = "")
NetRenderSettings.job_render_engine = EnumProperty(
items = (
("BLENDER_RENDER", "BLENDER", "Standard Blender Render"),
("CYCLES", "CYCLES", "Cycle Render"),
("OTHER", "OTHER", "Other non-default Render"),
),
name="render",
description="Render engine used to render this job",
default="BLENDER_RENDER")
NetRenderSettings.job_render_engine_other = StringProperty(
name="Render engine",
description="Render engine other than the builtin defaults (POVRAY_RENDER, ...)",
maxlen = 128,
default = "")
NetRenderSettings.save_before_job = BoolProperty(
name="Save Before Job",
description="Save current file before sending a job",
default = False)
NetRenderSettings.chunks = IntProperty(
name="Chunks",
description="Number of frame to dispatch to each slave in one chunk",
default = 5,
min=1,
max=65535)
NetRenderSettings.priority = IntProperty(
name="Priority",
description="Priority of the job",
default = 1,
min=1,
max=10)
NetRenderSettings.vcs_wpath = StringProperty(
name="Working Copy",
description="Path of the local working copy",
maxlen = 1024,
default = "")
NetRenderSettings.vcs_rpath = StringProperty(
name="Remote Path",
description="Path of the server copy (protocol specific)",
maxlen = 1024,
default = "")
NetRenderSettings.vcs_revision = StringProperty(
name="Revision",
description="Revision for this job",
maxlen = 256,
default = "")
NetRenderSettings.vcs_system = EnumProperty(
items= netrender.versioning.ITEMS,
name="VCS mode",
description="Version Control System",
default=netrender.versioning.ITEMS[0][0])
NetRenderSettings.job_id = StringProperty(
name="Network job id",
description="id of the last sent render job",
maxlen = 64,
default = "")
NetRenderSettings.active_slave_index = IntProperty(
name="Index of the active slave",
description="",
default = -1,
min= -1,
max=65535)
NetRenderSettings.active_blacklisted_slave_index = IntProperty(
name="Index of the active slave",
description="",
default = -1,
min= -1,
max=65535)
NetRenderSettings.active_job_index = IntProperty(
name="Index of the active job",
description="",
default = -1,
min= -1,
max=65535)
NetRenderSettings.mode = EnumProperty(
items=(
("RENDER_CLIENT", "Client", "Act as render client"),
("RENDER_MASTER", "Master", "Act as render master"),
("RENDER_SLAVE", "Slave", "Act as render slave"),
),
name="Network mode",
description="Mode of operation of this instance",
default="RENDER_CLIENT")
NetRenderSettings.slaves = CollectionProperty(type=NetRenderSlave, name="Slaves", description="")
NetRenderSettings.slaves_blacklist = CollectionProperty(type=NetRenderSlave, name="Slaves Blacklist", description="")
NetRenderSettings.jobs = CollectionProperty(type=NetRenderJob, name="Job List", description="")
bpy.types.Scene.network_render = PointerProperty(type=NetRenderSettings, name="Network Render", description="Network Render Settings")
@classmethod
def unregister(cls):
del bpy.types.Scene.network_render
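# --- Illustrative usage sketch (not part of the original add-on) ---
# A minimal sketch of configuring the settings registered above from a script
# running inside Blender; the address value below is purely hypothetical.
def _example_configure_client(scene):
    netsettings = scene.network_render
    netsettings.mode = 'RENDER_CLIENT'
    netsettings.server_address = "192.168.0.10"  # hypothetical master address
    netsettings.server_port = 8000
    return verify_address(netsettings, force=True)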
|
the-stack_106_18374
|
import time
from spaceone.inventory.libs.manager import AWSPowerStateManager
from spaceone.inventory.libs.schema.base import ReferenceModel
from spaceone.inventory.connector.auto_scaling import AutoScalingConnector
from spaceone.inventory.model.auto_scaling import *
class AutoScalingManager(AWSPowerStateManager):
connector_name = 'AutoScalingConnector'
def collect_power_state(self, params):
print("** Auto Scaling Start **")
start_time = time.time()
auto_scaling_conn: AutoScalingConnector = self.locator.get_connector(self.connector_name, **params)
auto_scaling_resources = []
for region_name in params.get('regions', []):
# print(f'[ AutoScaling {region_name} ]')
auto_scaling_conn.set_client(region_name)
for asg in auto_scaling_conn.describe_auto_scaling_groups():
auto_scaling_data = AutoScalingGroup(asg, strict=False)
auto_scaling_resource = AutoScalingResource({
'data': auto_scaling_data,
'reference': ReferenceModel(auto_scaling_data.reference())
})
auto_scaling_resources.append(AutoScalingResponse({'resource': auto_scaling_resource}))
print(f' Auto Scaling Finished {time.time() - start_time} Seconds')
return auto_scaling_resources
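# --- Illustrative note (not part of the original module) ---
# The expected shape of `params` is inferred from the `params.get('regions', [])`
# call above; any credential keys are provided by the SpaceONE framework and
# are not shown here:
#   params = {'regions': ['us-east-1', 'ap-northeast-2']}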
|
the-stack_106_18375
|
def quadrado(b):
    # Return the string "b^2 = <square of b>" for a given number b.
    return f'{b}^2 = {b**2}'
def eh_par(n):
    # Recursively print the squares of the even numbers from n down to 2.
    if n < 2:
        return ''
    if n % 2 != 0:
        n -= 1
    print(quadrado(n))
    return eh_par(n-2)
def quadrado_de_pares():
    # Read integers until 0 is typed, printing the even squares for each input.
    x = 1
    while x != 0:
        x = int(input())
        eh_par(x)
quadrado_de_pares()
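# --- Illustrative alternative (not part of the original exercise) ---
# An iterative sketch with the same printed output as eh_par, shown only to
# clarify what the recursion above computes.
def quadrado_de_pares_iterativo(n):
    for i in range(n if n % 2 == 0 else n - 1, 1, -2):
        print(quadrado(i))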
|
the-stack_106_18376
|
# Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from __future__ import absolute_import
import logging
import os
import boto3
import pytest
from sagemaker import LocalSession, Session
from sagemaker.tensorflow import TensorFlow
from ..integration import NO_P2_REGIONS, NO_P3_REGIONS, get_ecr_registry
logger = logging.getLogger(__name__)
logging.getLogger('boto').setLevel(logging.INFO)
logging.getLogger('botocore').setLevel(logging.INFO)
logging.getLogger('factory.py').setLevel(logging.INFO)
logging.getLogger('auth.py').setLevel(logging.INFO)
logging.getLogger('connectionpool.py').setLevel(logging.INFO)
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
def pytest_addoption(parser):
parser.addoption('--docker-base-name', default='sagemaker-tensorflow-scriptmode')
parser.addoption('--tag', default=None)
parser.addoption('--region', default='us-west-2')
parser.addoption('--framework-version', default='')
parser.addoption('--processor', default='cpu', choices=['cpu', 'gpu', 'cpu,gpu'])
parser.addoption('--py-version', default='3', choices=['2', '3', '2,3', '37'])
parser.addoption('--account-id', default='142577830533')
parser.addoption('--instance-type', default=None)
parser.addoption('--generate-coverage-doc', default=False, action='store_true',
help='use this option to generate test coverage doc')
parser.addoption(
"--efa", action="store_true", default=False, help="Run only efa tests",
)
def pytest_configure(config):
    config.addinivalue_line("markers", "efa(): explicitly mark to run efa tests")
    os.environ['TEST_PY_VERSIONS'] = config.getoption('--py-version')
    os.environ['TEST_PROCESSORS'] = config.getoption('--processor')
def pytest_runtest_setup(item):
if item.config.getoption("--efa"):
efa_tests = [mark for mark in item.iter_markers(name="efa")]
if not efa_tests:
pytest.skip("Skipping non-efa tests")
def pytest_collection_modifyitems(session, config, items):
if config.getoption("--generate-coverage-doc"):
from test.test_utils.test_reporting import TestReportGenerator
report_generator = TestReportGenerator(items, is_sagemaker=True)
report_generator.generate_coverage_doc(framework="tensorflow_2", job_type="training")
@pytest.fixture(scope='session')
def docker_base_name(request):
return request.config.getoption('--docker-base-name')
@pytest.fixture(scope='session')
def region(request):
return request.config.getoption('--region')
@pytest.fixture(scope='session')
def framework_version(request):
return request.config.getoption('--framework-version')
@pytest.fixture
def tag(request, framework_version, processor, py_version):
provided_tag = request.config.getoption('--tag')
default_tag = '{}-{}-py{}'.format(framework_version, processor, py_version)
return provided_tag if provided_tag is not None else default_tag
@pytest.fixture(scope='session')
def sagemaker_session(region):
return Session(boto_session=boto3.Session(region_name=region))
@pytest.fixture(scope='session')
def sagemaker_local_session(region):
return LocalSession(boto_session=boto3.Session(region_name=region))
@pytest.fixture(scope='session')
def account_id(request):
return request.config.getoption('--account-id')
@pytest.fixture
def instance_type(request, processor):
provided_instance_type = request.config.getoption('--instance-type')
default_instance_type = 'ml.c4.xlarge' if processor == 'cpu' else 'ml.p2.xlarge'
return provided_instance_type if provided_instance_type is not None else default_instance_type
@pytest.fixture()
def py_version():
if 'TEST_PY_VERSIONS' in os.environ:
return os.environ['TEST_PY_VERSIONS'].split(',')
return None
@pytest.fixture()
def processor():
if 'TEST_PROCESSORS' in os.environ:
return os.environ['TEST_PROCESSORS'].split(',')
return None
@pytest.fixture(autouse=True)
def skip_by_device_type(request, processor):
is_gpu = (processor == 'gpu')
if (request.node.get_closest_marker('skip_gpu') and is_gpu) or \
(request.node.get_closest_marker('skip_cpu') and not is_gpu):
pytest.skip('Skipping because running on \'{}\' instance'.format(processor))
@pytest.fixture(autouse=True)
def skip_gpu_instance_restricted_regions(region, instance_type):
if (region in NO_P2_REGIONS and instance_type.startswith('ml.p2')) or \
(region in NO_P3_REGIONS and instance_type.startswith('ml.p3')):
pytest.skip('Skipping GPU test in region {}'.format(region))
@pytest.fixture
def docker_image(docker_base_name, tag):
return '{}:{}'.format(docker_base_name, tag)
@pytest.fixture
def ecr_image(account_id, docker_base_name, tag, region):
registry = get_ecr_registry(account_id, region)
return '{}/{}:{}'.format(registry, docker_base_name, tag)
@pytest.fixture(autouse=True)
def skip_py2_containers(request, tag):
if request.node.get_closest_marker('skip_py2_containers'):
if 'py2' in tag:
pytest.skip('Skipping python2 container with tag {}'.format(tag))
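# --- Illustrative usage (not part of the original conftest) ---
# A hedged sketch of how the options above are typically passed, with
# placeholder values; adjust paths and tags to your setup:
#   pytest test/integration/sagemaker --region us-west-2 --processor gpu \
#          --py-version 3 --framework-version 2.1.0 --tag 2.1.0-gpu-py3
# The helper below only illustrates the relationship between the
# docker_base_name, tag and docker_image fixtures defined in this file; the
# leading underscore keeps pytest from collecting it as a test.
def _example_docker_image_format(docker_base_name, tag):
    return '{}:{}'.format(docker_base_name, tag)  # same format as the docker_image fixture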
|
the-stack_106_18377
|
_base_ = './retinanet_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
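# --- Illustrative note (not part of the original config) ---
# Configs like this are normally consumed through mmcv, roughly as below; the
# file path is a placeholder and the exact API may differ between versions:
#   from mmcv import Config
#   cfg = Config.fromfile('configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py')
#   print(cfg.img_norm_cfg)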
|
the-stack_106_18379
|
import scipy.ndimage.filters
import pyredner
import numpy as np
import torch
pyredner.set_use_gpu(torch.cuda.is_available())
scene = pyredner.load_mitsuba('scenes/bunny_box.xml')
scene.shapes[-1].vertices += torch.tensor([0, 0.01, 0], device = pyredner.get_device())
args=pyredner.RenderFunction.serialize_scene(\
scene = scene,
num_samples = 4,
max_bounces = 6)
render = pyredner.RenderFunction.apply
# Render our target. The first argument is the seed for RNG in the renderer.
img = render(0, *args)
pyredner.imwrite(img.cpu(), 'results/test_bunny_box/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_bunny_box/target.png')
target = pyredner.imread('results/test_bunny_box/target.exr')
if pyredner.get_use_gpu():
target = target.cuda(device = pyredner.get_device())
bunny_vertices = scene.shapes[-1].vertices.clone()
bunny_translation = torch.tensor([0.1,0.4,0.1], device = pyredner.get_device(), requires_grad=True)
bunny_rotation = torch.tensor([-0.2,0.1,-0.1], device = pyredner.get_device(), requires_grad=True)
bunny_rotation_matrix = pyredner.gen_rotate_matrix(bunny_rotation)
scene.shapes[-1].vertices = \
(bunny_vertices-torch.mean(bunny_vertices, 0))@torch.t(bunny_rotation_matrix) + \
torch.mean(bunny_vertices, 0) + bunny_translation
args=pyredner.RenderFunction.serialize_scene(\
scene = scene,
num_samples = 4,
max_bounces = 6)
img = render(1, *args)
pyredner.imwrite(img.cpu(), 'results/test_bunny_box/init.exr')
pyredner.imwrite(img.cpu(), 'results/test_bunny_box/init.png')
optimizer = torch.optim.Adam([bunny_translation, bunny_rotation], lr=1e-2)
for t in range(200):
print('iteration:', t)
optimizer.zero_grad()
# Forward pass: render the image
bunny_rotation_matrix = pyredner.gen_rotate_matrix(bunny_rotation)
scene.shapes[-1].vertices = \
(bunny_vertices-torch.mean(bunny_vertices, 0))@torch.t(bunny_rotation_matrix) + \
torch.mean(bunny_vertices, 0) + bunny_translation
args=pyredner.RenderFunction.serialize_scene(\
scene = scene,
num_samples = 4,
max_bounces = 6)
img = render(t+1, *args)
pyredner.imwrite(img.cpu(), 'results/test_bunny_box/iter_{}.png'.format(t))
dirac = np.zeros([7,7], dtype=np.float32)
dirac[3,3] = 1.0
dirac = torch.from_numpy(dirac)
f = np.zeros([3, 3, 7, 7], dtype=np.float32)
gf = scipy.ndimage.filters.gaussian_filter(dirac, 1.0)
f[0, 0, :, :] = gf
f[1, 1, :, :] = gf
f[2, 2, :, :] = gf
f = torch.from_numpy(f)
if pyredner.get_use_gpu():
f = f.cuda(device = pyredner.get_device())
m = torch.nn.AvgPool2d(2)
res = 256
diff_0 = (img - target).view(1, res, res, 3).permute(0, 3, 2, 1)
diff_1 = m(torch.nn.functional.conv2d(diff_0, f, padding=3)) # 128 x 128
diff_2 = m(torch.nn.functional.conv2d(diff_1, f, padding=3)) # 64 x 64
diff_3 = m(torch.nn.functional.conv2d(diff_2, f, padding=3)) # 32 x 32
diff_4 = m(torch.nn.functional.conv2d(diff_3, f, padding=3)) # 16 x 16
loss = diff_0.pow(2).sum() / (res*res) + \
diff_1.pow(2).sum() / ((res/2)*(res/2)) + \
diff_2.pow(2).sum() / ((res/4)*(res/4)) + \
diff_3.pow(2).sum() / ((res/8)*(res/8)) + \
diff_4.pow(2).sum() / ((res/16)*(res/16))
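    # diff_0..diff_4 above form a Gaussian blur-and-downsample pyramid of the
    # image difference; summing their resolution-normalized squared errors
    # gives a multi-scale image loss.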
print('loss:', loss.item())
loss.backward()
print('bunny_translation.grad:', bunny_translation.grad)
print('bunny_rotation.grad:', bunny_rotation.grad)
optimizer.step()
print('bunny_translation:', bunny_translation)
print('bunny_rotation:', bunny_rotation)
from subprocess import call
call(["ffmpeg", "-framerate", "24", "-i",
"results/bunny_box/iter_%d.png", "-vb", "20M",
"results/bunny_box/out.mp4"])
|
the-stack_106_18382
|
#! -*- coding: utf-8 -*-
# Custom layers
import tensorflow as tf
from bert4keras.backend import keras, K, is_string
from bert4keras.backend import get_all_attributes
# equivalent to: from keras.layers import *
locals().update(get_all_attributes(keras.layers))
initializers = keras.initializers
activations = keras.activations
def sequence_masking(x, mask, mode=0, axis=None, heads=1):
"""为序列条件mask的函数
mask: 形如(batch_size, seq_len)的0-1矩阵;
mode: 如果是0,则直接乘以mask;
如果是1,则在padding部分减去一个大正数。
axis: 序列所在轴,默认为1;
heads: 相当于batch这一维要被重复的次数。
"""
if mask is None or mode not in [0, 1]:
return x
else:
        if heads != 1:
mask = K.expand_dims(mask, 1)
mask = K.tile(mask, (1, heads, 1))
mask = K.reshape(mask, (-1, K.shape(mask)[2]))
if axis is None:
axis = 1
if axis == -1:
axis = K.ndim(x) - 1
        assert axis > 0, 'axis must be greater than 0'
for _ in range(axis - 1):
mask = K.expand_dims(mask, 1)
for _ in range(K.ndim(x) - K.ndim(mask) - axis + 1):
mask = K.expand_dims(mask, K.ndim(mask))
if mode == 0:
return x * mask
else:
return x - (1 - mask) * 1e12
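# --- Illustrative sketch (not part of the original library) ---
# A small numpy analogue of sequence_masking for the default axis and a 3D
# input, shown only to clarify the two masking modes; it is unused here.
def _sequence_masking_numpy_sketch(x, mask, mode=0):
    import numpy as np
    mask = np.expand_dims(mask, -1)  # (batch_size, seq_len) -> (batch_size, seq_len, 1)
    if mode == 0:
        return x * mask              # zero out the padding positions
    return x - (1 - mask) * 1e12     # push padding scores towards -inf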
class MultiHeadAttention(Layer):
"""多头注意力机制
"""
def __init__(self,
heads,
head_size,
key_size=None,
kernel_initializer='glorot_uniform',
**kwargs):
super(MultiHeadAttention, self).__init__(**kwargs)
self.heads = heads
self.head_size = head_size
self.out_dim = heads * head_size
self.key_size = key_size if key_size else head_size
self.kernel_initializer = initializers.get(kernel_initializer)
def build(self, input_shape):
super(MultiHeadAttention, self).build(input_shape)
self.q_dense = Dense(units=self.key_size * self.heads,
kernel_initializer=self.kernel_initializer)
self.k_dense = Dense(units=self.key_size * self.heads,
kernel_initializer=self.kernel_initializer)
self.v_dense = Dense(units=self.out_dim,
kernel_initializer=self.kernel_initializer)
self.o_dense = Dense(units=self.out_dim,
kernel_initializer=self.kernel_initializer)
def call(self, inputs, q_mask=False, v_mask=False, a_mask=False):
"""实现多头注意力
q_mask: 对输入的query序列的mask。
主要是将输出结果的padding部分置0。
v_mask: 对输入的value序列的mask。
主要是防止attention读取到padding信息。
a_mask: 对attention矩阵的mask。
不同的attention mask对应不同的应用。
"""
q, k, v = inputs[:3]
        # process the masks
idx = 3
if q_mask:
q_mask = inputs[idx]
idx += 1
else:
q_mask = None
if v_mask:
v_mask = inputs[idx]
idx += 1
else:
v_mask = None
if a_mask:
if len(inputs) > idx:
a_mask = inputs[idx]
else:
a_mask = 'history_only'
else:
a_mask = None
        # linear transformations
qw = self.q_dense(q)
kw = self.k_dense(k)
vw = self.v_dense(v)
        # reshape
qw = K.reshape(qw, (-1, K.shape(q)[1], self.heads, self.key_size))
kw = K.reshape(kw, (-1, K.shape(k)[1], self.heads, self.key_size))
vw = K.reshape(vw, (-1, K.shape(v)[1], self.heads, self.head_size))
        # permute dimensions
qw = K.permute_dimensions(qw, (0, 2, 1, 3))
kw = K.permute_dimensions(kw, (0, 2, 1, 3))
vw = K.permute_dimensions(vw, (0, 2, 1, 3))
        # flatten to rank-3 tensors
qw = K.reshape(qw, (-1, K.shape(q)[1], self.key_size))
kw = K.reshape(kw, (-1, K.shape(k)[1], self.key_size))
vw = K.reshape(vw, (-1, K.shape(v)[1], self.head_size))
# Attention
a = K.batch_dot(qw, kw, [2, 2]) / self.key_size**0.5
a = sequence_masking(a, v_mask, 1, -1, self.heads)
if a_mask is not None:
if is_string(a_mask) and a_mask == 'history_only':
ones = K.ones_like(a[:1])
a_mask = (ones - tf.linalg.band_part(ones, -1, 0)) * 1e12
a = a - a_mask
else:
a = a - (1 - a_mask) * 1e12
a = K.softmax(a)
        # assemble the output
o = K.batch_dot(a, vw, [2, 1])
o = K.reshape(o, (-1, self.heads, K.shape(q)[1], self.head_size))
o = K.permute_dimensions(o, (0, 2, 1, 3))
o = K.reshape(o, (-1, K.shape(o)[1], self.out_dim))
o = self.o_dense(o)
o = sequence_masking(o, q_mask, 0)
return o
def compute_output_shape(self, input_shape):
return (input_shape[0][0], input_shape[0][1], self.out_dim)
def get_config(self):
config = {
'heads': self.heads,
'head_size': self.head_size,
'key_size': self.key_size,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
}
base_config = super(MultiHeadAttention, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
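# --- Illustrative usage sketch (not part of the original library) ---
# A minimal sketch of calling the layer above for self-attention; shapes are
# arbitrary examples (heads * head_size matches the 512-d input here).
def _multi_head_attention_sketch():
    x = Input(shape=(None, 512))
    attention = MultiHeadAttention(heads=8, head_size=64)
    return attention([x, x, x])  # output shape: (batch_size, seq_len, 512)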
class LayerNormalization(Layer):
"""实现基本的Layer Norm,只保留核心运算部分
"""
def __init__(self, **kwargs):
super(LayerNormalization, self).__init__(**kwargs)
self.epsilon = K.epsilon() * K.epsilon()
def build(self, input_shape):
super(LayerNormalization, self).build(input_shape)
shape = (input_shape[-1], )
self.gamma = self.add_weight(shape=shape,
initializer='ones',
name='gamma')
self.beta = self.add_weight(shape=shape,
initializer='zeros',
name='beta')
def call(self, inputs):
mean = K.mean(inputs, axis=-1, keepdims=True)
variance = K.mean(K.square(inputs - mean), axis=-1, keepdims=True)
std = K.sqrt(variance + self.epsilon)
outputs = (inputs - mean) / std
outputs *= self.gamma
outputs += self.beta
return outputs
class PositionEmbedding(Layer):
"""定义位置Embedding,这里的Embedding是可训练的。
"""
def __init__(self,
input_dim,
output_dim,
merge_mode='add',
embeddings_initializer='zeros',
**kwargs):
super(PositionEmbedding, self).__init__(**kwargs)
self.input_dim = input_dim
self.output_dim = output_dim
self.merge_mode = merge_mode
self.embeddings_initializer = initializers.get(embeddings_initializer)
def build(self, input_shape):
super(PositionEmbedding, self).build(input_shape)
self.embeddings = self.add_weight(
name='embeddings',
shape=(self.input_dim, self.output_dim),
initializer=self.embeddings_initializer,
)
def call(self, inputs):
input_shape = K.shape(inputs)
batch_size, seq_len = input_shape[0], input_shape[1]
pos_embeddings = self.embeddings[:seq_len]
pos_embeddings = K.expand_dims(pos_embeddings, 0)
pos_embeddings = K.tile(pos_embeddings, [batch_size, 1, 1])
if self.merge_mode == 'add':
return inputs + pos_embeddings
else:
return K.concatenate([inputs, pos_embeddings])
def compute_output_shape(self, input_shape):
if self.merge_mode == 'add':
return input_shape
else:
            return input_shape[:2] + (input_shape[2] + self.output_dim, )
def get_config(self):
config = {
'input_dim': self.input_dim,
'output_dim': self.output_dim,
'merge_mode': self.merge_mode,
'embeddings_initializer': initializers.serialize(self.embeddings_initializer),
}
base_config = super(PositionEmbedding, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class FeedForward(Layer):
"""FeedForward层,其实就是两个Dense层的叠加
"""
def __init__(self,
units,
activation='relu',
kernel_initializer='glorot_uniform',
**kwargs):
super(FeedForward, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.kernel_initializer = initializers.get(kernel_initializer)
def build(self, input_shape):
super(FeedForward, self).build(input_shape)
output_dim = input_shape[-1]
self.dense_1 = Dense(units=self.units,
activation=self.activation,
kernel_initializer=self.kernel_initializer)
self.dense_2 = Dense(units=output_dim,
kernel_initializer=self.kernel_initializer)
def call(self, inputs):
x = self.dense_1(inputs)
x = self.dense_2(x)
return x
def get_config(self):
config = {
'units': self.units,
'activation': activations.serialize(self.activation),
'kernel_initializer': initializers.serialize(self.kernel_initializer),
}
base_config = super(FeedForward, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class EmbeddingDense(Layer):
"""运算跟Dense一致,但kernel用Embedding层的embeddings矩阵。
根据Embedding层的名字来搜索定位Embedding层。
"""
def __init__(self, embedding_name, activation='softmax', **kwargs):
super(EmbeddingDense, self).__init__(**kwargs)
self.embedding_name = embedding_name
self.activation = activations.get(activation)
def call(self, inputs):
if not hasattr(self, 'kernel'):
embedding_layer = inputs._keras_history[0]
if embedding_layer.name != self.embedding_name:
def recursive_search(layer):
"""递归向上搜索,根据名字找Embedding层
"""
last_layer = layer._inbound_nodes[0].inbound_layers
if isinstance(last_layer, list):
if len(last_layer) == 0:
return None
else:
last_layer = last_layer[0]
if last_layer.name == self.embedding_name:
return last_layer
else:
return recursive_search(last_layer)
embedding_layer = recursive_search(embedding_layer)
if embedding_layer is None:
raise Exception('Embedding layer not found')
self.kernel = K.transpose(embedding_layer.embeddings)
self.units = K.int_shape(self.kernel)[1]
self.bias = self.add_weight(name='bias',
shape=(self.units, ),
initializer='zeros')
outputs = K.dot(inputs, self.kernel)
outputs = K.bias_add(outputs, self.bias)
outputs = self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
return input_shape[:-1] + (self.units, )
def get_config(self):
config = {
'embedding_name': self.embedding_name,
'activation': activations.serialize(self.activation),
}
base_config = super(EmbeddingDense, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
custom_objects = {
'MultiHeadAttention': MultiHeadAttention,
'LayerNormalization': LayerNormalization,
'PositionEmbedding': PositionEmbedding,
'FeedForward': FeedForward,
'EmbeddingDense': EmbeddingDense
}
keras.utils.get_custom_objects().update(custom_objects)
|
the-stack_106_18383
|
from tensorflow.keras import backend
def get_center_crop_location(source, destination):
"""
Returns the center crop area of source which matches
with the destination size
Returns:
((top_crop, bottom_crop), (left_crop, right_crop))
"""
src_shape = backend.int_shape(source)
dst_shape = backend.int_shape(destination)
# Height to identify top and bottom crop
src_height = src_shape[1]
dst_height = dst_shape[1]
crop_height = src_height - dst_height
assert crop_height >= 0
half_height = int(crop_height / 2)
if (crop_height % 2) != 0:
# uneven cropping
crop_top, crop_bottom = half_height, half_height + 1
else:
# even cropping
crop_top, crop_bottom = half_height, half_height
# Width to identify left and right crop
src_width = src_shape[2]
dst_width = dst_shape[2]
crop_width = src_width - dst_width
assert crop_width >= 0
half_width = int(crop_width/2)
if (crop_width % 2) != 0:
# uneven cropping
crop_left, crop_right = half_width, half_width + 1
else:
# even cropping
crop_left, crop_right = half_width, half_width
# Return the final cropping rectangle
return (crop_top, crop_bottom), (crop_left, crop_right)
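# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of feeding the computed crop into a Cropping2D layer, as is
# commonly done for U-Net style skip connections; the names are assumptions.
def crop_to_match(source, destination):
    from tensorflow.keras.layers import Cropping2D
    cropping = get_center_crop_location(source, destination)
    return Cropping2D(cropping=cropping)(source)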
|
the-stack_106_18386
|
# -*- coding: utf8 -*-
from __future__ import unicode_literals, division, absolute_import
from __builtin__ import object
import logging
import mock
import pytest
from flexget.plugins.api_t411 import T411RestClient, T411ObjectMapper, T411Proxy, FriendlySearchQuery, ApiError
from flexget.utils.qualities import Requirements
log = logging.getLogger('test_t411')
class MockRestClient(object):
search_result = {
"query": "Mickey",
"total": 1,
"offset": 0,
"limit": 10,
"torrents": [{
"id": 123123,
"name": "Mickey vs Donald S01E01 1080p dd5.1 10bit",
"category": "14",
"seeders": "11",
"leechers": "2",
"comments": "8",
"isVerified": "1",
"added": "2013-01-15 16:14:14",
"size": "2670809119",
"times_completed": "1256",
"owner": "7589510",
"categoryname": "Animation",
"categoryimage": "t411-animation.png",
"username": "MegaUsername",
"privacy": "normal"
}]
}
cat_result = {
"12": { # Category ID index
"id": "12", # Category ID
"pid": "0", # Parent's catogory ID
"name": "video",
"cats": { # Subcategories
"13": {"id": "13", "pid": "12", "name": "films"},
"14": {"id": "14", "pid": "12", "name": "cartoons"}
}
}
}
term_result = {
"14": { # Category ID
"11": { # Term type ID
"type": "Application - Genre", # Term type definition
"mode": "single",
"terms": { # Terms of the term type
"123": "Antivirus",
"345": "Torrent clients"
}
}, "7": {
"type": "Vidéo - Qualité",
"mode": "single",
"terms": {"12": "TVripHD 720 [Rip HD depuis Source HD]"}
}
}
}
details_result = {
"id": 123123,
"name": "Mock Title 720p",
"category": 14,
"terms": {
"Application - Genre": "Antivirus",
"Vidéo - Qualité": "TVripHD 720 [Rip HD depuis Source HD]"
}
}
def __init__(self):
self.details_called = False
self.api_token = "LOL:CAT:TOKEN"
def auth(self):
return
def is_authenticated(self):
return True
def retrieve_category_tree(self):
return MockRestClient.cat_result
def retrieve_terms_tree(self):
return MockRestClient.term_result
def search(self, query):
self.last_query = query
return MockRestClient.search_result
def details(self, torrent_id):
assert torrent_id == 123123
self.details_called = True
return MockRestClient.details_result
class TestRestClient(object):
credentials = {'username': 'set', 'password': 'this'}
api_token = 'you must set this value for online test'
def build_unauthenticated_client(self):
client = T411RestClient()
client.credentials = TestRestClient.credentials
del client.web_session.headers['Accept-Encoding']
return client
def build_authenticated_client(self):
client = T411RestClient()
client.set_api_token(TestRestClient.api_token)
del client.web_session.headers['Accept-Encoding']
return client
def test_init_state(self):
client = self.build_unauthenticated_client()
assert not client.is_authenticated()
@pytest.mark.online
def test_auth(self):
client = self.build_unauthenticated_client()
client.auth()
assert client.is_authenticated(), 'Client is not authenticated (are you using mocked credentials online?)'
@pytest.mark.online
def test_retrieve_categories(self):
client = self.build_authenticated_client()
json_tree_categories = client.retrieve_category_tree()
json_category = json_tree_categories.get('210')
assert json_category is not None, 'Category with id 210 wasn\'t found'
assert json_category.get('id') == '210'
assert json_category.get('pid') == '0'
assert json_category.get('name') == u'Film/Vidéo'
json_sub_categories = json_category.get('cats')
assert json_sub_categories is not None, 'Cannot found excepted subcategories'
json_sub_category = json_sub_categories.get('631')
assert json_sub_category is not None
assert json_sub_category.get('name') == 'Film'
@pytest.mark.online
def test_retrieve_terms(self):
client = self.build_authenticated_client()
json_terms = client.retrieve_terms_tree()
assert json_terms is not None
assert json_terms.get('234') is not None
term_type = json_terms.get('234').get('11')
assert term_type is not None
assert term_type.get('type') == 'Application - Genre'
assert term_type.get('mode') == 'single'
@pytest.mark.online
def test_malformed_search_response(self):
"""
Search without expression produces server response
that contains some error messages. This test check
if this case is properly handled
:return:
"""
client = self.build_authenticated_client()
search_result = client.search({})
assert search_result.get('query') is None
assert search_result.get('limit') == 10
@pytest.mark.online
def test_error_message_handler(self):
exception_was_raised = False
client = T411RestClient()
client.set_api_token('LEAVE:THIS:TOKEN:FALSE')
del client.web_session.headers['Accept-Encoding']
try:
client.details(666666)
except ApiError as e:
exception_was_raised = True
assert e.code == 202
pass
assert exception_was_raised
class TestObjectMapper(object):
def test_map_category(self):
category = T411ObjectMapper().map_category({
u'pid': u'0',
u'id': u'210',
u'name': u'Film/Vidéo',
u'cats': {
u'631': {u'pid': u'210', u'id': u'631', u'name': u'Film'},
u'633': {u'pid': u'210', u'id': u'633', u'name': u'Concert'},
u'634': {u'pid': u'210', u'id': u'634', u'name': u'Documentaire'},
u'635': {u'pid': u'210', u'id': u'635', u'name': u'Spectacle'},
u'636': {u'pid': u'210', u'id': u'636', u'name': u'Sport'},
u'637': {u'pid': u'210', u'id': u'637', u'name': u'Animation Série'},
u'639': {u'pid': u'210', u'id': u'639', u'name': u'Emission TV'},
u'455': {u'pid': u'210', u'id': u'455', u'name': u'Animation'},
u'402': {u'pid': u'210', u'id': u'402', u'name': u'Vidéo-clips'},
u'433': {u'pid': u'210', u'id': u'433', u'name': u'Série TV'}
}
})
assert category.id == 210
assert category.parent_id is None
assert category.name == u'Film/Vidéo'
assert len(category.sub_categories) == 10
def test_map_term_type_tree(self):
tree = {
"234": {
"11": {
"type": "Application - Genre",
"mode": "single",
"terms": {
"158": "Edition multim\u00e9dia",
"126": "Administration",
"190": "Utilitaire",
"169": "Lecteur multim\u00e9dia",
"137": "Aspiration de site",
"180": "Registre",
"148": "Communaut\u00e9"
}
},
"43": {
"type": "Langue",
"mode": "single",
"terms": {
"729": "Fran\u00e7ais",
"730": "Anglais",
"731": "Multi (Fran\u00e7ais inclus)",
"830": "Japonais"
}
}
}
}
category_to_term_type, term_types = T411ObjectMapper().map_term_type_tree(tree)
assert (234, 11) in category_to_term_type
assert (234, 43) in category_to_term_type
        assert 11 in term_types
        assert 43 in term_types
assert term_types.get(11).mode == 'single'
assert term_types.get(11).name == 'Application - Genre', \
'Expected "Application - Genre", found "%s"' % term_types.get(11).name
assert len(term_types.get(11).terms) == 7
assert term_types.get(11).terms[0].name == "Edition multimédia"
class TestProxy(object):
def test_offline_proxy(self):
proxy = T411Proxy()
proxy.rest_client = MockRestClient()
assert not proxy.has_cached_criterias()
proxy.synchronize_database()
assert proxy.has_cached_criterias()
assert 'cartoons' in proxy.all_category_names()
query = FriendlySearchQuery()
query.expression = "Mickey"
query.category_name = "cartoons"
query.term_names.append("Antivirus")
assert proxy.search(query)[0]['t411_torrent_id'] == 123123
assert (11,123) in proxy.rest_client.last_query['terms']
assert proxy.rest_client.last_query['category_id'] == 14
assert proxy.rest_client.last_query['expression'] == 'Mickey'
def test_details(self):
proxy = T411Proxy()
proxy.rest_client = MockRestClient()
details = proxy.details(123123)
assert proxy.rest_client.details_called == True
assert details.name == "Mock Title 720p"
assert details.terms[0].id == 12
# Session not still bound! assert details.terms[0].type.id == 7
proxy.rest_client.details_called = False
proxy.details(123123)
        assert proxy.rest_client.details_called == False, 'Proxy did not use the cache'
class TestInputPlugin(object):
config = """
tasks:
uncached_db:
series:
- Mickey vs Donald
t411:
category: cartoons
terms:
- Antivirus
t411_lookup: fill
"""
@mock.patch('flexget.plugins.api_t411.T411Proxy.set_credential')
@mock.patch('flexget.plugins.api_t411.T411RestClient.search')
@mock.patch('flexget.plugins.api_t411.T411RestClient.retrieve_terms_tree')
@mock.patch('flexget.plugins.api_t411.T411RestClient.retrieve_category_tree')
@mock.patch('flexget.plugins.api_t411.T411RestClient.details')
def test_schema(self, mock_details, mock_cat, mock_term, mock_search, mock_auth, execute_task):
mock_details.return_value = MockRestClient.details_result
mock_cat.return_value = MockRestClient.cat_result
mock_term.return_value = MockRestClient.term_result
mock_search.return_value = MockRestClient.search_result
mock_auth.return_value = None
task = execute_task('uncached_db')
log.debug(task.all_entries)
assert len(task.all_entries) == 1
entry = task.all_entries[0]
quality = entry.get('quality')
assert quality is not None
log.debug(quality)
quality_tester = Requirements('1080p hdtv 10bit dd5.1')
assert quality_tester.allows(quality)
|
the-stack_106_18388
|
import os
import base64
import logging
import binascii
import datetime
import traceback
import urllib.request
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.x509.oid import ExtensionOID, NameOID, AuthorityInformationAccessOID
from .certstore import CertStore
from .crl import CRL
from .errors import InvalidCertificateDataTypeException, InvalidCertificateException, InvalidCRLException
class Cert(object):
def __init__(self, cert_data, data_type, cert_store=CertStore()):
self._cert = self._cert_load(cert_data, data_type)
self._cert_store = cert_store
self._subject = self._cert_subject()
self._issuer = self._cert_issuer()
self._fingerprint = self._cert_fingerprint()
self._pub_key = self._cert_pub_key()
self._subject_key_id = self._cert_subject_key_identifier()
self._authority_key_id = self._cert_authority_key_identifier()
self._root = self._cert_is_root()
self._aia = self._cert_aia()
self._ocsp = self._cert_ocsp()
self._crl = self._cert_crl()
self._revoked = self._cert_is_revoked()
def _cert_load(self, cert_data, data_type):
if data_type not in ('PEM', 'DER'):
raise InvalidCertificateDataTypeException('certificate type %s unknown' % data_type)
try:
if data_type == "PEM":
return x509.load_pem_x509_certificate(cert_data, default_backend())
elif data_type == "DER":
return x509.load_der_x509_certificate(cert_data, default_backend())
except Exception as e:
raise InvalidCertificateException('certificate cannot be loaded: %s' % str(e)) from e
def _cert_subject(self):
return ",".join([x.value for x in self._cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)])
def _cert_issuer(self):
return ",".join([x.value for x in self._cert.issuer.get_attributes_for_oid(NameOID.COMMON_NAME)])
def _cert_is_root(self):
if len(self._cert.subject) != len(self._cert.issuer):
return False
else:
for sub_attr in self._cert.subject:
iss_attrs = self._cert.issuer.get_attributes_for_oid(sub_attr.oid)
if not sub_attr.value in [iss_attr.value for iss_attr in iss_attrs]:
return False
return True
def _cert_pub_key(self):
return self._cert.public_key()
def _cert_fingerprint(self):
return binascii.hexlify(self._cert.fingerprint(hashes.SHA256())).decode('utf-8')
def __str__(self):
return "subject=%s fingerprint=%s" % (self._subject, self._fingerprint)
def _cert_subject_key_identifier(self):
ski = None
try:
ski_ext = self._cert.extensions.get_extension_for_oid(ExtensionOID.SUBJECT_KEY_IDENTIFIER)
ski = binascii.hexlify(ski_ext.value.digest).decode('utf-8')
except x509.ExtensionNotFound:
logging.debug("Subject Key Identifier extension not found for cert: %s" % self)
return ski
def _cert_authority_key_identifier(self):
aki = None
try:
aki_ext = self._cert.extensions.get_extension_for_oid(ExtensionOID.AUTHORITY_KEY_IDENTIFIER)
aki = binascii.hexlify(aki_ext.value.key_identifier).decode('utf-8')
except x509.ExtensionNotFound:
logging.debug("Authority Key Identifier extension not found for cert: %s" % self)
return aki
def _cert_aia(self):
aia = None
if self._cert and not self._root:
aia_ext = self._cert.extensions.get_extension_for_oid(ExtensionOID.AUTHORITY_INFORMATION_ACCESS)
for ad in aia_ext.value:
if ad.access_method == AuthorityInformationAccessOID.CA_ISSUERS:
aia_uri = ad.access_location.value
aia_data = None
try:
with urllib.request.urlopen(aia_uri) as response:
aia_data = response.read()
if aia_data:
aia = MyCert(aia_data, "DER", self._cert_store)
break
except Exception as e:
logging.debug("%s AIA uri %s download error: %s" % (self.__str__(), aia_uri, e))
if aia == None:
aia = self._cert_store.get_cert(self._issuer)
return aia
def _cert_ocsp(self):
ocsp = None
if self._cert and not self._root:
ocsp_ext = self._cert.extensions.get_extension_for_oid(ExtensionOID.AUTHORITY_INFORMATION_ACCESS)
for ad in ocsp_ext.value:
if ad.access_method == AuthorityInformationAccessOID.OCSP:
ocsp_uri = ad.access_location.value
ocsp_data = None
try:
ocsp_req_builder = x509.ocsp.OCSPRequestBuilder()
ocsp_req_builder = ocsp_req_builder.add_certificate(self._cert, self._aia.raw_cert(), hashes.SHA1())
ocsp_req = ocsp_req_builder.build()
ocsp_req = base64.b64encode(ocsp_req.public_bytes(serialization.Encoding.DER)).decode('utf-8')
ocsp_uri = ocsp_uri + '/' + ocsp_req
with urllib.request.urlopen(ocsp_uri) as response:
ocsp_data = response.read()
if ocsp_data:
ocsp = x509.ocsp.load_der_ocsp_response(ocsp_data)
break
except Exception as e:
logging.debug("%s OCSP uri %s error: %s" % (self.__str__(), ocsp_uri, e))
#traceback.print_exc()
return ocsp
def _cert_crl(self):
crl = None
if self._cert and not self._root:
crl_ext = self._cert.extensions.get_extension_for_oid(ExtensionOID.CRL_DISTRIBUTION_POINTS)
for dp in crl_ext.value:
if crl:
continue
for crl_uri in dp.full_name:
crl_uri = crl_uri.value
crl_data = None
if crl_uri.endswith('.crl'):
try:
with urllib.request.urlopen(crl_uri) as response:
crl_data = response.read()
if crl_data:
crl = MyCRL(crl_data, "DER")
break
except Exception as e:
logging.debug("%s CRL uri %s error: %s" %(self.__str__(), crl_uri, e))
#traceback.print_exc()
return crl
def is_expired(self):
if self._cert:
d = datetime.datetime.now()
return not ((d > self._cert.not_valid_before) and (d < self._cert.not_valid_after))
return None
def cert_pub_key(self):
return self._pub_key
def _cert_is_revoked(self):
if self._cert:
if self._root:
return False
if self._ocsp and self._ocsp.response_status == x509.ocsp.OCSPResponseStatus.SUCCESSFUL:
# Add OCSP response signature verification and time check
                if self._ocsp.certificate_status == x509.ocsp.OCSPCertStatus.REVOKED:
                    return True
                elif self._ocsp.certificate_status == x509.ocsp.OCSPCertStatus.UNKNOWN:
                    return False
                elif self._ocsp.certificate_status == x509.ocsp.OCSPCertStatus.GOOD:
                    return False
if self._crl:
if self._aia:
if self._crl.is_expired():
raise InvalidCRLException("%s CRL has expired" % (self.__str__()))
if self._crl.is_valid(self._aia.cert_pub_key()):
return self._crl.is_revoked(self._cert.serial_number)
else:
raise InvalidCRLException("%s CRL cannot be verified with cert %s" % (self.__str__(), self.aia.__str__()))
return None
    def is_root(self):
        return self._root
    def is_revoked(self):
        return self._revoked
def ocsp_revoked(self):
if self._cert:
if self._ocsp:
return self._ocsp.certificate_status == x509.ocsp.OCSPCertStatus.REVOKED
return None
def cert_is_valid(self):
valid = True
if self._cert:
c = self
while not c == None and valid:
logging.debug("Checking cert: %s" % c)
logging.debug("\troot: %s" % c.is_root())
logging.debug("\texpired: %s" % c.is_expired())
logging.debug("\trevoked: %s" % c.is_revoked())
logging.debug("\tocsp response: %s" % c.ocsp_revoked())
if c.is_expired():
valid = False
if not c.is_root() and c.is_revoked() == True:
valid = False
if not c.is_root() and c._aia == None:
valid = False
c = c._aia
else:
valid = False
return valid
def raw_cert(self):
return self._cert
def cert_subject(self):
return self._subject
def cert_fingerprint(self):
return self._fingerprint
|
the-stack_106_18389
|
from __future__ import absolute_import, division, print_function
try:
import boost_adaptbx.boost.python as bp
except Exception:
ext = None
else:
ext = bp.import_ext("fable_ext", optional=True)
from six.moves import range
# compare with fem/utils/string.hpp
def py_fem_utils_unsigned_integer_scan(code, start=0, stop=-1):
i = start
while (i < stop):
c = code[i]
if (not c.isdigit()): break
i += 1
if (i == start): return -1
return i
# compare with ext.cpp
def py_ext_get_code_stop(code, stop):
len_code = len(code)
if (stop < 0): return len_code
assert stop <= len_code
return stop
# compare with ext.cpp
def py_unsigned_integer_scan(code, start=0, stop=-1):
return py_fem_utils_unsigned_integer_scan(
code=code, start=start, stop=py_ext_get_code_stop(code, stop))
# compare with ext.cpp
def py_floating_point_scan_after_exponent_char(code, start=0, stop=-1):
code_stop = py_ext_get_code_stop(code=code, stop=stop)
i = start
if (i < code_stop):
c = code[i]
if (c == '+' or c == '-'):
i += 1
return py_unsigned_integer_scan(code=code, start=i, stop=stop)
return -1
# compare with ext.cpp
def py_floating_point_scan_after_dot(code, start=0, stop=-1):
code_stop = py_ext_get_code_stop(code=code, stop=stop)
i = py_unsigned_integer_scan(code=code, start=start, stop=stop)
if (i < 0): i = start
if (i < code_stop):
c = code[i]
if (c == 'e' or c == 'd'):
return py_floating_point_scan_after_exponent_char(
code=code, start=i+1, stop=stop)
return i
# compare with ext.cpp
def py_identifier_scan(code, start=0, stop=-1):
code_stop = py_ext_get_code_stop(code=code, stop=stop)
i = start
if (i < code_stop):
c = code[i]; i += 1
if ((c < 'a' or c > 'z') and c != '_'): return -1
while (i < code_stop):
c = code[i]; i += 1
if ( (c < 'a' or c > 'z')
and (c < '0' or c > '9') and c != '_'): return i-1
return i
return -1
def py_find_closing_parenthesis(code, start=0, stop=-1):
code_stop = py_ext_get_code_stop(code=code, stop=stop)
n_inner = 0
for i in range(start, code_stop):
c = code[i]
if (c == ')'):
if (n_inner == 0): return i
n_inner -= 1
elif (c == '('):
n_inner += 1
return -1
if (ext is not None):
from fable_ext import *
else:
unsigned_integer_scan = py_unsigned_integer_scan
floating_point_scan_after_exponent_char = \
py_floating_point_scan_after_exponent_char
floating_point_scan_after_dot = py_floating_point_scan_after_dot
identifier_scan = py_identifier_scan
find_closing_parenthesis = py_find_closing_parenthesis
class SemanticError(Exception): pass
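# --- Illustrative examples (not part of the original module) ---
# Small sanity checks for the pure-Python scanners above: the integer and
# identifier scanners return the index one past the token (or -1 on failure),
# while find_closing_parenthesis returns the index of the matching ')'.
def _scanner_examples():
    assert py_unsigned_integer_scan("123abc") == 3
    assert py_unsigned_integer_scan("abc") == -1
    assert py_identifier_scan("foo1 bar") == 4
    assert py_find_closing_parenthesis("f(a(b),c)", start=2) == 8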
|
the-stack_106_18392
|
from spotify_api import API
token = open('token.txt', 'r').read()
api = API(token)
targetUser = api.get_user(input("Enter User ID: "))
playlists = targetUser.get_playlists()
print(f'Playlist Analysis for "{targetUser.name}":\n')
tot = []
for playlist in playlists:
exp = []
for track in playlist.get_tracks():
exp.append(track.explicit)
track.analyze()
print(track.tempo)
print(f'Playlist: {playlist.name}\n{exp.count(True)}/{len(exp)} ({exp.count(True)/len(exp)*100:.2f}%) Explicit!\n')
tot.extend(exp)
print(f'User "{targetUser.name}" Total:\n{tot.count(True)}/{len(tot)} ({tot.count(True)/len(tot)*100:.2f}%) Explicit!')
|
the-stack_106_18393
|
import math
import numpy as np
import scipy.interpolate
# motion parameter
L = 1.0 # wheel base
ds = 0.1 # course distanse
v = 1 # (5.4 / 3.6) # velocity [m/s] | [km/h]/3.6 = [m/s]
# MoCap room
# [x_min, x_max, y_min, y_max]
# play_area=[-2.2, 2.3, -1.97, 1.58]
class State:
def __init__(self, x=0.0, y=0.0, yaw=0.0, v=0.0):
self.x = x
self.y = y
self.yaw = yaw
self.v = v
def pi_2_pi(angle):
return (angle + math.pi) % (2 * math.pi) - math.pi
def update(state, v, delta, dt, L):
state.v = v
state.x = state.x + state.v * math.cos(state.yaw) * dt
state.y = state.y + state.v * math.sin(state.yaw) * dt
state.yaw = state.yaw + state.v / L * math.tan(delta) * dt
state.yaw = pi_2_pi(state.yaw)
# in_degree = str(np.rad2deg(state.yaw))[1:5]
# print(f"\n--> Yaw: {in_degree}°")
return state
def generate_trajectory(s, km, kf, k0):
n = s / ds
time = s / v # [s]
if isinstance(time, type(np.array([]))):
time = time[0]
if isinstance(km, type(np.array([]))):
km = km[0]
if isinstance(kf, type(np.array([]))):
kf = kf[0]
tk = np.array([0.0, time / 2.0, time])
kk = np.array([k0, km, kf])
t = np.arange(0.0, time, time / n)
fkp = scipy.interpolate.interp1d(tk, kk, kind="quadratic")
kp = [fkp(ti) for ti in t]
dt = float(time / n)
# plt.plot(t, kp)
# plt.show()
state = State()
x, y, yaw = [state.x], [state.y], [state.yaw]
for ikp in kp:
state = update(state, v, ikp, dt, L)
x.append(state.x)
y.append(state.y)
yaw.append(state.yaw)
return x, y, yaw, dt
def generate_last_state(s, km, kf, k0):
n = s / ds
time = s / v # [s]
if isinstance(time, type(np.array([]))):
time = time[0]
if isinstance(km, type(np.array([]))):
km = km[0]
if isinstance(kf, type(np.array([]))):
kf = kf[0]
tk = np.array([0.0, time / 2.0, time])
kk = np.array([k0, km, kf])
t = np.arange(0.0, time, time / n)
fkp = scipy.interpolate.interp1d(tk, kk, kind="quadratic")
kp = [fkp(ti) for ti in t]
dt = time / n
# plt.plot(t, kp)
# plt.show()
state = State()
_ = [update(state, v, ikp, dt, L) for ikp in kp]
return state.x, state.y, state.yaw
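# Minimal usage sketch (not part of the original module): with all curvature
# knots at zero the model drives straight along x; the arc length of 5.0 m is
# an illustrative assumption.
if __name__ == "__main__":
    xs, ys, yaws, dt = generate_trajectory(s=5.0, km=0.0, kf=0.0, k0=0.0)
    print(f"end point: ({xs[-1]:.2f}, {ys[-1]:.2f}), heading {yaws[-1]:.2f} rad, dt={dt:.3f} s")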
|
the-stack_106_18396
|
# -*- coding: utf-8 -*-
#
# Copyright © 2011-2013 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
"""
Spyder base configuration management
As opposed to spyderlib/config.py, this configuration script deals
exclusively with non-GUI feature configuration
(in other words, we won't import any PyQt object here, avoiding any
sip API incompatibility issue in spyderlib's non-gui modules)
"""
from __future__ import print_function
import os.path as osp
import os
import sys
# Local imports
from spyderlib import __version__
from spyderlib.utils import encoding
from spyderlib.py3compat import (is_unicode, TEXT_TYPES, INT_TYPES, PY3,
to_text_string, is_text_string)
#==============================================================================
# Only for development
#==============================================================================
# To activate/deactivate certain things for development
# SPYDER_DEV is (and *only* has to be) set in bootstrap.py
DEV = os.environ.get('SPYDER_DEV')
# For testing purposes
# SPYDER_TEST can be set using the --test option of bootstrap.py
TEST = os.environ.get('SPYDER_TEST')
#==============================================================================
# Debug helpers
#==============================================================================
STDOUT = sys.stdout
STDERR = sys.stderr
def _get_debug_env():
debug_env = os.environ.get('SPYDER_DEBUG', '')
if not debug_env.isdigit():
debug_env = bool(debug_env)
return int(debug_env)
DEBUG = _get_debug_env()
def debug_print(*message):
"""Output debug messages to stdout"""
if DEBUG:
ss = STDOUT
print(*message, file=ss)
#==============================================================================
# Configuration paths
#==============================================================================
# Spyder settings dir
if TEST is None:
SUBFOLDER = '.spyder%s' % __version__.split('.')[0]
else:
SUBFOLDER = 'spyder_test'
# We can't have PY2 and PY3 settings in the same dir because:
# 1. This leads to ugly crashes and freezes (e.g. by trying to
# embed a PY2 interpreter in PY3)
# 2. We need to save the list of installed modules (for code
# completion) separately for each version
if PY3:
SUBFOLDER = SUBFOLDER + '-py3'
def get_home_dir():
"""
Return user home directory
"""
try:
# expanduser() returns a raw byte string which needs to be
# decoded with the codec that the OS is using to represent file paths.
path = encoding.to_unicode_from_fs(osp.expanduser('~'))
except:
path = ''
for env_var in ('HOME', 'USERPROFILE', 'TMP'):
if osp.isdir(path):
break
# os.environ.get() returns a raw byte string which needs to be
# decoded with the codec that the OS is using to represent environment
# variables.
path = encoding.to_unicode_from_fs(os.environ.get(env_var, ''))
if path:
return path
else:
raise RuntimeError('Please define environment variable $HOME')
def get_conf_path(filename=None):
"""Return absolute path for configuration file with specified filename"""
if TEST is None:
conf_dir = osp.join(get_home_dir(), SUBFOLDER)
else:
import tempfile
conf_dir = osp.join(tempfile.gettempdir(), SUBFOLDER)
if not osp.isdir(conf_dir):
os.mkdir(conf_dir)
if filename is None:
return conf_dir
else:
return osp.join(conf_dir, filename)
def get_module_path(modname):
"""Return module *modname* base path"""
return osp.abspath(osp.dirname(sys.modules[modname].__file__))
def get_module_data_path(modname, relpath=None, attr_name='DATAPATH'):
"""Return module *modname* data path
Note: relpath is ignored if module has an attribute named *attr_name*
Handles py2exe/cx_Freeze distributions"""
datapath = getattr(sys.modules[modname], attr_name, '')
if datapath:
return datapath
else:
datapath = get_module_path(modname)
parentdir = osp.join(datapath, osp.pardir)
if osp.isfile(parentdir):
# Parent directory is not a directory but the 'library.zip' file:
# this is either a py2exe or a cx_Freeze distribution
datapath = osp.abspath(osp.join(osp.join(parentdir, osp.pardir),
modname))
if relpath is not None:
datapath = osp.abspath(osp.join(datapath, relpath))
return datapath
def get_module_source_path(modname, basename=None):
"""Return module *modname* source path
If *basename* is specified, return *modname.basename* path where
*modname* is a package containing the module *basename*
*basename* is a filename (not a module name), so it must include the
file extension: .py or .pyw
Handles py2exe/cx_Freeze distributions"""
srcpath = get_module_path(modname)
parentdir = osp.join(srcpath, osp.pardir)
if osp.isfile(parentdir):
# Parent directory is not a directory but the 'library.zip' file:
# this is either a py2exe or a cx_Freeze distribution
srcpath = osp.abspath(osp.join(osp.join(parentdir, osp.pardir),
modname))
if basename is not None:
srcpath = osp.abspath(osp.join(srcpath, basename))
return srcpath
def is_py2exe_or_cx_Freeze():
"""Return True if this is a py2exe/cx_Freeze distribution of Spyder"""
return osp.isfile(osp.join(get_module_path('spyderlib'), osp.pardir))
SCIENTIFIC_STARTUP = get_module_source_path('spyderlib',
'scientific_startup.py')
#==============================================================================
# Image path list
#==============================================================================
IMG_PATH = []
def add_image_path(path):
if not osp.isdir(path):
return
global IMG_PATH
IMG_PATH.append(path)
for _root, dirs, _files in os.walk(path):
for dir in dirs:
IMG_PATH.append(osp.join(path, dir))
add_image_path(get_module_data_path('spyderlib', relpath='images'))
from spyderlib.otherplugins import PLUGIN_PATH
if PLUGIN_PATH is not None:
add_image_path(osp.join(PLUGIN_PATH, 'images'))
def get_image_path(name, default="not_found.png"):
"""Return image absolute path"""
for img_path in IMG_PATH:
full_path = osp.join(img_path, name)
if osp.isfile(full_path):
return osp.abspath(full_path)
if default is not None:
return osp.abspath(osp.join(img_path, default))
#==============================================================================
# Translations
#==============================================================================
def get_translation(modname, dirname=None):
"""Return translation callback for module *modname*"""
if dirname is None:
dirname = modname
locale_path = get_module_data_path(dirname, relpath="locale",
attr_name='LOCALEPATH')
# fixup environment var LANG in case it's unknown
if "LANG" not in os.environ:
import locale
lang = locale.getdefaultlocale()[0]
if lang is not None:
os.environ["LANG"] = lang
import gettext
try:
_trans = gettext.translation(modname, locale_path, codeset="utf-8")
lgettext = _trans.lgettext
def translate_gettext(x):
if not PY3 and is_unicode(x):
x = x.encode("utf-8")
y = lgettext(x)
if is_text_string(y) and PY3:
return y
else:
return to_text_string(y, "utf-8")
return translate_gettext
except IOError as _e: # analysis:ignore
#print "Not using translations (%s)" % _e
def translate_dumb(x):
if not is_unicode(x):
return to_text_string(x, "utf-8")
return x
return translate_dumb
# Translation callback
_ = get_translation("spyderlib")
#==============================================================================
# Namespace Browser (Variable Explorer) configuration management
#==============================================================================
def get_supported_types():
"""
    Return a dictionary containing the type lists supported by the
namespace browser:
dict(picklable=picklable_types, editable=editables_types)
See:
get_remote_data function in spyderlib/widgets/externalshell/monitor.py
get_internal_shell_filter method in namespacebrowser.py
Note:
If you update this list, don't forget to update doc/variablexplorer.rst
"""
from datetime import date
editable_types = [int, float, complex, list, dict, tuple, date
] + list(TEXT_TYPES) + list(INT_TYPES)
try:
from numpy import ndarray, matrix, generic
editable_types += [ndarray, matrix, generic]
except ImportError:
pass
try:
from pandas import DataFrame, TimeSeries
editable_types += [DataFrame, TimeSeries]
except ImportError:
pass
picklable_types = editable_types[:]
try:
from spyderlib.pil_patch import Image
editable_types.append(Image.Image)
except ImportError:
pass
return dict(picklable=picklable_types, editable=editable_types)
# Variable explorer display / check all elements data types for sequences:
# (when saving the variable explorer contents, check_all is True,
# see widgets/externalshell/namespacebrowser.py:NamespaceBrowser.save_data)
CHECK_ALL = False #XXX: If True, this may take too long to compute...
EXCLUDED_NAMES = ['nan', 'inf', 'infty', 'little_endian', 'colorbar_doc',
'typecodes', '__builtins__', '__main__', '__doc__', 'NaN',
'Inf', 'Infinity', 'sctypes', 'rcParams', 'rcParamsDefault',
'sctypeNA', 'typeNA', 'False_', 'True_',]
#==============================================================================
# Mac application utilities
#==============================================================================
if PY3:
MAC_APP_NAME = 'Spyder.app'
else:
MAC_APP_NAME = 'Spyder-Py2.app'
def running_in_mac_app():
if sys.platform == "darwin" and MAC_APP_NAME in __file__:
return True
else:
return False
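# Usage sketch (illustrative, assuming this module is importable as
# spyderlib.baseconfig and that the file names below exist):
#
#     from spyderlib.baseconfig import get_conf_path, get_image_path, _
#     ini_path = get_conf_path('spyder.ini')   # resolved inside the settings dir
#     icon = get_image_path('python.png')      # falls back to not_found.png
#     print(_("Variable explorer"))            # translated UI string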
|
the-stack_106_18397
|
"""
Support for information about the German train system.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.deutsche_bahn/
"""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['schiene==0.18']
_LOGGER = logging.getLogger(__name__)
CONF_DESTINATION = 'to'
CONF_START = 'from'
ICON = 'mdi:train'
SCAN_INTERVAL = timedelta(minutes=2)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_DESTINATION): cv.string,
vol.Required(CONF_START): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Deutsche Bahn Sensor."""
start = config.get(CONF_START)
destination = config.get(CONF_DESTINATION)
add_devices([DeutscheBahnSensor(start, destination)], True)
class DeutscheBahnSensor(Entity):
"""Implementation of a Deutsche Bahn sensor."""
def __init__(self, start, goal):
"""Initialize the sensor."""
self._name = '{} to {}'.format(start, goal)
self.data = SchieneData(start, goal)
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Return the icon for the frontend."""
return ICON
@property
def state(self):
"""Return the departure time of the next train."""
return self._state
@property
def device_state_attributes(self):
"""Return the state attributes."""
connections = self.data.connections[0]
connections['next'] = self.data.connections[1]['departure']
connections['next_on'] = self.data.connections[2]['departure']
return connections
def update(self):
"""Get the latest delay from bahn.de and updates the state."""
self.data.update()
self._state = self.data.connections[0].get('departure', 'Unknown')
if self.data.connections[0]['delay'] != 0:
self._state += " + {}".format(self.data.connections[0]['delay'])
class SchieneData(object):
"""Pull data from the bahn.de web page."""
def __init__(self, start, goal):
"""Initialize the sensor."""
import schiene
self.start = start
self.goal = goal
self.schiene = schiene.Schiene()
self.connections = [{}]
def update(self):
"""Update the connection data."""
self.connections = self.schiene.connections(
self.start, self.goal, dt_util.as_local(dt_util.utcnow()))
for con in self.connections:
# Detail info is not useful. Having a more consistent interface
# simplifies usage of template sensors.
if 'details' in con:
con.pop('details')
delay = con.get('delay', {'delay_departure': 0,
'delay_arrival': 0})
# IMHO only delay_departure is useful
con['delay'] = delay['delay_departure']
con['ontime'] = con.get('ontime', False)
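# Example configuration.yaml entry for this platform (station names are
# illustrative; 'from' and 'to' are the required options defined above):
#
#     sensor:
#       - platform: deutsche_bahn
#         from: Hamburg Hbf
#         to: Berlin Hbf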
|
the-stack_106_18398
|
from SBAgent import SBAgent
from SBEnvironment.SBEnvironmentWrapper import SBEnvironmentWrapper
# for using reward as score and 50 times faster game play
env = SBEnvironmentWrapper(reward_type="score", speed=50)
level_list = [1, 2, 3] # level list for the agent to play
dummy_agent = SBAgent(env=env, level_list=level_list) # initialise agent
dummy_agent.state_representation_type = 'image'  # use image representation as the state
env.make(agent=dummy_agent, start_level=dummy_agent.level_list[0],
state_representation_type=dummy_agent.state_representation_type) # initialise the environment
s, r, is_done, info = env.reset() # get ready for running
for level_idx in level_list:
is_done = False
while not is_done:
        s, r, is_done, info = env.step([-100, -100])  # agent always shoots at (-100, -100) relative to the slingshot
    env.current_level = level_idx + 1  # advance to the next level once the current one is finished
if env.current_level > level_list[-1]: # end the game when all game levels in the level list are played
break
    s, r, is_done, info = env.reload_current_level()  # go to the next level (current_level was incremented above)
|
the-stack_106_18399
|
"""
SE-ResNet for CIFAR/SVHN, implemented in TensorFlow.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['CIFARSEResNet', 'seresnet20_cifar10', 'seresnet20_cifar100', 'seresnet20_svhn',
'seresnet56_cifar10', 'seresnet56_cifar100', 'seresnet56_svhn',
'seresnet110_cifar10', 'seresnet110_cifar100', 'seresnet110_svhn',
'seresnet164bn_cifar10', 'seresnet164bn_cifar100', 'seresnet164bn_svhn',
'seresnet272bn_cifar10', 'seresnet272bn_cifar100', 'seresnet272bn_svhn',
'seresnet542bn_cifar10', 'seresnet542bn_cifar100', 'seresnet542bn_svhn',
'seresnet1001_cifar10', 'seresnet1001_cifar100', 'seresnet1001_svhn',
'seresnet1202_cifar10', 'seresnet1202_cifar100', 'seresnet1202_svhn']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv3x3_block, flatten, is_channels_first
from .seresnet import SEResUnit
class CIFARSEResNet(tf.keras.Model):
"""
SE-ResNet model for CIFAR from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (32, 32)
Spatial size of the expected input image.
classes : int, default 10
Number of classification classes.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
in_channels=3,
in_size=(32, 32),
classes=10,
data_format="channels_last",
**kwargs):
super(CIFARSEResNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
self.data_format = data_format
self.features = tf.keras.Sequential(name="features")
self.features.add(conv3x3_block(
in_channels=in_channels,
out_channels=init_block_channels,
data_format=data_format,
name="init_block"))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = tf.keras.Sequential(name="stage{}".format(i + 1))
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(SEResUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bottleneck=bottleneck,
conv1_stride=False,
data_format=data_format,
name="unit{}".format(j + 1)))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AveragePooling2D(
pool_size=8,
strides=1,
data_format=data_format,
name="final_pool"))
self.output1 = nn.Dense(
units=classes,
input_dim=in_channels,
name="output1")
def call(self, x, training=None):
x = self.features(x, training=training)
x = flatten(x, self.data_format)
x = self.output1(x)
return x
def get_seresnet_cifar(classes,
blocks,
bottleneck,
model_name=None,
pretrained=False,
root=os.path.join("~", ".tensorflow", "models"),
**kwargs):
"""
Create SE-ResNet model for CIFAR with specific parameters.
Parameters:
----------
classes : int
Number of classification classes.
blocks : int
Number of blocks.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
assert (classes in [10, 100])
if bottleneck:
assert ((blocks - 2) % 9 == 0)
layers = [(blocks - 2) // 9] * 3
else:
assert ((blocks - 2) % 6 == 0)
layers = [(blocks - 2) // 6] * 3
channels_per_layers = [16, 32, 64]
init_block_channels = 16
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if bottleneck:
channels = [[cij * 4 for cij in ci] for ci in channels]
net = CIFARSEResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
classes=classes,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
(1,) + net.in_size + (in_channels,)
net.build(input_shape=input_shape)
net.load_weights(
filepath=get_model_file(
model_name=model_name,
local_model_store_dir_path=root))
return net
def seresnet20_cifar10(classes=10, **kwargs):
"""
SE-ResNet-20 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="seresnet20_cifar10", **kwargs)
def seresnet20_cifar100(classes=100, **kwargs):
"""
SE-ResNet-20 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="seresnet20_cifar100", **kwargs)
def seresnet20_svhn(classes=10, **kwargs):
"""
SE-ResNet-20 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="seresnet20_svhn", **kwargs)
def seresnet56_cifar10(classes=10, **kwargs):
"""
SE-ResNet-56 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="seresnet56_cifar10", **kwargs)
def seresnet56_cifar100(classes=100, **kwargs):
"""
SE-ResNet-56 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="seresnet56_cifar100", **kwargs)
def seresnet56_svhn(classes=10, **kwargs):
"""
SE-ResNet-56 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="seresnet56_svhn", **kwargs)
def seresnet110_cifar10(classes=10, **kwargs):
"""
SE-ResNet-110 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="seresnet110_cifar10", **kwargs)
def seresnet110_cifar100(classes=100, **kwargs):
"""
SE-ResNet-110 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="seresnet110_cifar100",
**kwargs)
def seresnet110_svhn(classes=10, **kwargs):
"""
SE-ResNet-110 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="seresnet110_svhn", **kwargs)
def seresnet164bn_cifar10(classes=10, **kwargs):
"""
SE-ResNet-164(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="seresnet164bn_cifar10",
**kwargs)
def seresnet164bn_cifar100(classes=100, **kwargs):
"""
SE-ResNet-164(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="seresnet164bn_cifar100",
**kwargs)
def seresnet164bn_svhn(classes=10, **kwargs):
"""
SE-ResNet-164(BN) model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="seresnet164bn_svhn", **kwargs)
def seresnet272bn_cifar10(classes=10, **kwargs):
"""
SE-ResNet-272(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="seresnet272bn_cifar10",
**kwargs)
def seresnet272bn_cifar100(classes=100, **kwargs):
"""
SE-ResNet-272(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="seresnet272bn_cifar100",
**kwargs)
def seresnet272bn_svhn(classes=10, **kwargs):
"""
SE-ResNet-272(BN) model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="seresnet272bn_svhn", **kwargs)
def seresnet542bn_cifar10(classes=10, **kwargs):
"""
SE-ResNet-542(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="seresnet542bn_cifar10",
**kwargs)
def seresnet542bn_cifar100(classes=100, **kwargs):
"""
SE-ResNet-542(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="seresnet542bn_cifar100",
**kwargs)
def seresnet542bn_svhn(classes=10, **kwargs):
"""
SE-ResNet-542(BN) model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="seresnet542bn_svhn", **kwargs)
def seresnet1001_cifar10(classes=10, **kwargs):
"""
SE-ResNet-1001 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="seresnet1001_cifar10",
**kwargs)
def seresnet1001_cifar100(classes=100, **kwargs):
"""
SE-ResNet-1001 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="seresnet1001_cifar100",
**kwargs)
def seresnet1001_svhn(classes=10, **kwargs):
"""
SE-ResNet-1001 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="seresnet1001_svhn", **kwargs)
def seresnet1202_cifar10(classes=10, **kwargs):
"""
SE-ResNet-1202 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="seresnet1202_cifar10",
**kwargs)
def seresnet1202_cifar100(classes=100, **kwargs):
"""
SE-ResNet-1202 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="seresnet1202_cifar100",
**kwargs)
def seresnet1202_svhn(classes=10, **kwargs):
"""
SE-ResNet-1202 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="seresnet1202_svhn", **kwargs)
def _test():
import numpy as np
import tensorflow.keras.backend as K
data_format = "channels_last"
# data_format = "channels_first"
pretrained = False
models = [
(seresnet20_cifar10, 10),
(seresnet20_cifar100, 100),
(seresnet20_svhn, 10),
(seresnet56_cifar10, 10),
(seresnet56_cifar100, 100),
(seresnet56_svhn, 10),
(seresnet110_cifar10, 10),
(seresnet110_cifar100, 100),
(seresnet110_svhn, 10),
(seresnet164bn_cifar10, 10),
(seresnet164bn_cifar100, 100),
(seresnet164bn_svhn, 10),
(seresnet272bn_cifar10, 10),
(seresnet272bn_cifar100, 100),
(seresnet272bn_svhn, 10),
(seresnet542bn_cifar10, 10),
(seresnet542bn_cifar100, 100),
(seresnet542bn_svhn, 10),
(seresnet1001_cifar10, 10),
(seresnet1001_cifar100, 100),
(seresnet1001_svhn, 10),
(seresnet1202_cifar10, 10),
(seresnet1202_cifar100, 100),
(seresnet1202_svhn, 10),
]
for model, classes in models:
net = model(pretrained=pretrained, data_format=data_format)
        batch_size = 14
        x = tf.random.normal((batch_size, 3, 32, 32) if is_channels_first(data_format) else (batch_size, 32, 32, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch_size, classes))
weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
print("m={}, {}".format(model.__name__, weight_count))
assert (model != seresnet20_cifar10 or weight_count == 274847)
assert (model != seresnet20_cifar100 or weight_count == 280697)
assert (model != seresnet20_svhn or weight_count == 274847)
assert (model != seresnet56_cifar10 or weight_count == 862889)
assert (model != seresnet56_cifar100 or weight_count == 868739)
assert (model != seresnet56_svhn or weight_count == 862889)
assert (model != seresnet110_cifar10 or weight_count == 1744952)
assert (model != seresnet110_cifar100 or weight_count == 1750802)
assert (model != seresnet110_svhn or weight_count == 1744952)
assert (model != seresnet164bn_cifar10 or weight_count == 1906258)
assert (model != seresnet164bn_cifar100 or weight_count == 1929388)
assert (model != seresnet164bn_svhn or weight_count == 1906258)
assert (model != seresnet272bn_cifar10 or weight_count == 3153826)
assert (model != seresnet272bn_cifar100 or weight_count == 3176956)
assert (model != seresnet272bn_svhn or weight_count == 3153826)
assert (model != seresnet542bn_cifar10 or weight_count == 6272746)
assert (model != seresnet542bn_cifar100 or weight_count == 6295876)
assert (model != seresnet542bn_svhn or weight_count == 6272746)
assert (model != seresnet1001_cifar10 or weight_count == 11574910)
assert (model != seresnet1001_cifar100 or weight_count == 11598040)
assert (model != seresnet1001_svhn or weight_count == 11574910)
assert (model != seresnet1202_cifar10 or weight_count == 19582226)
assert (model != seresnet1202_cifar100 or weight_count == 19588076)
assert (model != seresnet1202_svhn or weight_count == 19582226)
if __name__ == "__main__":
_test()
|
the-stack_106_18401
|
""" PyTorch Lamb optimizer w/ behaviour similar to NVIDIA FusedLamb
This optimizer code was adapted from the following (starting with latest)
* https://github.com/HabanaAI/Model-References/blob/2b435114fe8e31f159b1d3063b8280ae37af7423/PyTorch/nlp/bert/pretraining/lamb.py
* https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py
* https://github.com/cybertronai/pytorch-lamb
Use FusedLamb if you can (GPU). The reason for including this variant of Lamb is to have a version that is
similar in behaviour to APEX FusedLamb if you aren't using NVIDIA GPUs or cannot install/use APEX.
In addition to some cleanup, this Lamb impl has been modified to support PyTorch XLA and has been tested on TPU.
Original copyrights for above sources are below.
Modifications Copyright 2021 Ross Wightman
"""
# Copyright (c) 2021, Habana Labs Ltd. All rights reserved.
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2019 cybertronai
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import torch
from torch.optim import Optimizer
class Lamb(Optimizer):
"""Implements a pure pytorch variant of FuseLAMB (NvLamb variant) optimizer from apex.optimizers.FusedLAMB
reference: https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py
LAMB was proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its norm. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
grad_averaging (bool, optional): whether apply (1-beta2) to grad when
calculating running averages of gradient. (default: True)
max_grad_norm (float, optional): value used to clip global grad norm (default: 1.0)
trust_clip (bool): enable LAMBC trust ratio clipping (default: False)
always_adapt (boolean, optional): Apply adaptive learning rate to 0.0
weight decay parameter (default: False)
.. _Large Batch Optimization for Deep Learning - Training BERT in 76 minutes:
https://arxiv.org/abs/1904.00962
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(
self, params, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-6,
weight_decay=0.01, grad_averaging=True, max_grad_norm=1.0, trust_clip=False, always_adapt=False):
defaults = dict(
lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay,
grad_averaging=grad_averaging, max_grad_norm=max_grad_norm,
trust_clip=trust_clip, always_adapt=always_adapt)
super().__init__(params, defaults)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
device = self.param_groups[0]['params'][0].device
one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly
global_grad_norm = torch.zeros(1, device=device)
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
                    raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instead.')
global_grad_norm.add_(grad.pow(2).sum())
global_grad_norm = torch.sqrt(global_grad_norm)
# FIXME it'd be nice to remove explicit tensor conversion of scalars when torch.where promotes
# scalar types properly https://github.com/pytorch/pytorch/issues/9190
max_grad_norm = torch.tensor(self.defaults['max_grad_norm'], device=device)
clip_global_grad_norm = torch.where(
global_grad_norm > max_grad_norm,
global_grad_norm / max_grad_norm,
one_tensor)
for group in self.param_groups:
bias_correction = 1 if group['bias_correction'] else 0
beta1, beta2 = group['betas']
grad_averaging = 1 if group['grad_averaging'] else 0
beta3 = 1 - beta1 if grad_averaging else 1.0
# assume same step across group now to simplify things
# per parameter step can be easily support by making it tensor, or pass list into kernel
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
if bias_correction:
bias_correction1 = 1 - beta1 ** group['step']
bias_correction2 = 1 - beta2 ** group['step']
else:
bias_correction1, bias_correction2 = 1.0, 1.0
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.div_(clip_global_grad_norm)
state = self.state[p]
# State initialization
if len(state) == 0:
                    # Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=beta3) # m_t
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) # v_t
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
update = (exp_avg / bias_correction1).div_(denom)
weight_decay = group['weight_decay']
if weight_decay != 0:
update.add_(p, alpha=weight_decay)
if weight_decay != 0 or group['always_adapt']:
# Layer-wise LR adaptation. By default, skip adaptation on parameters that are
# excluded from weight decay, unless always_adapt == True, then always enabled.
w_norm = p.norm(2.0)
g_norm = update.norm(2.0)
# FIXME nested where required since logical and/or not working in PT XLA
trust_ratio = torch.where(
w_norm > 0,
torch.where(g_norm > 0, w_norm / g_norm, one_tensor),
one_tensor,
)
if group['trust_clip']:
# LAMBC trust clipping, upper bound fixed at one
trust_ratio = torch.minimum(trust_ratio, one_tensor)
update.mul_(trust_ratio)
p.add_(update, alpha=-group['lr'])
return loss
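# Minimal usage sketch (not part of the original file): the tiny model and the
# random batch below are illustrative placeholders; the optimizer is driven
# through the standard torch.optim zero_grad/backward/step protocol.
if __name__ == '__main__':
    model = torch.nn.Linear(10, 2)
    optimizer = Lamb(model.parameters(), lr=1e-3, weight_decay=0.01)
    inputs = torch.randn(4, 10)
    targets = torch.randint(0, 2, (4,))
    loss = torch.nn.functional.cross_entropy(model(inputs), targets)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    print('loss before the step:', loss.item())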
|
the-stack_106_18404
|
import os
from typing import List
import numpy as np
from pathlib import Path
from alchemy.db.fs import raw_data_dir
from alchemy.db.model import (
ClassificationAnnotation,
EntityTypeEnum,
Task,
User,
majority_vote_annotations_query,
)
from alchemy.shared.utils import load_json, load_jsonl, save_jsonl
from alchemy.train.no_deps.inference_results import InferenceResults
from alchemy.train.no_deps.run import build_inference_cache, inference, train_model
from alchemy.train.no_deps.utils import BINARY_CLASSIFICATION
from alchemy.train.prep import prepare_next_model_for_label
from alchemy.train.no_deps.paths import ( # Train Model; Model Inference
_get_data_parser_fname,
_get_inference_fname,
_get_metrics_fname,
)
LABEL = "IsTall"
# TODO add a case to cover only 1 class present in the data.
# This is easy to do, just set N=2.
# Unclear what's the best way to surface that error just yet.
class stub_model:
def __init__(self):
self.history = []
def predict(self, text: List[str]):
self.history.append(text)
preds = np.array([1] * len(text))
# This is the style when "Sliding Window" is enabled
probs = [np.array([[-0.48803285, 0.56392884]], dtype=np.float32)] * len(text)
return preds, probs
def stub_train_fn(X, y, config):
return stub_model()
def stub_build_fn(config, model_dir):
return stub_model()
# Create many Annotations for a Label
def _create_anno(ent, v, user, weight=1):
return ClassificationAnnotation(
entity_type=EntityTypeEnum.COMPANY,
entity=ent,
user=user,
label=LABEL,
value=v,
weight=weight,
)
def _populate_db_and_fs(dbsession, tmp_path, N, weight=1):
# =========================================================================
# Add in a fake data file
d = raw_data_dir(tmp_path, as_path=True)
d.mkdir(parents=True)
p = d / "data.jsonl"
data = [
{"text": f"item {i} text", "meta": {"domain": f"{i}.com"}} for i in range(N)
]
save_jsonl(str(p), data)
# =========================================================================
# Create dummy data
# Create many Users
user = User(username=f"someuser")
dbsession.add(user)
dbsession.commit()
ents = [d["meta"]["domain"] for d in data]
annos = [
# Create a few annotations for the first 2 entities.
_create_anno(ents[0], 1, user),
_create_anno(ents[0], 1, user),
_create_anno(ents[0], 1, user),
_create_anno(ents[0], -1, user, weight=weight),
_create_anno(ents[0], 0, user),
_create_anno(ents[0], 0, user),
_create_anno(ents[0], 0, user),
_create_anno(ents[0], 0, user),
_create_anno(ents[1], 1, user, weight=weight),
_create_anno(ents[1], 1, user),
_create_anno(ents[1], -1, user),
_create_anno(ents[1], -1, user),
_create_anno(ents[1], -1, user),
_create_anno(ents[1], -1, user),
_create_anno(ents[1], 0, user),
_create_anno(ents[1], 0, user),
]
for i in range(2, N):
# Create one annotations for the rest of the entities.
annos.append(_create_anno(ents[i], 1 if i % 2 else -1, user))
dbsession.add_all(annos)
dbsession.commit()
# Create a Task
task = Task(name="Bball")
task.set_labels([LABEL])
task.set_annotators([user.username])
task.set_patterns_file(None)
task.set_patterns(["Shaq", "Lebron"])
task.set_data_filenames(["data.jsonl"])
dbsession.add(task)
dbsession.commit()
def test_train_flow_simple(dbsession, monkeypatch, tmp_path):
monkeypatch.setenv("ALCHEMY_FILESTORE_DIR", str(tmp_path))
N = 2
_populate_db_and_fs(dbsession, tmp_path, N, weight=100)
query = majority_vote_annotations_query(dbsession, LABEL)
res = query.all()
assert sorted(res) == [("0.com", -1, 100), ("1.com", 1, 101)]
def test_train_flow_simple_equal_weight(dbsession, monkeypatch, tmp_path):
monkeypatch.setenv("ALCHEMY_FILESTORE_DIR", str(tmp_path))
N = 2
_populate_db_and_fs(dbsession, tmp_path, N, weight=3)
query = majority_vote_annotations_query(dbsession, LABEL)
res = query.all()
assert len(res) == N
res = sorted(res)
for i in range(N):
assert res[i][0] == str(i) + ".com"
assert res[i][1] in [-1, 1]
if i == 0:
assert res[i][2] == 3
elif i == 1:
assert res[i][2] == 4
def test_train_flow(dbsession, monkeypatch, tmp_path):
monkeypatch.setenv("ALCHEMY_FILESTORE_DIR", str(tmp_path))
N = 20
_populate_db_and_fs(dbsession, tmp_path, N)
task = dbsession.query(Task).first()
# Part 1. Prepare.
label = task.get_labels()[0]
raw_file_path = task.get_data_filenames(abs=True)[0]
model = prepare_next_model_for_label(dbsession, label, raw_file_path)
model_dir = model.dir(abs=True)
# These are all the files we need to train a model.
files = os.listdir(model_dir)
assert set(files) == set(["data.jsonl", "config.json"])
data = load_jsonl(os.path.join(model_dir, "data.jsonl"), to_df=False)
data = sorted(data, key=lambda d: d["text"])
print(data[0])
print(data[1])
print(len(data))
assert data[0] == {"text": "item 0 text", "labels": {"IsTall": 1}}
assert data[1] == {"text": "item 1 text", "labels": {"IsTall": -1}}
assert len(data) == 20
config = load_json(os.path.join(model_dir, "config.json"))
assert config is not None
assert config["train_config"] is not None
# Part 2. Train model.
train_model(model_dir, train_fn=stub_train_fn)
data_parser_results = load_json(_get_data_parser_fname(model_dir))
assert data_parser_results["problem_type"] == BINARY_CLASSIFICATION
metrics = load_json(_get_metrics_fname(model_dir))
assert metrics["test"] is not None
assert metrics["train"] is not None
# Part 3. Post-training Inference.
f = tmp_path / "tmp_file_for_inference.jsonl"
save_jsonl(str(f), [{"text": "hello"}, {"text": "world"}])
inference(model_dir, str(f), build_model_fn=stub_build_fn, generate_plots=False)
ir = InferenceResults.load(_get_inference_fname(model_dir, str(f)))
assert np.isclose(ir.probs, [0.7411514, 0.7411514]).all()
# Part 4. New data update, run inference on the new data.
f2 = tmp_path / "tmp_file_for_inference_v2.jsonl"
save_jsonl(
str(f2),
[
{"text": "hello"},
{"text": "world"},
{"text": "newline_1"},
{"text": "newline_2"},
],
)
class Mock_DatasetStorageManager:
def download(self, url):
# Return the one data file we have locally.
return str(f)
mock_dsm = Mock_DatasetStorageManager()
inference_cache = build_inference_cache(model_dir, mock_dsm)
model, _ = inference(
model_dir,
str(f2),
build_model_fn=stub_build_fn,
generate_plots=False,
inference_cache=inference_cache,
)
assert model.history == [
["newline_1", "newline_2"]
], "Model should have only been ran on the new lines"
ir2 = InferenceResults.load(_get_inference_fname(model_dir, str(f2)))
assert len(ir2.probs) == 4, "Inference should have 4 elements"
assert ir.probs[:2] == ir2.probs[:2], "Result on the same items should be the same"
def test_majority_vote_annotations_query(dbsession):
user = User(username="fake_user")
dbsession.add(user)
dbsession.commit()
ents = [str(i) + ".com" for i in range(3)]
annos = [
# for this entity, the value -1 has the highest weight.
_create_anno(ents[0], 1, user),
_create_anno(ents[0], 1, user),
_create_anno(ents[0], 1, user),
_create_anno(ents[0], -1, user, weight=100),
_create_anno(ents[0], 0, user),
_create_anno(ents[0], 0, user),
_create_anno(ents[0], 0, user),
_create_anno(ents[0], 0, user),
# for this entity, the weighted vote is a tie between 1 and -1.
_create_anno(ents[1], 1, user, weight=3),
_create_anno(ents[1], 1, user),
_create_anno(ents[1], -1, user),
_create_anno(ents[1], -1, user),
_create_anno(ents[1], -1, user),
_create_anno(ents[1], -1, user),
_create_anno(ents[1], 0, user),
_create_anno(ents[1], 0, user),
_create_anno(ents[2], 1, user),
]
dbsession.add_all(annos)
dbsession.commit()
query = majority_vote_annotations_query(dbsession, label=LABEL)
res = query.all()
assert len(res) == 3
res = sorted(res, key=lambda x: x[0])
for i in range(3):
if i == 0:
assert res[0] == ("0.com", -1, 100)
elif i == 1:
assert res[1][0] == "1.com"
assert res[1][1] in [1, -1]
assert res[1][2] == 4
else:
assert res[2] == ("2.com", 1, 1)
|
the-stack_106_18406
|
from collections import namedtuple
import enum
class BlockType(enum.Enum):
# Frames describing CLB features, interconnect, clocks and IOs.
CLB_IO_CLK = 'CLB_IO_CLK'
# Frames describing block RAM initialization.
BLOCK_RAM = 'BLOCK_RAM'
GridLoc = namedtuple('GridLoc', 'grid_x grid_y')
GridInfo = namedtuple('GridInfo', 'bits sites tile_type pin_functions')
BitAlias = namedtuple('BitAlias', 'tile_type start_offset sites')
Bits = namedtuple('Bits', 'base_address frames offset words alias')
BitsInfo = namedtuple('BitsInfo', 'block_type tile bits')
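# Illustrative construction of the records above (all field values are made up):
#
#     bits = Bits(base_address=0x00400000, frames=36, offset=0, words=101, alias=None)
#     info = BitsInfo(block_type=BlockType.CLB_IO_CLK, tile='CLBLL_L_X2Y0', bits=bits)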
|
the-stack_106_18408
|
import RPi.GPIO as GPIO
import time
from constants import *
def setInput(pin):
try:
GPIO.setup(pin, GPIO.IN) # set pin as an input pin
except:
GPIO.cleanup()
GPIO.setmode(GPIO.BCM) # BCM pin-numbering scheme from Raspberry Pi
GPIO.setup(pin, GPIO.IN) # set pin as an input pin
def readInput(pin):
    """Return True when the pin reads LOW (the inputs are treated as active-low)."""
    value = GPIO.input(pin)
    if value == GPIO.HIGH:
        return False
    else:
        return True
if __name__=="__main__":
setInput(BUMPA)
setInput(BUMPB)
setInput(BUMPC)
setInput(BUMPD)
while True:
        print(readInput(BUMPA), readInput(BUMPB), readInput(BUMPC), readInput(BUMPD))
# time.sleep(0.1)
|
the-stack_106_18409
|
# Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
# examples/Python/Advanced/remove_geometry.py
import open3d as o3d
import numpy as np
import time
import copy
def visualize_non_blocking(vis):
vis.update_geometry()
vis.poll_events()
vis.update_renderer()
pcd_orig = o3d.io.read_point_cloud("../../TestData/fragment.pcd")
flip_transform = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]
pcd_orig.transform(flip_transform)
n_pcd = 5
pcds = []
for i in range(n_pcd):
pcds.append(copy.deepcopy(pcd_orig))
trans = np.identity(4)
trans[:3, 3] = [3 * i, 0, 0]
pcds[i].transform(trans)
vis = o3d.visualization.Visualizer()
vis.create_window()
start_time = time.time()
added = [False] * n_pcd
curr_sec = int(time.time() - start_time)
prev_sec = curr_sec - 1
while True:
curr_sec = int(time.time() - start_time)
if curr_sec - prev_sec == 1:
prev_sec = curr_sec
for i in range(n_pcd):
if curr_sec % (n_pcd * 2) == i and not added[i]:
vis.add_geometry(pcds[i])
added[i] = True
print("Adding %d" % i)
if curr_sec % (n_pcd * 2) == (i + n_pcd) and added[i]:
vis.remove_geometry(pcds[i])
added[i] = False
print("Removing %d" % i)
visualize_non_blocking(vis)
|
the-stack_106_18410
|
from unittest import mock
import pytest
from servicex_databinder import configuration
@mock.patch("servicex_databinder.configuration._validate_config")
def test_load_config(mock_validation):
conf = configuration._load_config("config_example_uproot.yml")
assert isinstance(conf, dict)
mock_validation.assert_called_once()
with pytest.raises(FileNotFoundError):
conf = configuration._load_config("none.yml")
def test_validate_config():
config_without_general = {}
with pytest.raises(KeyError):
configuration._validate_config(config_without_general)
config_without_servicexbackendname = {"General": {},
"Sample": [{},]}
with pytest.raises(KeyError):
configuration._validate_config(config_without_servicexbackendname)
    config_with_wrong_backendname = {
        "General": {
            "ServiceXBackendName": "wrong_backend_name",
        },
        "Sample": [{},]}
    with pytest.raises(ValueError):
        configuration._validate_config(config_with_wrong_backendname)
config_without_outputdirectory = {
"General": {
"ServiceXBackendName": "uproot",
},
"Sample": [{},]}
with pytest.raises(KeyError):
configuration._validate_config(config_without_outputdirectory)
config_without_outputformat = {
"General": {
"ServiceXBackendName": "uproot",
"OutputDirectory": "a",
},
"Sample": [{},]}
with pytest.raises(KeyError):
configuration._validate_config(config_without_outputformat)
config_wrong_outputformat = {
"General": {
"ServiceXBackendName": "uproot",
"OutputDirectory": "a",
"OutputFormat": "pandas",
},
"Sample": [{},]}
with pytest.raises(ValueError):
configuration._validate_config(config_wrong_outputformat)
config_without_rucio_did = {
"General": {
"ServiceXBackendName": "uproot",
"OutputDirectory": "a",
"OutputFormat": "parquet",
},
"Sample": [{
"Name": "ttH",
}]
}
with pytest.raises(KeyError):
configuration._validate_config(config_without_rucio_did)
config_without_rucio_scope = {
"General": {
"ServiceXBackendName": "uproot",
"OutputDirectory": "a",
"OutputFormat": "parquet",
},
"Sample": [{
"Name": "ttH",
"RucioDID": "user.kchoi:user.kchoi.A, user.kchoi.B",
}]
}
with pytest.raises(ValueError):
configuration._validate_config(config_without_rucio_scope)
config_tree_not_with_uproot = {
"General": {
"ServiceXBackendName": "xaod",
"OutputDirectory": "a",
"OutputFormat": "root",
},
"Sample": [{
"Name": "ttH",
"RucioDID": "user.kchoi:user.kchoi.A, user.kchoi:user.kchoi.B",
"Tree": "nominal",
}]
}
with pytest.raises(KeyError):
configuration._validate_config(config_tree_not_with_uproot)
config_columns_with_funcadl = {
"General": {
"ServiceXBackendName": "xaod",
"OutputDirectory": "a",
"OutputFormat": "root",
},
"Sample": [{
"Name": "ttH",
"RucioDID": "user.kchoi:user.kchoi.A, user.kchoi:user.kchoi.B",
"Columns": "jet_pt",
"FuncADL": "Select()",
}]
}
with pytest.raises(KeyError):
configuration._validate_config(config_columns_with_funcadl)
config_filter_with_funcadl = {
"General": {
"ServiceXBackendName": "xaod",
"OutputDirectory": "a",
"OutputFormat": "root",
},
"Sample": [{
"Name": "ttH",
"RucioDID": "user.kchoi:user.kchoi.A, user.kchoi:user.kchoi.B",
"Filter": "jet_pt>15e3",
"FuncADL": "Select()",
}]
}
with pytest.raises(KeyError):
configuration._validate_config(config_filter_with_funcadl)
config_valid_uproot ={
"General": {
"ServiceXBackendName": "uproot",
"OutputDirectory": "a",
"OutputFormat": "parquet",
},
"Sample": [{
"Name": "ttH",
"RucioDID": "user.kchoi:user.kchoi.A, user.kchoi:user.kchoi.B",
"FuncADL": "Select()",
}]
}
assert configuration._validate_config(config_valid_uproot)
# def test_validate_config_for_uproot():
# config_valid_uproot ={
# "General": {
# "ServiceXBackendName": "uproot_test",
# "OutputDirectory": "a",
# "OutputFormat": "parquet",
# },
# "Sample": [{
# "Name": "ttH",
# "RucioDID": "user.kchoi:user.kchoi",
# "FuncADL": "Select()",
# }]
# }
# assert configuration._validate_config(config_valid_uproot)
# def test_validate_config_for_xaod():
# config_valid_xaod ={
# "General": {
# "ServiceXBackendName": "xaod_test",
# "OutputDirectory": "a",
# "OutputFormat": "root",
# },
# "Sample": [{
# "Name": "ttH",
# "RucioDID": "user.kchoi:user.kchoi",
# "FuncADL": 1,
# }]
# }
# assert configuration._validate_config(config_valid_xaod)
|
the-stack_106_18411
|
# For relative imports to work in Python 3.6
import os, sys; sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from os.path import abspath, join
from flask import Flask
# main application
app = None
log = None
def create_app( cfgfile ):
'''
    Create the main app object, loading its settings from cfgfile.
'''
global app
if app is None:
static_folder = abspath('static')
app = Flask(
'farm', #__name__,
static_folder=static_folder, static_url_path='',
instance_path=abspath(join( __file__, '../../conf' )),
instance_relative_config=True)
print('Serving static content from', static_folder, '...')
if cfgfile:
print('Loading config from', cfgfile, '...')
app.config.from_pyfile(cfgfile)
from .logger import create_log
global log
if log is None:
log = create_log( app )
return app
def init_app( app ):
'''
Get the app ready to serve HTTP requests
'''
assert app is not None
print('Initializing...')
from . import routes
from . import dataset
if not dataset.init_dataset(app.config):
return False
return True
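# Illustrative usage sketch (an assumption, not part of the original module):
# in practice a small run script imports the package and wires the two factory
# functions together, roughly like this. The package name 'farm', the config
# filename 'farm.cfg' and the host/port values are hypothetical placeholders.
#
#     from farm import create_app, init_app
#
#     app = create_app('farm.cfg')   # load settings from the instance conf dir
#     if init_app(app):              # attach routes and open the dataset
#         app.run(host='127.0.0.1', port=5000)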
|
the-stack_106_18413
|
from collections import OrderedDict
import contextlib
from datetime import date, datetime, time, timedelta
from distutils.version import LooseVersion
from functools import partial
import os
import warnings
import numpy as np
from numpy import nan
import pytest
from pandas.compat import PY36, BytesIO, iteritems, map, range, u
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
from pandas.core.config import get_option, set_option
import pandas.util.testing as tm
from pandas.util.testing import ensure_clean, makeCustomDataframe as mkdf
from pandas.io.common import URLError
from pandas.io.excel import (
ExcelFile, ExcelWriter, _OpenpyxlWriter, _XlsxWriter, _XlwtWriter,
read_excel, register_writer)
from pandas.io.formats.excel import ExcelFormatter
from pandas.io.parsers import read_csv
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)[:10]
_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])[:10]
_tsframe = tm.makeTimeDataFrame()[:5]
_mixed_frame = _frame.copy()
_mixed_frame['foo'] = 'bar'
@contextlib.contextmanager
def ignore_xlrd_time_clock_warning():
"""
Context manager to ignore warnings raised by the xlrd library,
regarding the deprecation of `time.clock` in Python 3.7.
"""
with warnings.catch_warnings():
warnings.filterwarnings(
action='ignore',
message='time.clock has been deprecated',
category=DeprecationWarning)
yield
@td.skip_if_no('xlrd', '1.0.0')
class SharedItems(object):
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
self.dirpath = datapath("io", "data")
self.frame = _frame.copy()
self.frame2 = _frame2.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
def get_csv_refdf(self, basename):
"""
Obtain the reference data from read_csv with the Python engine.
Parameters
----------
basename : str
File base name, excluding file extension.
Returns
-------
dfref : DataFrame
"""
pref = os.path.join(self.dirpath, basename + '.csv')
dfref = read_csv(pref, index_col=0, parse_dates=True, engine='python')
return dfref
def get_excelfile(self, basename, ext):
"""
Return test data ExcelFile instance.
Parameters
----------
basename : str
File base name, excluding file extension.
Returns
-------
excel : io.excel.ExcelFile
"""
return ExcelFile(os.path.join(self.dirpath, basename + ext))
def get_exceldf(self, basename, ext, *args, **kwds):
"""
Return test data DataFrame.
Parameters
----------
basename : str
File base name, excluding file extension.
Returns
-------
df : DataFrame
"""
pth = os.path.join(self.dirpath, basename + ext)
return read_excel(pth, *args, **kwds)
class ReadingTestsBase(SharedItems):
# This is based on ExcelWriterBase
@pytest.fixture(autouse=True, params=['xlrd', None])
def set_engine(self, request):
func_name = "get_exceldf"
old_func = getattr(self, func_name)
new_func = partial(old_func, engine=request.param)
setattr(self, func_name, new_func)
yield
setattr(self, func_name, old_func)
@td.skip_if_no("xlrd", "1.0.1") # see gh-22682
def test_usecols_int(self, ext):
df_ref = self.get_csv_refdf("test1")
df_ref = df_ref.reindex(columns=["A", "B", "C"])
# usecols as int
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
with ignore_xlrd_time_clock_warning():
df1 = self.get_exceldf("test1", ext, "Sheet1",
index_col=0, usecols=3)
# usecols as int
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
with ignore_xlrd_time_clock_warning():
df2 = self.get_exceldf("test1", ext, "Sheet2", skiprows=[1],
index_col=0, usecols=3)
# parse_cols instead of usecols, usecols as int
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
with ignore_xlrd_time_clock_warning():
df3 = self.get_exceldf("test1", ext, "Sheet2", skiprows=[1],
index_col=0, parse_cols=3)
        # TODO add index to xls file
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
tm.assert_frame_equal(df3, df_ref, check_names=False)
@td.skip_if_no('xlrd', '1.0.1') # GH-22682
def test_usecols_list(self, ext):
dfref = self.get_csv_refdf('test1')
dfref = dfref.reindex(columns=['B', 'C'])
df1 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
usecols=[0, 2, 3])
df2 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
index_col=0, usecols=[0, 2, 3])
with tm.assert_produces_warning(FutureWarning):
with ignore_xlrd_time_clock_warning():
df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
index_col=0, parse_cols=[0, 2, 3])
        # TODO add index to xls file
tm.assert_frame_equal(df1, dfref, check_names=False)
tm.assert_frame_equal(df2, dfref, check_names=False)
tm.assert_frame_equal(df3, dfref, check_names=False)
@td.skip_if_no('xlrd', '1.0.1') # GH-22682
def test_usecols_str(self, ext):
dfref = self.get_csv_refdf('test1')
df1 = dfref.reindex(columns=['A', 'B', 'C'])
df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
usecols='A:D')
df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
index_col=0, usecols='A:D')
with tm.assert_produces_warning(FutureWarning):
with ignore_xlrd_time_clock_warning():
df4 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
index_col=0, parse_cols='A:D')
        # TODO add index to xls, read xls ignores index name?
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
tm.assert_frame_equal(df4, df1, check_names=False)
df1 = dfref.reindex(columns=['B', 'C'])
df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
usecols='A,C,D')
df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
index_col=0, usecols='A,C,D')
# TODO add index to xls file
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
df1 = dfref.reindex(columns=['B', 'C'])
df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
usecols='A,C:D')
df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
index_col=0, usecols='A,C:D')
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
@pytest.mark.parametrize("usecols", [
[0, 1, 3], [0, 3, 1],
[1, 0, 3], [1, 3, 0],
[3, 0, 1], [3, 1, 0],
])
def test_usecols_diff_positional_int_columns_order(self, ext, usecols):
expected = self.get_csv_refdf("test1")[["A", "C"]]
result = self.get_exceldf("test1", ext, "Sheet1",
index_col=0, usecols=usecols)
tm.assert_frame_equal(result, expected, check_names=False)
@pytest.mark.parametrize("usecols", [
["B", "D"], ["D", "B"]
])
def test_usecols_diff_positional_str_columns_order(self, ext, usecols):
expected = self.get_csv_refdf("test1")[["B", "D"]]
expected.index = range(len(expected))
result = self.get_exceldf("test1", ext, "Sheet1", usecols=usecols)
tm.assert_frame_equal(result, expected, check_names=False)
def test_read_excel_without_slicing(self, ext):
expected = self.get_csv_refdf("test1")
result = self.get_exceldf("test1", ext, "Sheet1", index_col=0)
tm.assert_frame_equal(result, expected, check_names=False)
def test_usecols_excel_range_str(self, ext):
expected = self.get_csv_refdf("test1")[["C", "D"]]
result = self.get_exceldf("test1", ext, "Sheet1",
index_col=0, usecols="A,D:E")
tm.assert_frame_equal(result, expected, check_names=False)
def test_usecols_excel_range_str_invalid(self, ext):
msg = "Invalid column name: E1"
with pytest.raises(ValueError, match=msg):
self.get_exceldf("test1", ext, "Sheet1", usecols="D:E1")
def test_index_col_label_error(self, ext):
msg = "list indices must be integers.*, not str"
with pytest.raises(TypeError, match=msg):
self.get_exceldf("test1", ext, "Sheet1", index_col=["A"],
usecols=["A", "C"])
def test_index_col_empty(self, ext):
# see gh-9208
result = self.get_exceldf("test1", ext, "Sheet3",
index_col=["A", "B", "C"])
expected = DataFrame(columns=["D", "E", "F"],
index=MultiIndex(levels=[[]] * 3,
codes=[[]] * 3,
names=["A", "B", "C"]))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", [None, 2])
def test_index_col_with_unnamed(self, ext, index_col):
# see gh-18792
result = self.get_exceldf("test1", ext, "Sheet4",
index_col=index_col)
expected = DataFrame([["i1", "a", "x"], ["i2", "b", "y"]],
columns=["Unnamed: 0", "col1", "col2"])
if index_col:
expected = expected.set_index(expected.columns[index_col])
tm.assert_frame_equal(result, expected)
def test_usecols_pass_non_existent_column(self, ext):
msg = ("Usecols do not match columns, "
"columns expected but not found: " + r"\['E'\]")
with pytest.raises(ValueError, match=msg):
self.get_exceldf("test1", ext, usecols=["E"])
def test_usecols_wrong_type(self, ext):
msg = ("'usecols' must either be list-like of "
"all strings, all unicode, all integers or a callable.")
with pytest.raises(ValueError, match=msg):
self.get_exceldf("test1", ext, usecols=["E1", 0])
def test_excel_stop_iterator(self, ext):
parsed = self.get_exceldf('test2', ext, 'Sheet1')
expected = DataFrame([['aaaa', 'bbbbb']], columns=['Test', 'Test1'])
tm.assert_frame_equal(parsed, expected)
def test_excel_cell_error_na(self, ext):
parsed = self.get_exceldf('test3', ext, 'Sheet1')
expected = DataFrame([[np.nan]], columns=['Test'])
tm.assert_frame_equal(parsed, expected)
def test_excel_passes_na(self, ext):
excel = self.get_excelfile('test4', ext)
parsed = read_excel(excel, 'Sheet1', keep_default_na=False,
na_values=['apple'])
expected = DataFrame([['NA'], [1], ['NA'], [np.nan], ['rabbit']],
columns=['Test'])
tm.assert_frame_equal(parsed, expected)
parsed = read_excel(excel, 'Sheet1', keep_default_na=True,
na_values=['apple'])
expected = DataFrame([[np.nan], [1], [np.nan], [np.nan], ['rabbit']],
columns=['Test'])
tm.assert_frame_equal(parsed, expected)
# 13967
excel = self.get_excelfile('test5', ext)
parsed = read_excel(excel, 'Sheet1', keep_default_na=False,
na_values=['apple'])
expected = DataFrame([['1.#QNAN'], [1], ['nan'], [np.nan], ['rabbit']],
columns=['Test'])
tm.assert_frame_equal(parsed, expected)
parsed = read_excel(excel, 'Sheet1', keep_default_na=True,
na_values=['apple'])
expected = DataFrame([[np.nan], [1], [np.nan], [np.nan], ['rabbit']],
columns=['Test'])
tm.assert_frame_equal(parsed, expected)
@td.skip_if_no('xlrd', '1.0.1') # GH-22682
def test_deprecated_sheetname(self, ext):
# gh-17964
excel = self.get_excelfile('test1', ext)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
read_excel(excel, sheetname='Sheet1')
with pytest.raises(TypeError):
read_excel(excel, sheet='Sheet1')
@td.skip_if_no('xlrd', '1.0.1') # GH-22682
def test_excel_table_sheet_by_index(self, ext):
excel = self.get_excelfile('test1', ext)
dfref = self.get_csv_refdf('test1')
df1 = read_excel(excel, 0, index_col=0)
df2 = read_excel(excel, 1, skiprows=[1], index_col=0)
tm.assert_frame_equal(df1, dfref, check_names=False)
tm.assert_frame_equal(df2, dfref, check_names=False)
df1 = excel.parse(0, index_col=0)
df2 = excel.parse(1, skiprows=[1], index_col=0)
tm.assert_frame_equal(df1, dfref, check_names=False)
tm.assert_frame_equal(df2, dfref, check_names=False)
df3 = read_excel(excel, 0, index_col=0, skipfooter=1)
tm.assert_frame_equal(df3, df1.iloc[:-1])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
df4 = read_excel(excel, 0, index_col=0, skip_footer=1)
tm.assert_frame_equal(df3, df4)
df3 = excel.parse(0, index_col=0, skipfooter=1)
tm.assert_frame_equal(df3, df1.iloc[:-1])
import xlrd
with pytest.raises(xlrd.XLRDError):
read_excel(excel, 'asdf')
def test_excel_table(self, ext):
dfref = self.get_csv_refdf('test1')
df1 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0)
df2 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
index_col=0)
# TODO add index to file
tm.assert_frame_equal(df1, dfref, check_names=False)
tm.assert_frame_equal(df2, dfref, check_names=False)
df3 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
skipfooter=1)
tm.assert_frame_equal(df3, df1.iloc[:-1])
def test_reader_special_dtypes(self, ext):
expected = DataFrame.from_dict(OrderedDict([
("IntCol", [1, 2, -3, 4, 0]),
("FloatCol", [1.25, 2.25, 1.83, 1.92, 0.0000000005]),
("BoolCol", [True, False, True, True, False]),
("StrCol", [1, 2, 3, 4, 5]),
# GH5394 - this is why convert_float isn't vectorized
("Str2Col", ["a", 3, "c", "d", "e"]),
("DateCol", [datetime(2013, 10, 30), datetime(2013, 10, 31),
datetime(1905, 1, 1), datetime(2013, 12, 14),
datetime(2015, 3, 14)])
]))
basename = 'test_types'
# should read in correctly and infer types
actual = self.get_exceldf(basename, ext, 'Sheet1')
tm.assert_frame_equal(actual, expected)
# if not coercing number, then int comes in as float
float_expected = expected.copy()
float_expected["IntCol"] = float_expected["IntCol"].astype(float)
float_expected.loc[float_expected.index[1], "Str2Col"] = 3.0
actual = self.get_exceldf(basename, ext, 'Sheet1', convert_float=False)
tm.assert_frame_equal(actual, float_expected)
# check setting Index (assuming xls and xlsx are the same here)
for icol, name in enumerate(expected.columns):
actual = self.get_exceldf(basename, ext, 'Sheet1', index_col=icol)
exp = expected.set_index(name)
tm.assert_frame_equal(actual, exp)
# convert_float and converters should be different but both accepted
expected["StrCol"] = expected["StrCol"].apply(str)
actual = self.get_exceldf(
basename, ext, 'Sheet1', converters={"StrCol": str})
tm.assert_frame_equal(actual, expected)
no_convert_float = float_expected.copy()
no_convert_float["StrCol"] = no_convert_float["StrCol"].apply(str)
actual = self.get_exceldf(basename, ext, 'Sheet1', convert_float=False,
converters={"StrCol": str})
tm.assert_frame_equal(actual, no_convert_float)
# GH8212 - support for converters and missing values
def test_reader_converters(self, ext):
basename = 'test_converters'
expected = DataFrame.from_dict(OrderedDict([
("IntCol", [1, 2, -3, -1000, 0]),
("FloatCol", [12.5, np.nan, 18.3, 19.2, 0.000000005]),
("BoolCol", ['Found', 'Found', 'Found', 'Not found', 'Found']),
("StrCol", ['1', np.nan, '3', '4', '5']),
]))
converters = {'IntCol': lambda x: int(x) if x != '' else -1000,
'FloatCol': lambda x: 10 * x if x else np.nan,
2: lambda x: 'Found' if x != '' else 'Not found',
3: lambda x: str(x) if x else '',
}
# should read in correctly and set types of single cells (not array
# dtypes)
actual = self.get_exceldf(basename, ext, 'Sheet1',
converters=converters)
tm.assert_frame_equal(actual, expected)
def test_reader_dtype(self, ext):
# GH 8212
basename = 'testdtype'
actual = self.get_exceldf(basename, ext)
expected = DataFrame({
'a': [1, 2, 3, 4],
'b': [2.5, 3.5, 4.5, 5.5],
'c': [1, 2, 3, 4],
'd': [1.0, 2.0, np.nan, 4.0]}).reindex(
columns=['a', 'b', 'c', 'd'])
tm.assert_frame_equal(actual, expected)
actual = self.get_exceldf(basename, ext,
dtype={'a': 'float64',
'b': 'float32',
'c': str})
expected['a'] = expected['a'].astype('float64')
expected['b'] = expected['b'].astype('float32')
expected['c'] = ['001', '002', '003', '004']
tm.assert_frame_equal(actual, expected)
with pytest.raises(ValueError):
self.get_exceldf(basename, ext, dtype={'d': 'int64'})
@pytest.mark.parametrize("dtype,expected", [
(None,
DataFrame({
"a": [1, 2, 3, 4],
"b": [2.5, 3.5, 4.5, 5.5],
"c": [1, 2, 3, 4],
"d": [1.0, 2.0, np.nan, 4.0]
})),
({"a": "float64",
"b": "float32",
"c": str,
"d": str
},
DataFrame({
"a": Series([1, 2, 3, 4], dtype="float64"),
"b": Series([2.5, 3.5, 4.5, 5.5], dtype="float32"),
"c": ["001", "002", "003", "004"],
"d": ["1", "2", np.nan, "4"]
})),
])
def test_reader_dtype_str(self, ext, dtype, expected):
# see gh-20377
basename = "testdtype"
actual = self.get_exceldf(basename, ext, dtype=dtype)
tm.assert_frame_equal(actual, expected)
def test_reading_all_sheets(self, ext):
# Test reading all sheetnames by setting sheetname to None,
# Ensure a dict is returned.
# See PR #9450
basename = 'test_multisheet'
dfs = self.get_exceldf(basename, ext, sheet_name=None)
# ensure this is not alphabetical to test order preservation
expected_keys = ['Charlie', 'Alpha', 'Beta']
tm.assert_contains_all(expected_keys, dfs.keys())
# Issue 9930
# Ensure sheet order is preserved
assert expected_keys == list(dfs.keys())
def test_reading_multiple_specific_sheets(self, ext):
# Test reading specific sheetnames by specifying a mixed list
# of integers and strings, and confirm that duplicated sheet
# references (positions/names) are removed properly.
# Ensure a dict is returned
# See PR #9450
basename = 'test_multisheet'
# Explicitly request duplicates. Only the set should be returned.
expected_keys = [2, 'Charlie', 'Charlie']
dfs = self.get_exceldf(basename, ext, sheet_name=expected_keys)
expected_keys = list(set(expected_keys))
tm.assert_contains_all(expected_keys, dfs.keys())
assert len(expected_keys) == len(dfs.keys())
def test_reading_all_sheets_with_blank(self, ext):
# Test reading all sheetnames by setting sheetname to None,
# In the case where some sheets are blank.
# Issue #11711
basename = 'blank_with_header'
dfs = self.get_exceldf(basename, ext, sheet_name=None)
expected_keys = ['Sheet1', 'Sheet2', 'Sheet3']
tm.assert_contains_all(expected_keys, dfs.keys())
# GH6403
def test_read_excel_blank(self, ext):
actual = self.get_exceldf('blank', ext, 'Sheet1')
tm.assert_frame_equal(actual, DataFrame())
def test_read_excel_blank_with_header(self, ext):
expected = DataFrame(columns=['col_1', 'col_2'])
actual = self.get_exceldf('blank_with_header', ext, 'Sheet1')
tm.assert_frame_equal(actual, expected)
@td.skip_if_no("xlwt")
@td.skip_if_no("openpyxl")
@pytest.mark.parametrize("header,expected", [
(None, DataFrame([np.nan] * 4)),
(0, DataFrame({"Unnamed: 0": [np.nan] * 3}))
])
def test_read_one_empty_col_no_header(self, ext, header, expected):
# xref gh-12292
filename = "no_header"
df = pd.DataFrame(
[["", 1, 100],
["", 2, 200],
["", 3, 300],
["", 4, 400]]
)
with ensure_clean(ext) as path:
df.to_excel(path, filename, index=False, header=False)
result = read_excel(path, filename, usecols=[0], header=header)
tm.assert_frame_equal(result, expected)
@td.skip_if_no("xlwt")
@td.skip_if_no("openpyxl")
@pytest.mark.parametrize("header,expected", [
(None, DataFrame([0] + [np.nan] * 4)),
(0, DataFrame([np.nan] * 4))
])
def test_read_one_empty_col_with_header(self, ext, header, expected):
filename = "with_header"
df = pd.DataFrame(
[["", 1, 100],
["", 2, 200],
["", 3, 300],
["", 4, 400]]
)
with ensure_clean(ext) as path:
df.to_excel(path, 'with_header', index=False, header=True)
result = read_excel(path, filename, usecols=[0], header=header)
tm.assert_frame_equal(result, expected)
@td.skip_if_no('openpyxl')
@td.skip_if_no('xlwt')
def test_set_column_names_in_parameter(self, ext):
# GH 12870 : pass down column names associated with
# keyword argument names
refdf = pd.DataFrame([[1, 'foo'], [2, 'bar'],
[3, 'baz']], columns=['a', 'b'])
with ensure_clean(ext) as pth:
with ExcelWriter(pth) as writer:
refdf.to_excel(writer, 'Data_no_head',
header=False, index=False)
refdf.to_excel(writer, 'Data_with_head', index=False)
refdf.columns = ['A', 'B']
with ExcelFile(pth) as reader:
xlsdf_no_head = read_excel(reader, 'Data_no_head',
header=None, names=['A', 'B'])
xlsdf_with_head = read_excel(reader, 'Data_with_head',
index_col=None, names=['A', 'B'])
tm.assert_frame_equal(xlsdf_no_head, refdf)
tm.assert_frame_equal(xlsdf_with_head, refdf)
def test_date_conversion_overflow(self, ext):
# GH 10001 : pandas.ExcelFile ignore parse_dates=False
expected = pd.DataFrame([[pd.Timestamp('2016-03-12'), 'Marc Johnson'],
[pd.Timestamp('2016-03-16'), 'Jack Black'],
[1e+20, 'Timothy Brown']],
columns=['DateColWithBigInt', 'StringCol'])
result = self.get_exceldf('testdateoverflow', ext)
tm.assert_frame_equal(result, expected)
@td.skip_if_no("xlrd", "1.0.1") # see gh-22682
def test_sheet_name_and_sheetname(self, ext):
# gh-10559: Minor improvement: Change "sheet_name" to "sheetname"
# gh-10969: DOC: Consistent var names (sheetname vs sheet_name)
# gh-12604: CLN GH10559 Rename sheetname variable to sheet_name
# gh-20920: ExcelFile.parse() and pd.read_xlsx() have different
# behavior for "sheetname" argument
filename = "test1"
sheet_name = "Sheet1"
df_ref = self.get_csv_refdf(filename)
df1 = self.get_exceldf(filename, ext,
sheet_name=sheet_name, index_col=0) # doc
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
with ignore_xlrd_time_clock_warning():
df2 = self.get_exceldf(filename, ext, index_col=0,
sheetname=sheet_name) # backward compat
excel = self.get_excelfile(filename, ext)
df1_parse = excel.parse(sheet_name=sheet_name, index_col=0) # doc
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
df2_parse = excel.parse(index_col=0,
sheetname=sheet_name) # backward compat
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
tm.assert_frame_equal(df1_parse, df_ref, check_names=False)
tm.assert_frame_equal(df2_parse, df_ref, check_names=False)
def test_sheet_name_both_raises(self, ext):
with pytest.raises(TypeError, match="Cannot specify both"):
self.get_exceldf('test1', ext, sheetname='Sheet1',
sheet_name='Sheet1')
excel = self.get_excelfile('test1', ext)
with pytest.raises(TypeError, match="Cannot specify both"):
excel.parse(sheetname='Sheet1',
sheet_name='Sheet1')
def test_excel_read_buffer(self, ext):
pth = os.path.join(self.dirpath, 'test1' + ext)
expected = read_excel(pth, 'Sheet1', index_col=0)
with open(pth, 'rb') as f:
actual = read_excel(f, 'Sheet1', index_col=0)
tm.assert_frame_equal(expected, actual)
with open(pth, 'rb') as f:
xls = ExcelFile(f)
actual = read_excel(xls, 'Sheet1', index_col=0)
tm.assert_frame_equal(expected, actual)
def test_bad_engine_raises(self, ext):
bad_engine = 'foo'
with pytest.raises(ValueError, match="Unknown engine: foo"):
read_excel('', engine=bad_engine)
@tm.network
def test_read_from_http_url(self, ext):
url = ('https://raw.github.com/pandas-dev/pandas/master/'
'pandas/tests/io/data/test1' + ext)
url_table = read_excel(url)
local_table = self.get_exceldf('test1', ext)
tm.assert_frame_equal(url_table, local_table)
@td.skip_if_not_us_locale
def test_read_from_s3_url(self, ext, s3_resource):
# Bucket "pandas-test" created in tests/io/conftest.py
file_name = os.path.join(self.dirpath, 'test1' + ext)
with open(file_name, "rb") as f:
s3_resource.Bucket("pandas-test").put_object(Key="test1" + ext,
Body=f)
url = ('s3://pandas-test/test1' + ext)
url_table = read_excel(url)
local_table = self.get_exceldf('test1', ext)
tm.assert_frame_equal(url_table, local_table)
@pytest.mark.slow
# ignore warning from old xlrd
@pytest.mark.filterwarnings("ignore:This metho:PendingDeprecationWarning")
def test_read_from_file_url(self, ext):
# FILE
localtable = os.path.join(self.dirpath, 'test1' + ext)
local_table = read_excel(localtable)
try:
url_table = read_excel('file://localhost/' + localtable)
except URLError:
# fails on some systems
import platform
pytest.skip("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
@td.skip_if_no('pathlib')
def test_read_from_pathlib_path(self, ext):
# GH12655
from pathlib import Path
str_path = os.path.join(self.dirpath, 'test1' + ext)
expected = read_excel(str_path, 'Sheet1', index_col=0)
path_obj = Path(self.dirpath, 'test1' + ext)
actual = read_excel(path_obj, 'Sheet1', index_col=0)
tm.assert_frame_equal(expected, actual)
@td.skip_if_no('py.path')
def test_read_from_py_localpath(self, ext):
# GH12655
from py.path import local as LocalPath
str_path = os.path.join(self.dirpath, 'test1' + ext)
expected = read_excel(str_path, 'Sheet1', index_col=0)
abs_dir = os.path.abspath(self.dirpath)
path_obj = LocalPath(abs_dir).join('test1' + ext)
actual = read_excel(path_obj, 'Sheet1', index_col=0)
tm.assert_frame_equal(expected, actual)
def test_reader_closes_file(self, ext):
pth = os.path.join(self.dirpath, 'test1' + ext)
f = open(pth, 'rb')
with ExcelFile(f) as xlsx:
# parses okay
read_excel(xlsx, 'Sheet1', index_col=0)
assert f.closed
@td.skip_if_no("xlwt")
@td.skip_if_no("openpyxl")
def test_creating_and_reading_multiple_sheets(self, ext):
# see gh-9450
#
# Test reading multiple sheets, from a runtime
# created Excel file with multiple sheets.
def tdf(col_sheet_name):
d, i = [11, 22, 33], [1, 2, 3]
return DataFrame(d, i, columns=[col_sheet_name])
sheets = ["AAA", "BBB", "CCC"]
dfs = [tdf(s) for s in sheets]
dfs = dict(zip(sheets, dfs))
with ensure_clean(ext) as pth:
with ExcelWriter(pth) as ew:
for sheetname, df in iteritems(dfs):
df.to_excel(ew, sheetname)
dfs_returned = read_excel(pth, sheet_name=sheets, index_col=0)
for s in sheets:
tm.assert_frame_equal(dfs[s], dfs_returned[s])
def test_reader_seconds(self, ext):
# Test reading times with and without milliseconds. GH5945.
expected = DataFrame.from_dict({"Time": [time(1, 2, 3),
time(2, 45, 56, 100000),
time(4, 29, 49, 200000),
time(6, 13, 42, 300000),
time(7, 57, 35, 400000),
time(9, 41, 28, 500000),
time(11, 25, 21, 600000),
time(13, 9, 14, 700000),
time(14, 53, 7, 800000),
time(16, 37, 0, 900000),
time(18, 20, 54)]})
actual = self.get_exceldf('times_1900', ext, 'Sheet1')
tm.assert_frame_equal(actual, expected)
actual = self.get_exceldf('times_1904', ext, 'Sheet1')
tm.assert_frame_equal(actual, expected)
def test_read_excel_multiindex(self, ext):
# see gh-4679
mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]])
mi_file = os.path.join(self.dirpath, "testmultiindex" + ext)
# "mi_column" sheet
expected = DataFrame([[1, 2.5, pd.Timestamp("2015-01-01"), True],
[2, 3.5, pd.Timestamp("2015-01-02"), False],
[3, 4.5, pd.Timestamp("2015-01-03"), False],
[4, 5.5, pd.Timestamp("2015-01-04"), True]],
columns=mi)
actual = read_excel(mi_file, "mi_column", header=[0, 1], index_col=0)
tm.assert_frame_equal(actual, expected)
# "mi_index" sheet
expected.index = mi
expected.columns = ["a", "b", "c", "d"]
actual = read_excel(mi_file, "mi_index", index_col=[0, 1])
tm.assert_frame_equal(actual, expected, check_names=False)
# "both" sheet
expected.columns = mi
actual = read_excel(mi_file, "both", index_col=[0, 1], header=[0, 1])
tm.assert_frame_equal(actual, expected, check_names=False)
# "mi_index_name" sheet
expected.columns = ["a", "b", "c", "d"]
expected.index = mi.set_names(["ilvl1", "ilvl2"])
actual = read_excel(mi_file, "mi_index_name", index_col=[0, 1])
tm.assert_frame_equal(actual, expected)
# "mi_column_name" sheet
expected.index = list(range(4))
expected.columns = mi.set_names(["c1", "c2"])
actual = read_excel(mi_file, "mi_column_name",
header=[0, 1], index_col=0)
tm.assert_frame_equal(actual, expected)
# see gh-11317
# "name_with_int" sheet
expected.columns = mi.set_levels(
[1, 2], level=1).set_names(["c1", "c2"])
actual = read_excel(mi_file, "name_with_int",
index_col=0, header=[0, 1])
tm.assert_frame_equal(actual, expected)
# "both_name" sheet
expected.columns = mi.set_names(["c1", "c2"])
expected.index = mi.set_names(["ilvl1", "ilvl2"])
actual = read_excel(mi_file, "both_name",
index_col=[0, 1], header=[0, 1])
tm.assert_frame_equal(actual, expected)
# "both_skiprows" sheet
actual = read_excel(mi_file, "both_name_skiprows", index_col=[0, 1],
header=[0, 1], skiprows=2)
tm.assert_frame_equal(actual, expected)
def test_read_excel_multiindex_header_only(self, ext):
# see gh-11733.
#
# Don't try to parse a header name if there isn't one.
mi_file = os.path.join(self.dirpath, "testmultiindex" + ext)
result = read_excel(mi_file, "index_col_none", header=[0, 1])
exp_columns = MultiIndex.from_product([("A", "B"), ("key", "val")])
expected = DataFrame([[1, 2, 3, 4]] * 2, columns=exp_columns)
tm.assert_frame_equal(result, expected)
@td.skip_if_no("xlsxwriter")
def test_read_excel_multiindex_empty_level(self, ext):
# see gh-12453
with ensure_clean(ext) as path:
df = DataFrame({
("One", "x"): {0: 1},
("Two", "X"): {0: 3},
("Two", "Y"): {0: 7},
("Zero", ""): {0: 0}
})
expected = DataFrame({
("One", "x"): {0: 1},
("Two", "X"): {0: 3},
("Two", "Y"): {0: 7},
("Zero", "Unnamed: 4_level_1"): {0: 0}
})
df.to_excel(path)
actual = pd.read_excel(path, header=[0, 1], index_col=0)
tm.assert_frame_equal(actual, expected)
df = pd.DataFrame({
("Beg", ""): {0: 0},
("Middle", "x"): {0: 1},
("Tail", "X"): {0: 3},
("Tail", "Y"): {0: 7}
})
expected = pd.DataFrame({
("Beg", "Unnamed: 1_level_1"): {0: 0},
("Middle", "x"): {0: 1},
("Tail", "X"): {0: 3},
("Tail", "Y"): {0: 7}
})
df.to_excel(path)
actual = pd.read_excel(path, header=[0, 1], index_col=0)
tm.assert_frame_equal(actual, expected)
@td.skip_if_no("xlsxwriter")
@pytest.mark.parametrize("c_idx_names", [True, False])
@pytest.mark.parametrize("r_idx_names", [True, False])
@pytest.mark.parametrize("c_idx_levels", [1, 3])
@pytest.mark.parametrize("r_idx_levels", [1, 3])
def test_excel_multindex_roundtrip(self, ext, c_idx_names, r_idx_names,
c_idx_levels, r_idx_levels):
# see gh-4679
with ensure_clean(ext) as pth:
if c_idx_levels == 1 and c_idx_names:
pytest.skip("Column index name cannot be "
"serialized unless it's a MultiIndex")
            # Empty name case is currently read in as
# unnamed levels, not Nones.
check_names = r_idx_names or r_idx_levels <= 1
df = mkdf(5, 5, c_idx_names, r_idx_names,
c_idx_levels, r_idx_levels)
df.to_excel(pth)
act = pd.read_excel(pth, index_col=list(range(r_idx_levels)),
header=list(range(c_idx_levels)))
tm.assert_frame_equal(df, act, check_names=check_names)
df.iloc[0, :] = np.nan
df.to_excel(pth)
act = pd.read_excel(pth, index_col=list(range(r_idx_levels)),
header=list(range(c_idx_levels)))
tm.assert_frame_equal(df, act, check_names=check_names)
df.iloc[-1, :] = np.nan
df.to_excel(pth)
act = pd.read_excel(pth, index_col=list(range(r_idx_levels)),
header=list(range(c_idx_levels)))
tm.assert_frame_equal(df, act, check_names=check_names)
def test_excel_old_index_format(self, ext):
# see gh-4679
filename = "test_index_name_pre17" + ext
in_file = os.path.join(self.dirpath, filename)
# We detect headers to determine if index names exist, so
# that "index" name in the "names" version of the data will
# now be interpreted as rows that include null data.
data = np.array([[None, None, None, None, None],
["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"],
["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"],
["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"],
["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"],
["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"]])
columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"]
mi = MultiIndex(levels=[["R0", "R_l0_g0", "R_l0_g1",
"R_l0_g2", "R_l0_g3", "R_l0_g4"],
["R1", "R_l1_g0", "R_l1_g1",
"R_l1_g2", "R_l1_g3", "R_l1_g4"]],
codes=[[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]],
names=[None, None])
si = Index(["R0", "R_l0_g0", "R_l0_g1", "R_l0_g2",
"R_l0_g3", "R_l0_g4"], name=None)
expected = pd.DataFrame(data, index=si, columns=columns)
actual = pd.read_excel(in_file, "single_names", index_col=0)
tm.assert_frame_equal(actual, expected)
expected.index = mi
actual = pd.read_excel(in_file, "multi_names", index_col=[0, 1])
tm.assert_frame_equal(actual, expected)
# The analogous versions of the "names" version data
# where there are explicitly no names for the indices.
data = np.array([["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"],
["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"],
["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"],
["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"],
["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"]])
columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"]
mi = MultiIndex(levels=[["R_l0_g0", "R_l0_g1", "R_l0_g2",
"R_l0_g3", "R_l0_g4"],
["R_l1_g0", "R_l1_g1", "R_l1_g2",
"R_l1_g3", "R_l1_g4"]],
codes=[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]],
names=[None, None])
si = Index(["R_l0_g0", "R_l0_g1", "R_l0_g2",
"R_l0_g3", "R_l0_g4"], name=None)
expected = pd.DataFrame(data, index=si, columns=columns)
actual = pd.read_excel(in_file, "single_no_names", index_col=0)
tm.assert_frame_equal(actual, expected)
expected.index = mi
actual = pd.read_excel(in_file, "multi_no_names", index_col=[0, 1])
tm.assert_frame_equal(actual, expected, check_names=False)
def test_read_excel_bool_header_arg(self, ext):
# GH 6114
for arg in [True, False]:
with pytest.raises(TypeError):
pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
header=arg)
def test_read_excel_chunksize(self, ext):
# GH 8011
with pytest.raises(NotImplementedError):
pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
chunksize=100)
@td.skip_if_no("xlwt")
@td.skip_if_no("openpyxl")
def test_read_excel_parse_dates(self, ext):
# see gh-11544, gh-12051
df = DataFrame(
{"col": [1, 2, 3],
"date_strings": pd.date_range("2012-01-01", periods=3)})
df2 = df.copy()
df2["date_strings"] = df2["date_strings"].dt.strftime("%m/%d/%Y")
with ensure_clean(ext) as pth:
df2.to_excel(pth)
res = read_excel(pth, index_col=0)
tm.assert_frame_equal(df2, res)
res = read_excel(pth, parse_dates=["date_strings"], index_col=0)
tm.assert_frame_equal(df, res)
date_parser = lambda x: pd.datetime.strptime(x, "%m/%d/%Y")
res = read_excel(pth, parse_dates=["date_strings"],
date_parser=date_parser, index_col=0)
tm.assert_frame_equal(df, res)
def test_read_excel_skiprows_list(self, ext):
# GH 4903
actual = pd.read_excel(os.path.join(self.dirpath,
'testskiprows' + ext),
'skiprows_list', skiprows=[0, 2])
expected = DataFrame([[1, 2.5, pd.Timestamp('2015-01-01'), True],
[2, 3.5, pd.Timestamp('2015-01-02'), False],
[3, 4.5, pd.Timestamp('2015-01-03'), False],
[4, 5.5, pd.Timestamp('2015-01-04'), True]],
columns=['a', 'b', 'c', 'd'])
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel(os.path.join(self.dirpath,
'testskiprows' + ext),
'skiprows_list', skiprows=np.array([0, 2]))
tm.assert_frame_equal(actual, expected)
def test_read_excel_nrows(self, ext):
# GH 16645
num_rows_to_pull = 5
actual = pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
nrows=num_rows_to_pull)
expected = pd.read_excel(os.path.join(self.dirpath,
'test1' + ext))
expected = expected[:num_rows_to_pull]
tm.assert_frame_equal(actual, expected)
def test_read_excel_nrows_greater_than_nrows_in_file(self, ext):
# GH 16645
expected = pd.read_excel(os.path.join(self.dirpath,
'test1' + ext))
num_records_in_file = len(expected)
num_rows_to_pull = num_records_in_file + 10
actual = pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
nrows=num_rows_to_pull)
tm.assert_frame_equal(actual, expected)
def test_read_excel_nrows_non_integer_parameter(self, ext):
# GH 16645
msg = "'nrows' must be an integer >=0"
with pytest.raises(ValueError, match=msg):
pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
nrows='5')
def test_read_excel_squeeze(self, ext):
# GH 12157
f = os.path.join(self.dirpath, 'test_squeeze' + ext)
actual = pd.read_excel(f, 'two_columns', index_col=0, squeeze=True)
expected = pd.Series([2, 3, 4], [4, 5, 6], name='b')
expected.index.name = 'a'
tm.assert_series_equal(actual, expected)
actual = pd.read_excel(f, 'two_columns', squeeze=True)
expected = pd.DataFrame({'a': [4, 5, 6],
'b': [2, 3, 4]})
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel(f, 'one_column', squeeze=True)
expected = pd.Series([1, 2, 3], name='a')
tm.assert_series_equal(actual, expected)
@pytest.mark.parametrize("ext", ['.xls', '.xlsx', '.xlsm'])
class TestXlrdReader(ReadingTestsBase):
"""
This is the base class for the xlrd tests, and 3 different file formats
are supported: xls, xlsx, xlsm
"""
@td.skip_if_no("xlwt")
def test_read_xlrd_book(self, ext):
import xlrd
df = self.frame
engine = "xlrd"
sheet_name = "SheetA"
with ensure_clean(ext) as pth:
df.to_excel(pth, sheet_name)
book = xlrd.open_workbook(pth)
with ExcelFile(book, engine=engine) as xl:
result = read_excel(xl, sheet_name, index_col=0)
tm.assert_frame_equal(df, result)
result = read_excel(book, sheet_name=sheet_name,
engine=engine, index_col=0)
tm.assert_frame_equal(df, result)
class _WriterBase(SharedItems):
@pytest.fixture(autouse=True)
def set_engine_and_path(self, request, merge_cells, engine, ext):
"""Fixture to set engine and open file for use in each test case
Rather than requiring `engine=...` to be provided explicitly as an
argument in each test, this fixture sets a global option to dictate
which engine should be used to write Excel files. After executing
the test it rolls back said change to the global option.
It also uses a context manager to open a temporary excel file for
the function to write to, accessible via `self.path`
Notes
-----
This fixture will run as part of each test method defined in the
class and any subclasses, on account of the `autouse=True`
argument
"""
option_name = 'io.excel.{ext}.writer'.format(ext=ext.strip('.'))
prev_engine = get_option(option_name)
set_option(option_name, engine)
with ensure_clean(ext) as path:
self.path = path
yield
set_option(option_name, prev_engine) # Roll back option change
@pytest.mark.parametrize("merge_cells", [True, False])
@pytest.mark.parametrize("engine,ext", [
pytest.param('openpyxl', '.xlsx', marks=pytest.mark.skipif(
not td.safe_import('openpyxl'), reason='No openpyxl')),
pytest.param('openpyxl', '.xlsm', marks=pytest.mark.skipif(
not td.safe_import('openpyxl'), reason='No openpyxl')),
pytest.param('xlwt', '.xls', marks=pytest.mark.skipif(
not td.safe_import('xlwt'), reason='No xlwt')),
pytest.param('xlsxwriter', '.xlsx', marks=pytest.mark.skipif(
not td.safe_import('xlsxwriter'), reason='No xlsxwriter'))
])
class TestExcelWriter(_WriterBase):
# Base class for test cases to run with different Excel writers.
def test_excel_sheet_by_name_raise(self, *_):
import xlrd
gt = DataFrame(np.random.randn(10, 2))
gt.to_excel(self.path)
xl = ExcelFile(self.path)
df = read_excel(xl, 0, index_col=0)
tm.assert_frame_equal(gt, df)
with pytest.raises(xlrd.XLRDError):
read_excel(xl, "0")
def test_excel_writer_context_manager(self, *_):
with ExcelWriter(self.path) as writer:
self.frame.to_excel(writer, "Data1")
self.frame2.to_excel(writer, "Data2")
with ExcelFile(self.path) as reader:
found_df = read_excel(reader, "Data1", index_col=0)
found_df2 = read_excel(reader, "Data2", index_col=0)
tm.assert_frame_equal(found_df, self.frame)
tm.assert_frame_equal(found_df2, self.frame2)
def test_roundtrip(self, merge_cells, engine, ext):
self.frame['A'][:5] = nan
self.frame.to_excel(self.path, 'test1')
self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
self.frame.to_excel(self.path, 'test1', header=False)
self.frame.to_excel(self.path, 'test1', index=False)
# test roundtrip
self.frame.to_excel(self.path, 'test1')
recons = read_excel(self.path, 'test1', index_col=0)
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(self.path, 'test1', index=False)
recons = read_excel(self.path, 'test1', index_col=None)
recons.index = self.frame.index
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(self.path, 'test1', na_rep='NA')
recons = read_excel(self.path, 'test1', index_col=0, na_values=['NA'])
tm.assert_frame_equal(self.frame, recons)
# GH 3611
self.frame.to_excel(self.path, 'test1', na_rep='88')
recons = read_excel(self.path, 'test1', index_col=0, na_values=['88'])
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(self.path, 'test1', na_rep='88')
recons = read_excel(self.path, 'test1', index_col=0,
na_values=[88, 88.0])
tm.assert_frame_equal(self.frame, recons)
# GH 6573
self.frame.to_excel(self.path, 'Sheet1')
recons = read_excel(self.path, index_col=0)
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(self.path, '0')
recons = read_excel(self.path, index_col=0)
tm.assert_frame_equal(self.frame, recons)
# GH 8825 Pandas Series should provide to_excel method
s = self.frame["A"]
s.to_excel(self.path)
recons = read_excel(self.path, index_col=0)
tm.assert_frame_equal(s.to_frame(), recons)
def test_mixed(self, merge_cells, engine, ext):
self.mixed_frame.to_excel(self.path, 'test1')
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1', index_col=0)
tm.assert_frame_equal(self.mixed_frame, recons)
def test_ts_frame(self, *_):
df = tm.makeTimeDataFrame()[:5]
df.to_excel(self.path, "test1")
reader = ExcelFile(self.path)
recons = read_excel(reader, "test1", index_col=0)
tm.assert_frame_equal(df, recons)
def test_basics_with_nan(self, merge_cells, engine, ext):
self.frame['A'][:5] = nan
self.frame.to_excel(self.path, 'test1')
self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
self.frame.to_excel(self.path, 'test1', header=False)
self.frame.to_excel(self.path, 'test1', index=False)
@pytest.mark.parametrize("np_type", [
np.int8, np.int16, np.int32, np.int64])
def test_int_types(self, merge_cells, engine, ext, np_type):
# Test np.int values read come back as int
# (rather than float which is Excel's format).
frame = DataFrame(np.random.randint(-10, 10, size=(10, 2)),
dtype=np_type)
frame.to_excel(self.path, "test1")
reader = ExcelFile(self.path)
recons = read_excel(reader, "test1", index_col=0)
int_frame = frame.astype(np.int64)
tm.assert_frame_equal(int_frame, recons)
recons2 = read_excel(self.path, "test1", index_col=0)
tm.assert_frame_equal(int_frame, recons2)
# Test with convert_float=False comes back as float.
float_frame = frame.astype(float)
recons = read_excel(self.path, "test1",
convert_float=False, index_col=0)
tm.assert_frame_equal(recons, float_frame,
check_index_type=False,
check_column_type=False)
@pytest.mark.parametrize("np_type", [
np.float16, np.float32, np.float64])
def test_float_types(self, merge_cells, engine, ext, np_type):
# Test np.float values read come back as float.
frame = DataFrame(np.random.random_sample(10), dtype=np_type)
frame.to_excel(self.path, "test1")
reader = ExcelFile(self.path)
recons = read_excel(reader, "test1", index_col=0).astype(np_type)
tm.assert_frame_equal(frame, recons, check_dtype=False)
@pytest.mark.parametrize("np_type", [np.bool8, np.bool_])
def test_bool_types(self, merge_cells, engine, ext, np_type):
        # Test that np.bool values survive an Excel round trip.
frame = (DataFrame([1, 0, True, False], dtype=np_type))
frame.to_excel(self.path, "test1")
reader = ExcelFile(self.path)
recons = read_excel(reader, "test1", index_col=0).astype(np_type)
tm.assert_frame_equal(frame, recons)
def test_inf_roundtrip(self, *_):
frame = DataFrame([(1, np.inf), (2, 3), (5, -np.inf)])
frame.to_excel(self.path, "test1")
reader = ExcelFile(self.path)
recons = read_excel(reader, "test1", index_col=0)
tm.assert_frame_equal(frame, recons)
def test_sheets(self, merge_cells, engine, ext):
self.frame['A'][:5] = nan
self.frame.to_excel(self.path, 'test1')
self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
self.frame.to_excel(self.path, 'test1', header=False)
self.frame.to_excel(self.path, 'test1', index=False)
# Test writing to separate sheets
writer = ExcelWriter(self.path)
self.frame.to_excel(writer, 'test1')
self.tsframe.to_excel(writer, 'test2')
writer.save()
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1', index_col=0)
tm.assert_frame_equal(self.frame, recons)
recons = read_excel(reader, 'test2', index_col=0)
tm.assert_frame_equal(self.tsframe, recons)
assert 2 == len(reader.sheet_names)
assert 'test1' == reader.sheet_names[0]
assert 'test2' == reader.sheet_names[1]
def test_colaliases(self, merge_cells, engine, ext):
self.frame['A'][:5] = nan
self.frame.to_excel(self.path, 'test1')
self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
self.frame.to_excel(self.path, 'test1', header=False)
self.frame.to_excel(self.path, 'test1', index=False)
# column aliases
col_aliases = Index(['AA', 'X', 'Y', 'Z'])
self.frame2.to_excel(self.path, 'test1', header=col_aliases)
reader = ExcelFile(self.path)
rs = read_excel(reader, 'test1', index_col=0)
xp = self.frame2.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
def test_roundtrip_indexlabels(self, merge_cells, engine, ext):
self.frame['A'][:5] = nan
self.frame.to_excel(self.path, 'test1')
self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
self.frame.to_excel(self.path, 'test1', header=False)
self.frame.to_excel(self.path, 'test1', index=False)
# test index_label
frame = (DataFrame(np.random.randn(10, 2)) >= 0)
frame.to_excel(self.path, 'test1',
index_label=['test'],
merge_cells=merge_cells)
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1',
index_col=0,
).astype(np.int64)
frame.index.names = ['test']
assert frame.index.names == recons.index.names
frame = (DataFrame(np.random.randn(10, 2)) >= 0)
frame.to_excel(self.path,
'test1',
index_label=['test', 'dummy', 'dummy2'],
merge_cells=merge_cells)
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1',
index_col=0,
).astype(np.int64)
frame.index.names = ['test']
assert frame.index.names == recons.index.names
frame = (DataFrame(np.random.randn(10, 2)) >= 0)
frame.to_excel(self.path,
'test1',
index_label='test',
merge_cells=merge_cells)
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1',
index_col=0,
).astype(np.int64)
frame.index.names = ['test']
tm.assert_frame_equal(frame, recons.astype(bool))
self.frame.to_excel(self.path,
'test1',
columns=['A', 'B', 'C', 'D'],
index=False, merge_cells=merge_cells)
# take 'A' and 'B' as indexes (same row as cols 'C', 'D')
df = self.frame.copy()
df = df.set_index(['A', 'B'])
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1', index_col=[0, 1])
tm.assert_frame_equal(df, recons, check_less_precise=True)
def test_excel_roundtrip_indexname(self, merge_cells, engine, ext):
df = DataFrame(np.random.randn(10, 4))
df.index.name = 'foo'
df.to_excel(self.path, merge_cells=merge_cells)
xf = ExcelFile(self.path)
result = read_excel(xf, xf.sheet_names[0],
index_col=0)
tm.assert_frame_equal(result, df)
assert result.index.name == 'foo'
def test_excel_roundtrip_datetime(self, merge_cells, *_):
# datetime.date, not sure what to test here exactly
tsf = self.tsframe.copy()
tsf.index = [x.date() for x in self.tsframe.index]
tsf.to_excel(self.path, "test1", merge_cells=merge_cells)
reader = ExcelFile(self.path)
recons = read_excel(reader, "test1", index_col=0)
tm.assert_frame_equal(self.tsframe, recons)
def test_excel_date_datetime_format(self, merge_cells, engine, ext):
# see gh-4133
#
# Excel output format strings
df = DataFrame([[date(2014, 1, 31),
date(1999, 9, 24)],
[datetime(1998, 5, 26, 23, 33, 4),
datetime(2014, 2, 28, 13, 5, 13)]],
index=["DATE", "DATETIME"], columns=["X", "Y"])
df_expected = DataFrame([[datetime(2014, 1, 31),
datetime(1999, 9, 24)],
[datetime(1998, 5, 26, 23, 33, 4),
datetime(2014, 2, 28, 13, 5, 13)]],
index=["DATE", "DATETIME"], columns=["X", "Y"])
with ensure_clean(ext) as filename2:
writer1 = ExcelWriter(self.path)
writer2 = ExcelWriter(filename2,
date_format="DD.MM.YYYY",
datetime_format="DD.MM.YYYY HH-MM-SS")
df.to_excel(writer1, "test1")
df.to_excel(writer2, "test1")
writer1.close()
writer2.close()
reader1 = ExcelFile(self.path)
reader2 = ExcelFile(filename2)
rs1 = read_excel(reader1, "test1", index_col=0)
rs2 = read_excel(reader2, "test1", index_col=0)
tm.assert_frame_equal(rs1, rs2)
# Since the reader returns a datetime object for dates,
# we need to use df_expected to check the result.
tm.assert_frame_equal(rs2, df_expected)
def test_to_excel_interval_no_labels(self, *_):
# see gh-19242
#
# Test writing Interval without labels.
frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)),
dtype=np.int64)
expected = frame.copy()
frame["new"] = pd.cut(frame[0], 10)
expected["new"] = pd.cut(expected[0], 10).astype(str)
frame.to_excel(self.path, "test1")
reader = ExcelFile(self.path)
recons = read_excel(reader, "test1", index_col=0)
tm.assert_frame_equal(expected, recons)
def test_to_excel_interval_labels(self, *_):
# see gh-19242
#
# Test writing Interval with labels.
frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)),
dtype=np.int64)
expected = frame.copy()
intervals = pd.cut(frame[0], 10, labels=["A", "B", "C", "D", "E",
"F", "G", "H", "I", "J"])
frame["new"] = intervals
expected["new"] = pd.Series(list(intervals))
frame.to_excel(self.path, "test1")
reader = ExcelFile(self.path)
recons = read_excel(reader, "test1", index_col=0)
tm.assert_frame_equal(expected, recons)
def test_to_excel_timedelta(self, *_):
# see gh-19242, gh-9155
#
# Test writing timedelta to xls.
frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)),
columns=["A"], dtype=np.int64)
expected = frame.copy()
frame["new"] = frame["A"].apply(lambda x: timedelta(seconds=x))
expected["new"] = expected["A"].apply(
lambda x: timedelta(seconds=x).total_seconds() / float(86400))
frame.to_excel(self.path, "test1")
reader = ExcelFile(self.path)
recons = read_excel(reader, "test1", index_col=0)
tm.assert_frame_equal(expected, recons)
def test_to_excel_periodindex(self, merge_cells, engine, ext):
frame = self.tsframe
xp = frame.resample('M', kind='period').mean()
xp.to_excel(self.path, 'sht1')
reader = ExcelFile(self.path)
rs = read_excel(reader, 'sht1', index_col=0)
tm.assert_frame_equal(xp, rs.to_period('M'))
def test_to_excel_multiindex(self, merge_cells, engine, ext):
frame = self.frame
arrays = np.arange(len(frame.index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays,
names=['first', 'second'])
frame.index = new_index
frame.to_excel(self.path, 'test1', header=False)
frame.to_excel(self.path, 'test1', columns=['A', 'B'])
# round trip
frame.to_excel(self.path, 'test1', merge_cells=merge_cells)
reader = ExcelFile(self.path)
df = read_excel(reader, 'test1', index_col=[0, 1])
tm.assert_frame_equal(frame, df)
# GH13511
def test_to_excel_multiindex_nan_label(self, merge_cells, engine, ext):
frame = pd.DataFrame({'A': [None, 2, 3],
'B': [10, 20, 30],
'C': np.random.sample(3)})
frame = frame.set_index(['A', 'B'])
frame.to_excel(self.path, merge_cells=merge_cells)
df = read_excel(self.path, index_col=[0, 1])
tm.assert_frame_equal(frame, df)
# Test for Issue 11328. If column indices are integers, make
# sure they are handled correctly for either setting of
# merge_cells
def test_to_excel_multiindex_cols(self, merge_cells, engine, ext):
frame = self.frame
arrays = np.arange(len(frame.index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays,
names=['first', 'second'])
frame.index = new_index
new_cols_index = MultiIndex.from_tuples([(40, 1), (40, 2),
(50, 1), (50, 2)])
frame.columns = new_cols_index
header = [0, 1]
if not merge_cells:
header = 0
# round trip
frame.to_excel(self.path, 'test1', merge_cells=merge_cells)
reader = ExcelFile(self.path)
df = read_excel(reader, 'test1', header=header,
index_col=[0, 1])
if not merge_cells:
fm = frame.columns.format(sparsify=False,
adjoin=False, names=False)
frame.columns = [".".join(map(str, q)) for q in zip(*fm)]
tm.assert_frame_equal(frame, df)
def test_to_excel_multiindex_dates(self, merge_cells, engine, ext):
# try multiindex with dates
tsframe = self.tsframe.copy()
new_index = [tsframe.index, np.arange(len(tsframe.index))]
tsframe.index = MultiIndex.from_arrays(new_index)
tsframe.index.names = ['time', 'foo']
tsframe.to_excel(self.path, 'test1', merge_cells=merge_cells)
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1',
index_col=[0, 1])
tm.assert_frame_equal(tsframe, recons)
assert recons.index.names == ('time', 'foo')
def test_to_excel_multiindex_no_write_index(self, merge_cells, engine,
ext):
        # Test writing and re-reading a MI without the index. GH 5616.
# Initial non-MI frame.
frame1 = DataFrame({'a': [10, 20], 'b': [30, 40], 'c': [50, 60]})
# Add a MI.
frame2 = frame1.copy()
multi_index = MultiIndex.from_tuples([(70, 80), (90, 100)])
frame2.index = multi_index
# Write out to Excel without the index.
frame2.to_excel(self.path, 'test1', index=False)
# Read it back in.
reader = ExcelFile(self.path)
frame3 = read_excel(reader, 'test1')
# Test that it is the same as the initial frame.
tm.assert_frame_equal(frame1, frame3)
def test_to_excel_float_format(self, *_):
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=["A", "B"], columns=["X", "Y", "Z"])
df.to_excel(self.path, "test1", float_format="%.2f")
reader = ExcelFile(self.path)
result = read_excel(reader, "test1", index_col=0)
expected = DataFrame([[0.12, 0.23, 0.57],
[12.32, 123123.20, 321321.20]],
index=["A", "B"], columns=["X", "Y", "Z"])
tm.assert_frame_equal(result, expected)
def test_to_excel_output_encoding(self, merge_cells, engine, ext):
# Avoid mixed inferred_type.
df = DataFrame([[u"\u0192", u"\u0193", u"\u0194"],
[u"\u0195", u"\u0196", u"\u0197"]],
index=[u"A\u0192", u"B"],
columns=[u"X\u0193", u"Y", u"Z"])
with ensure_clean("__tmp_to_excel_float_format__." + ext) as filename:
df.to_excel(filename, sheet_name="TestSheet", encoding="utf8")
result = read_excel(filename, "TestSheet",
encoding="utf8", index_col=0)
tm.assert_frame_equal(result, df)
def test_to_excel_unicode_filename(self, merge_cells, engine, ext):
with ensure_clean(u("\u0192u.") + ext) as filename:
try:
f = open(filename, "wb")
except UnicodeEncodeError:
pytest.skip("No unicode file names on this system")
else:
f.close()
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=["A", "B"], columns=["X", "Y", "Z"])
df.to_excel(filename, "test1", float_format="%.2f")
reader = ExcelFile(filename)
result = read_excel(reader, "test1", index_col=0)
expected = DataFrame([[0.12, 0.23, 0.57],
[12.32, 123123.20, 321321.20]],
index=["A", "B"], columns=["X", "Y", "Z"])
tm.assert_frame_equal(result, expected)
# def test_to_excel_header_styling_xls(self, merge_cells, engine, ext):
# import StringIO
# s = StringIO(
# """Date,ticker,type,value
# 2001-01-01,x,close,12.2
# 2001-01-01,x,open ,12.1
# 2001-01-01,y,close,12.2
# 2001-01-01,y,open ,12.1
# 2001-02-01,x,close,12.2
# 2001-02-01,x,open ,12.1
# 2001-02-01,y,close,12.2
# 2001-02-01,y,open ,12.1
# 2001-03-01,x,close,12.2
# 2001-03-01,x,open ,12.1
# 2001-03-01,y,close,12.2
# 2001-03-01,y,open ,12.1""")
# df = read_csv(s, parse_dates=["Date"])
# pdf = df.pivot_table(values="value", rows=["ticker"],
# cols=["Date", "type"])
# try:
# import xlwt
# import xlrd
# except ImportError:
# pytest.skip
# filename = '__tmp_to_excel_header_styling_xls__.xls'
# pdf.to_excel(filename, 'test1')
# wbk = xlrd.open_workbook(filename,
# formatting_info=True)
# assert ["test1"] == wbk.sheet_names()
# ws = wbk.sheet_by_name('test1')
# assert [(0, 1, 5, 7), (0, 1, 3, 5), (0, 1, 1, 3)] == ws.merged_cells
# for i in range(0, 2):
# for j in range(0, 7):
# xfx = ws.cell_xf_index(0, 0)
# cell_xf = wbk.xf_list[xfx]
# font = wbk.font_list
# assert 1 == font[cell_xf.font_index].bold
# assert 1 == cell_xf.border.top_line_style
# assert 1 == cell_xf.border.right_line_style
# assert 1 == cell_xf.border.bottom_line_style
# assert 1 == cell_xf.border.left_line_style
# assert 2 == cell_xf.alignment.hor_align
# os.remove(filename)
# def test_to_excel_header_styling_xlsx(self, merge_cells, engine, ext):
# import StringIO
# s = StringIO(
# """Date,ticker,type,value
# 2001-01-01,x,close,12.2
# 2001-01-01,x,open ,12.1
# 2001-01-01,y,close,12.2
# 2001-01-01,y,open ,12.1
# 2001-02-01,x,close,12.2
# 2001-02-01,x,open ,12.1
# 2001-02-01,y,close,12.2
# 2001-02-01,y,open ,12.1
# 2001-03-01,x,close,12.2
# 2001-03-01,x,open ,12.1
# 2001-03-01,y,close,12.2
# 2001-03-01,y,open ,12.1""")
# df = read_csv(s, parse_dates=["Date"])
# pdf = df.pivot_table(values="value", rows=["ticker"],
# cols=["Date", "type"])
# try:
# import openpyxl
# from openpyxl.cell import get_column_letter
# except ImportError:
# pytest.skip
# if openpyxl.__version__ < '1.6.1':
# pytest.skip
# # test xlsx_styling
# filename = '__tmp_to_excel_header_styling_xlsx__.xlsx'
# pdf.to_excel(filename, 'test1')
# wbk = openpyxl.load_workbook(filename)
# assert ["test1"] == wbk.get_sheet_names()
# ws = wbk.get_sheet_by_name('test1')
# xlsaddrs = ["%s2" % chr(i) for i in range(ord('A'), ord('H'))]
# xlsaddrs += ["A%s" % i for i in range(1, 6)]
# xlsaddrs += ["B1", "D1", "F1"]
# for xlsaddr in xlsaddrs:
# cell = ws.cell(xlsaddr)
# assert cell.style.font.bold
# assert (openpyxl.style.Border.BORDER_THIN ==
# cell.style.borders.top.border_style)
# assert (openpyxl.style.Border.BORDER_THIN ==
# cell.style.borders.right.border_style)
# assert (openpyxl.style.Border.BORDER_THIN ==
# cell.style.borders.bottom.border_style)
# assert (openpyxl.style.Border.BORDER_THIN ==
# cell.style.borders.left.border_style)
# assert (openpyxl.style.Alignment.HORIZONTAL_CENTER ==
# cell.style.alignment.horizontal)
# mergedcells_addrs = ["C1", "E1", "G1"]
# for maddr in mergedcells_addrs:
# assert ws.cell(maddr).merged
# os.remove(filename)
@pytest.mark.parametrize("use_headers", [True, False])
@pytest.mark.parametrize("r_idx_nlevels", [1, 2, 3])
@pytest.mark.parametrize("c_idx_nlevels", [1, 2, 3])
def test_excel_010_hemstring(self, merge_cells, engine, ext,
c_idx_nlevels, r_idx_nlevels, use_headers):
def roundtrip(data, header=True, parser_hdr=0, index=True):
data.to_excel(self.path, header=header,
merge_cells=merge_cells, index=index)
xf = ExcelFile(self.path)
return read_excel(xf, xf.sheet_names[0], header=parser_hdr)
# Basic test.
parser_header = 0 if use_headers else None
res = roundtrip(DataFrame([0]), use_headers, parser_header)
assert res.shape == (1, 2)
assert res.iloc[0, 0] is not np.nan
# More complex tests with multi-index.
nrows = 5
ncols = 3
from pandas.util.testing import makeCustomDataframe as mkdf
# ensure limited functionality in 0.10
# override of gh-2370 until sorted out in 0.11
df = mkdf(nrows, ncols, r_idx_nlevels=r_idx_nlevels,
c_idx_nlevels=c_idx_nlevels)
# This if will be removed once multi-column Excel writing
# is implemented. For now fixing gh-9794.
if c_idx_nlevels > 1:
with pytest.raises(NotImplementedError):
roundtrip(df, use_headers, index=False)
else:
res = roundtrip(df, use_headers)
if use_headers:
assert res.shape == (nrows, ncols + r_idx_nlevels)
else:
# First row taken as columns.
assert res.shape == (nrows - 1, ncols + r_idx_nlevels)
# No NaNs.
for r in range(len(res.index)):
for c in range(len(res.columns)):
assert res.iloc[r, c] is not np.nan
def test_duplicated_columns(self, *_):
# see gh-5235
df = DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]],
columns=["A", "B", "B"])
df.to_excel(self.path, "test1")
expected = DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]],
columns=["A", "B", "B.1"])
# By default, we mangle.
result = read_excel(self.path, "test1", index_col=0)
tm.assert_frame_equal(result, expected)
# Explicitly, we pass in the parameter.
result = read_excel(self.path, "test1", index_col=0,
mangle_dupe_cols=True)
tm.assert_frame_equal(result, expected)
# see gh-11007, gh-10970
df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]],
columns=["A", "B", "A", "B"])
df.to_excel(self.path, "test1")
result = read_excel(self.path, "test1", index_col=0)
expected = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]],
columns=["A", "B", "A.1", "B.1"])
tm.assert_frame_equal(result, expected)
# see gh-10982
df.to_excel(self.path, "test1", index=False, header=False)
result = read_excel(self.path, "test1", header=None)
expected = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]])
tm.assert_frame_equal(result, expected)
msg = "Setting mangle_dupe_cols=False is not supported yet"
with pytest.raises(ValueError, match=msg):
read_excel(self.path, "test1", header=None, mangle_dupe_cols=False)
def test_swapped_columns(self, merge_cells, engine, ext):
# Test for issue #5427.
write_frame = DataFrame({'A': [1, 1, 1],
'B': [2, 2, 2]})
write_frame.to_excel(self.path, 'test1', columns=['B', 'A'])
read_frame = read_excel(self.path, 'test1', header=0)
tm.assert_series_equal(write_frame['A'], read_frame['A'])
tm.assert_series_equal(write_frame['B'], read_frame['B'])
def test_invalid_columns(self, *_):
# see gh-10982
write_frame = DataFrame({"A": [1, 1, 1],
"B": [2, 2, 2]})
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
write_frame.to_excel(self.path, "test1", columns=["B", "C"])
expected = write_frame.reindex(columns=["B", "C"])
read_frame = read_excel(self.path, "test1", index_col=0)
tm.assert_frame_equal(expected, read_frame)
with pytest.raises(KeyError):
write_frame.to_excel(self.path, "test1", columns=["C", "D"])
def test_comment_arg(self, *_):
# see gh-18735
#
# Test the comment argument functionality to read_excel.
# Create file to read in.
df = DataFrame({"A": ["one", "#one", "one"],
"B": ["two", "two", "#two"]})
df.to_excel(self.path, "test_c")
# Read file without comment arg.
result1 = read_excel(self.path, "test_c", index_col=0)
result1.iloc[1, 0] = None
result1.iloc[1, 1] = None
result1.iloc[2, 1] = None
result2 = read_excel(self.path, "test_c", comment="#", index_col=0)
tm.assert_frame_equal(result1, result2)
def test_comment_default(self, merge_cells, engine, ext):
# Re issue #18735
# Test the comment argument default to read_excel
# Create file to read in
df = DataFrame({'A': ['one', '#one', 'one'],
'B': ['two', 'two', '#two']})
df.to_excel(self.path, 'test_c')
# Read file with default and explicit comment=None
result1 = read_excel(self.path, 'test_c')
result2 = read_excel(self.path, 'test_c', comment=None)
tm.assert_frame_equal(result1, result2)
def test_comment_used(self, *_):
# see gh-18735
#
# Test the comment argument is working as expected when used.
# Create file to read in.
df = DataFrame({"A": ["one", "#one", "one"],
"B": ["two", "two", "#two"]})
df.to_excel(self.path, "test_c")
# Test read_frame_comment against manually produced expected output.
expected = DataFrame({"A": ["one", None, "one"],
"B": ["two", None, None]})
result = read_excel(self.path, "test_c", comment="#", index_col=0)
tm.assert_frame_equal(result, expected)
def test_comment_empty_line(self, merge_cells, engine, ext):
# Re issue #18735
# Test that read_excel ignores commented lines at the end of file
df = DataFrame({'a': ['1', '#2'], 'b': ['2', '3']})
df.to_excel(self.path, index=False)
# Test that all-comment lines at EoF are ignored
expected = DataFrame({'a': [1], 'b': [2]})
result = read_excel(self.path, comment='#')
tm.assert_frame_equal(result, expected)
def test_datetimes(self, merge_cells, engine, ext):
# Test writing and reading datetimes. For issue #9139. (xref #9185)
datetimes = [datetime(2013, 1, 13, 1, 2, 3),
datetime(2013, 1, 13, 2, 45, 56),
datetime(2013, 1, 13, 4, 29, 49),
datetime(2013, 1, 13, 6, 13, 42),
datetime(2013, 1, 13, 7, 57, 35),
datetime(2013, 1, 13, 9, 41, 28),
datetime(2013, 1, 13, 11, 25, 21),
datetime(2013, 1, 13, 13, 9, 14),
datetime(2013, 1, 13, 14, 53, 7),
datetime(2013, 1, 13, 16, 37, 0),
datetime(2013, 1, 13, 18, 20, 52)]
write_frame = DataFrame({'A': datetimes})
write_frame.to_excel(self.path, 'Sheet1')
read_frame = read_excel(self.path, 'Sheet1', header=0)
tm.assert_series_equal(write_frame['A'], read_frame['A'])
def test_bytes_io(self, merge_cells, engine, ext):
# see gh-7074
bio = BytesIO()
df = DataFrame(np.random.randn(10, 2))
# Pass engine explicitly, as there is no file path to infer from.
writer = ExcelWriter(bio, engine=engine)
df.to_excel(writer)
writer.save()
bio.seek(0)
reread_df = read_excel(bio, index_col=0)
tm.assert_frame_equal(df, reread_df)
def test_write_lists_dict(self, *_):
# see gh-8188.
df = DataFrame({"mixed": ["a", ["b", "c"], {"d": "e", "f": 2}],
"numeric": [1, 2, 3.0],
"str": ["apple", "banana", "cherry"]})
df.to_excel(self.path, "Sheet1")
read = read_excel(self.path, "Sheet1", header=0, index_col=0)
expected = df.copy()
expected.mixed = expected.mixed.apply(str)
expected.numeric = expected.numeric.astype("int64")
tm.assert_frame_equal(read, expected)
def test_true_and_false_value_options(self, *_):
# see gh-13347
df = pd.DataFrame([["foo", "bar"]], columns=["col1", "col2"])
expected = df.replace({"foo": True, "bar": False})
df.to_excel(self.path)
read_frame = read_excel(self.path, true_values=["foo"],
false_values=["bar"], index_col=0)
tm.assert_frame_equal(read_frame, expected)
def test_freeze_panes(self, *_):
# see gh-15160
expected = DataFrame([[1, 2], [3, 4]], columns=["col1", "col2"])
expected.to_excel(self.path, "Sheet1", freeze_panes=(1, 1))
result = read_excel(self.path, index_col=0)
tm.assert_frame_equal(result, expected)
def test_path_path_lib(self, merge_cells, engine, ext):
df = tm.makeDataFrame()
writer = partial(df.to_excel, engine=engine)
reader = partial(pd.read_excel, index_col=0)
result = tm.round_trip_pathlib(writer, reader,
path="foo.{ext}".format(ext=ext))
tm.assert_frame_equal(result, df)
def test_path_local_path(self, merge_cells, engine, ext):
df = tm.makeDataFrame()
writer = partial(df.to_excel, engine=engine)
reader = partial(pd.read_excel, index_col=0)
result = tm.round_trip_pathlib(writer, reader,
path="foo.{ext}".format(ext=ext))
tm.assert_frame_equal(result, df)
@td.skip_if_no('openpyxl')
@pytest.mark.parametrize("merge_cells,ext,engine", [
(None, '.xlsx', 'openpyxl')])
class TestOpenpyxlTests(_WriterBase):
def test_to_excel_styleconverter(self, merge_cells, ext, engine):
from openpyxl import styles
hstyle = {
"font": {
"color": '00FF0000',
"bold": True,
},
"borders": {
"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin",
},
"alignment": {
"horizontal": "center",
"vertical": "top",
},
"fill": {
"patternType": 'solid',
'fgColor': {
'rgb': '006666FF',
'tint': 0.3,
},
},
"number_format": {
"format_code": "0.00"
},
"protection": {
"locked": True,
"hidden": False,
},
}
font_color = styles.Color('00FF0000')
font = styles.Font(bold=True, color=font_color)
side = styles.Side(style=styles.borders.BORDER_THIN)
border = styles.Border(top=side, right=side, bottom=side, left=side)
alignment = styles.Alignment(horizontal='center', vertical='top')
fill_color = styles.Color(rgb='006666FF', tint=0.3)
fill = styles.PatternFill(patternType='solid', fgColor=fill_color)
number_format = '0.00'
protection = styles.Protection(locked=True, hidden=False)
kw = _OpenpyxlWriter._convert_to_style_kwargs(hstyle)
assert kw['font'] == font
assert kw['border'] == border
assert kw['alignment'] == alignment
assert kw['fill'] == fill
assert kw['number_format'] == number_format
assert kw['protection'] == protection
def test_write_cells_merge_styled(self, merge_cells, ext, engine):
from pandas.io.formats.excel import ExcelCell
sheet_name = 'merge_styled'
sty_b1 = {'font': {'color': '00FF0000'}}
sty_a2 = {'font': {'color': '0000FF00'}}
initial_cells = [
ExcelCell(col=1, row=0, val=42, style=sty_b1),
ExcelCell(col=0, row=1, val=99, style=sty_a2),
]
sty_merged = {'font': {'color': '000000FF', 'bold': True}}
sty_kwargs = _OpenpyxlWriter._convert_to_style_kwargs(sty_merged)
openpyxl_sty_merged = sty_kwargs['font']
merge_cells = [
ExcelCell(col=0, row=0, val='pandas',
mergestart=1, mergeend=1, style=sty_merged),
]
with ensure_clean(ext) as path:
writer = _OpenpyxlWriter(path)
writer.write_cells(initial_cells, sheet_name=sheet_name)
writer.write_cells(merge_cells, sheet_name=sheet_name)
wks = writer.sheets[sheet_name]
xcell_b1 = wks['B1']
xcell_a2 = wks['A2']
assert xcell_b1.font == openpyxl_sty_merged
assert xcell_a2.font == openpyxl_sty_merged
@pytest.mark.parametrize("mode,expected", [
('w', ['baz']), ('a', ['foo', 'bar', 'baz'])])
def test_write_append_mode(self, merge_cells, ext, engine, mode, expected):
import openpyxl
df = DataFrame([1], columns=['baz'])
with ensure_clean(ext) as f:
wb = openpyxl.Workbook()
wb.worksheets[0].title = 'foo'
wb.worksheets[0]['A1'].value = 'foo'
wb.create_sheet('bar')
wb.worksheets[1]['A1'].value = 'bar'
wb.save(f)
writer = ExcelWriter(f, engine=engine, mode=mode)
df.to_excel(writer, sheet_name='baz', index=False)
writer.save()
wb2 = openpyxl.load_workbook(f)
result = [sheet.title for sheet in wb2.worksheets]
assert result == expected
for index, cell_value in enumerate(expected):
assert wb2.worksheets[index]['A1'].value == cell_value
@td.skip_if_no('xlwt')
@pytest.mark.parametrize("merge_cells,ext,engine", [
(None, '.xls', 'xlwt')])
class TestXlwtTests(_WriterBase):
def test_excel_raise_error_on_multiindex_columns_and_no_index(
self, merge_cells, ext, engine):
# MultiIndex as columns is not yet implemented 9794
cols = MultiIndex.from_tuples([('site', ''),
('2014', 'height'),
('2014', 'weight')])
df = DataFrame(np.random.randn(10, 3), columns=cols)
with pytest.raises(NotImplementedError):
with ensure_clean(ext) as path:
df.to_excel(path, index=False)
def test_excel_multiindex_columns_and_index_true(self, merge_cells, ext,
engine):
cols = MultiIndex.from_tuples([('site', ''),
('2014', 'height'),
('2014', 'weight')])
df = pd.DataFrame(np.random.randn(10, 3), columns=cols)
with ensure_clean(ext) as path:
df.to_excel(path, index=True)
def test_excel_multiindex_index(self, merge_cells, ext, engine):
# MultiIndex as index works so assert no error #9794
cols = MultiIndex.from_tuples([('site', ''),
('2014', 'height'),
('2014', 'weight')])
df = DataFrame(np.random.randn(3, 10), index=cols)
with ensure_clean(ext) as path:
df.to_excel(path, index=False)
def test_to_excel_styleconverter(self, merge_cells, ext, engine):
import xlwt
hstyle = {"font": {"bold": True},
"borders": {"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin"},
"alignment": {"horizontal": "center", "vertical": "top"}}
xls_style = _XlwtWriter._convert_to_style(hstyle)
assert xls_style.font.bold
assert xlwt.Borders.THIN == xls_style.borders.top
assert xlwt.Borders.THIN == xls_style.borders.right
assert xlwt.Borders.THIN == xls_style.borders.bottom
assert xlwt.Borders.THIN == xls_style.borders.left
assert xlwt.Alignment.HORZ_CENTER == xls_style.alignment.horz
assert xlwt.Alignment.VERT_TOP == xls_style.alignment.vert
def test_write_append_mode_raises(self, merge_cells, ext, engine):
msg = "Append mode is not supported with xlwt!"
with ensure_clean(ext) as f:
with pytest.raises(ValueError, match=msg):
ExcelWriter(f, engine=engine, mode='a')
@td.skip_if_no('xlsxwriter')
@pytest.mark.parametrize("merge_cells,ext,engine", [
(None, '.xlsx', 'xlsxwriter')])
class TestXlsxWriterTests(_WriterBase):
@td.skip_if_no('openpyxl')
def test_column_format(self, merge_cells, ext, engine):
# Test that column formats are applied to cells. Test for issue #9167.
# Applicable to xlsxwriter only.
with warnings.catch_warnings():
# Ignore the openpyxl lxml warning.
warnings.simplefilter("ignore")
import openpyxl
with ensure_clean(ext) as path:
frame = DataFrame({'A': [123456, 123456],
'B': [123456, 123456]})
writer = ExcelWriter(path)
frame.to_excel(writer)
# Add a number format to col B and ensure it is applied to cells.
num_format = '#,##0'
write_workbook = writer.book
write_worksheet = write_workbook.worksheets()[0]
col_format = write_workbook.add_format({'num_format': num_format})
write_worksheet.set_column('B:B', None, col_format)
writer.save()
read_workbook = openpyxl.load_workbook(path)
try:
read_worksheet = read_workbook['Sheet1']
except TypeError:
# compat
read_worksheet = read_workbook.get_sheet_by_name(name='Sheet1')
# Get the number format from the cell.
try:
cell = read_worksheet['B2']
except TypeError:
# compat
cell = read_worksheet.cell('B2')
try:
read_num_format = cell.number_format
except Exception:
read_num_format = cell.style.number_format._format_code
assert read_num_format == num_format
def test_write_append_mode_raises(self, merge_cells, ext, engine):
msg = "Append mode is not supported with xlsxwriter!"
with ensure_clean(ext) as f:
with pytest.raises(ValueError, match=msg):
ExcelWriter(f, engine=engine, mode='a')
class TestExcelWriterEngineTests(object):
@pytest.mark.parametrize('klass,ext', [
pytest.param(_XlsxWriter, '.xlsx', marks=pytest.mark.skipif(
not td.safe_import('xlsxwriter'), reason='No xlsxwriter')),
pytest.param(_OpenpyxlWriter, '.xlsx', marks=pytest.mark.skipif(
not td.safe_import('openpyxl'), reason='No openpyxl')),
pytest.param(_XlwtWriter, '.xls', marks=pytest.mark.skipif(
not td.safe_import('xlwt'), reason='No xlwt'))
])
def test_ExcelWriter_dispatch(self, klass, ext):
with ensure_clean(ext) as path:
writer = ExcelWriter(path)
if ext == '.xlsx' and td.safe_import('xlsxwriter'):
# xlsxwriter has preference over openpyxl if both installed
assert isinstance(writer, _XlsxWriter)
else:
assert isinstance(writer, klass)
def test_ExcelWriter_dispatch_raises(self):
with pytest.raises(ValueError, match='No engine'):
ExcelWriter('nothing')
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
def test_register_writer(self):
# some awkward mocking to test out dispatch and such actually works
called_save = []
called_write_cells = []
class DummyClass(ExcelWriter):
called_save = False
called_write_cells = False
supported_extensions = ['xlsx', 'xls']
engine = 'dummy'
def save(self):
called_save.append(True)
def write_cells(self, *args, **kwargs):
called_write_cells.append(True)
def check_called(func):
func()
assert len(called_save) >= 1
assert len(called_write_cells) >= 1
del called_save[:]
del called_write_cells[:]
with pd.option_context('io.excel.xlsx.writer', 'dummy'):
register_writer(DummyClass)
writer = ExcelWriter('something.xlsx')
assert isinstance(writer, DummyClass)
df = tm.makeCustomDataframe(1, 1)
check_called(lambda: df.to_excel('something.xlsx'))
check_called(
lambda: df.to_excel(
'something.xls', engine='dummy'))
@pytest.mark.parametrize('engine', [
pytest.param('xlwt',
marks=pytest.mark.xfail(reason='xlwt does not support '
'openpyxl-compatible '
'style dicts')),
'xlsxwriter',
'openpyxl',
])
def test_styler_to_excel(engine):
def style(df):
# XXX: RGB colors not supported in xlwt
return DataFrame([['font-weight: bold', '', ''],
['', 'color: blue', ''],
['', '', 'text-decoration: underline'],
['border-style: solid', '', ''],
['', 'font-style: italic', ''],
['', '', 'text-align: right'],
['background-color: red', '', ''],
['number-format: 0%', '', ''],
['', '', ''],
['', '', ''],
['', '', '']],
index=df.index, columns=df.columns)
def assert_equal_style(cell1, cell2, engine):
if engine in ['xlsxwriter', 'openpyxl']:
pytest.xfail(reason=("GH25351: failing on some attribute "
"comparisons in {}".format(engine)))
# XXX: should find a better way to check equality
assert cell1.alignment.__dict__ == cell2.alignment.__dict__
assert cell1.border.__dict__ == cell2.border.__dict__
assert cell1.fill.__dict__ == cell2.fill.__dict__
assert cell1.font.__dict__ == cell2.font.__dict__
assert cell1.number_format == cell2.number_format
assert cell1.protection.__dict__ == cell2.protection.__dict__
def custom_converter(css):
# use bold iff there is custom style attached to the cell
if css.strip(' \n;'):
return {'font': {'bold': True}}
return {}
pytest.importorskip('jinja2')
pytest.importorskip(engine)
# Prepare spreadsheets
df = DataFrame(np.random.randn(11, 3))
with ensure_clean('.xlsx' if engine != 'xlwt' else '.xls') as path:
writer = ExcelWriter(path, engine=engine)
df.to_excel(writer, sheet_name='frame')
df.style.to_excel(writer, sheet_name='unstyled')
styled = df.style.apply(style, axis=None)
styled.to_excel(writer, sheet_name='styled')
ExcelFormatter(styled, style_converter=custom_converter).write(
writer, sheet_name='custom')
writer.save()
if engine not in ('openpyxl', 'xlsxwriter'):
# For other engines, we only smoke test
return
openpyxl = pytest.importorskip('openpyxl')
wb = openpyxl.load_workbook(path)
# (1) compare DataFrame.to_excel and Styler.to_excel when unstyled
n_cells = 0
for col1, col2 in zip(wb['frame'].columns,
wb['unstyled'].columns):
assert len(col1) == len(col2)
for cell1, cell2 in zip(col1, col2):
assert cell1.value == cell2.value
assert_equal_style(cell1, cell2, engine)
n_cells += 1
# ensure iteration actually happened:
assert n_cells == (11 + 1) * (3 + 1)
# (2) check styling with default converter
# XXX: openpyxl (as at 2.4) prefixes colors with 00, xlsxwriter with FF
alpha = '00' if engine == 'openpyxl' else 'FF'
n_cells = 0
for col1, col2 in zip(wb['frame'].columns,
wb['styled'].columns):
assert len(col1) == len(col2)
for cell1, cell2 in zip(col1, col2):
ref = '%s%d' % (cell2.column, cell2.row)
# XXX: this isn't as strong a test as ideal; we should
# confirm that differences are exclusive
if ref == 'B2':
assert not cell1.font.bold
assert cell2.font.bold
elif ref == 'C3':
assert cell1.font.color.rgb != cell2.font.color.rgb
assert cell2.font.color.rgb == alpha + '0000FF'
elif ref == 'D4':
# This fails with engine=xlsxwriter due to
# https://bitbucket.org/openpyxl/openpyxl/issues/800
if engine == 'xlsxwriter' \
and (LooseVersion(openpyxl.__version__) <
LooseVersion('2.4.6')):
pass
else:
assert cell1.font.underline != cell2.font.underline
assert cell2.font.underline == 'single'
elif ref == 'B5':
assert not cell1.border.left.style
assert (cell2.border.top.style ==
cell2.border.right.style ==
cell2.border.bottom.style ==
cell2.border.left.style ==
'medium')
elif ref == 'C6':
assert not cell1.font.italic
assert cell2.font.italic
elif ref == 'D7':
assert (cell1.alignment.horizontal !=
cell2.alignment.horizontal)
assert cell2.alignment.horizontal == 'right'
elif ref == 'B8':
assert cell1.fill.fgColor.rgb != cell2.fill.fgColor.rgb
assert cell1.fill.patternType != cell2.fill.patternType
assert cell2.fill.fgColor.rgb == alpha + 'FF0000'
assert cell2.fill.patternType == 'solid'
elif ref == 'B9':
assert cell1.number_format == 'General'
assert cell2.number_format == '0%'
else:
assert_equal_style(cell1, cell2, engine)
assert cell1.value == cell2.value
n_cells += 1
assert n_cells == (11 + 1) * (3 + 1)
# (3) check styling with custom converter
n_cells = 0
for col1, col2 in zip(wb['frame'].columns,
wb['custom'].columns):
assert len(col1) == len(col2)
for cell1, cell2 in zip(col1, col2):
ref = '%s%d' % (cell2.column, cell2.row)
if ref in ('B2', 'C3', 'D4', 'B5', 'C6', 'D7', 'B8', 'B9'):
assert not cell1.font.bold
assert cell2.font.bold
else:
assert_equal_style(cell1, cell2, engine)
assert cell1.value == cell2.value
n_cells += 1
assert n_cells == (11 + 1) * (3 + 1)
@td.skip_if_no('openpyxl')
@pytest.mark.skipif(not PY36, reason='requires fspath')
class TestFSPath(object):
def test_excelfile_fspath(self):
with tm.ensure_clean('foo.xlsx') as path:
df = DataFrame({"A": [1, 2]})
df.to_excel(path)
xl = ExcelFile(path)
result = os.fspath(xl)
assert result == path
def test_excelwriter_fspath(self):
with tm.ensure_clean('foo.xlsx') as path:
writer = ExcelWriter(path)
assert os.fspath(writer) == str(path)
|
the-stack_106_18414
|
import sys
import os
import logging
import codecs
from ConfigParser import SafeConfigParser
import ConfigParser
class monitor_configuration():
def __init__(self, configuration_file):
self.folder_localinbox = None
self.folder_localoutbox = None
self.folder_remoteinbox = None
self.folder_remoteoutbox = None
self.folder_remoteorphan = None
self.folder_hl7flag = None
self.folder_ack1flag = None
self.folder_ack2flag = None
self.folder_ack3flag = None
self.folder_tobedeleted = None
self.folder_logs = None
self.sleeptime = 100
self.operation_method_is_copy = False
self.operation_method_is_write = False
self.operation_method_is_move = False
self.operation_delay = 0.1
self.recheck_content = False
self.hl7_operation_method_is_copy = False
self.hl7_operation_method_is_move = False
self.hl7_operation_delay = 0.1
self.hl7_operation_shell_commands = []
self.ack_operation_shell_commands = []
self.validate_configuration(configuration_file)
def __get_option_list(self, parser, section, option_prefix, range_index):
option_list = []
try:
for index in range(range_index):
option_name = option_prefix + "_" + str(index)
option_value = parser.get(section, option_name)
if option_value:
option_value = option_value.strip()
if len(option_value)>0:
option_list.append(option_value)
except ConfigParser.NoOptionError:
return option_list
return option_list
def __get_log_option(self, log_str, default_log):
log_dict = {
'DEBUG':logging.DEBUG,
'INFO':logging.INFO,
'WARNING':logging.WARNING,
'ERROR':logging.ERROR,
'CRITICAL':logging.CRITICAL
}
return log_dict.get(log_str, default_log)
def validate_configuration(self, configuration_file):
parser = SafeConfigParser()
with codecs.open(configuration_file,'r', encoding='utf-8') as cf:
parser.readfp(cf)
self.folder_localinbox = parser.get('General','folder_localinbox')
self.folder_localoutbox = parser.get('General','folder_localoutbox')
self.folder_remoteinbox = parser.get('General','folder_remoteinbox')
self.folder_remoteoutbox = parser.get('General','folder_remoteoutbox')
self.folder_remoteorphan = parser.get('General','folder_remoteorphan')
self.folder_hl7flag = parser.get('General','folder_hl7flag')
self.folder_ack1flag = parser.get('General','folder_ack1flag')
self.folder_ack2flag = parser.get('General','folder_ack2flag')
self.folder_ack3flag = parser.get('General','folder_ack3flag')
self.folder_tobedeleted = parser.get('General','folder_tobedeleted')
self.folder_logs = parser.get('General', 'folder_logs')
self.stdout_log = self.__get_log_option(parser.get('General', 'stdout_log'), logging.INFO)
self.all_file_log = self.__get_log_option(parser.get('General', 'all_file_log'), logging.DEBUG)
# ack operation parameters
operation_method = parser.get('General','operation_method')
if operation_method:
operation_method = operation_method.upper()
else:
operation_method = 'COPY'
if operation_method=='COPY':
self.operation_method_is_copy = True
elif operation_method =='WRITE':
self.operation_method_is_write = True
else:
self.operation_method_is_move = True
operation_delay = parser.get('General', 'operation_delay')
try:
self.operation_delay = float(operation_delay)
except:
self.operation_delay = 0.1
recheck_content = parser.get('General', 'recheck_content')
if recheck_content and recheck_content.upper() == 'TRUE':
self.recheck_content = True
else:
self.recheck_content = False
# HL7 operation parameters
hl7_operation_method = parser.get('General', 'hl7_operation_method')
if hl7_operation_method:
hl7_operation_method = hl7_operation_method.upper()
else:
hl7_operation_method = 'COPY'
if hl7_operation_method == 'COPY':
self.hl7_operation_method_is_copy = True
else:
self.hl7_operation_method_is_move = True
hl7_operation_delay = parser.get('General', 'hl7_operation_delay')
try:
self.hl7_operation_delay = float(hl7_operation_delay)
except:
self.hl7_operation_delay = 0.1
self.sleeptime = int(parser.get('General', 'sleeptime'))
#HL7 shell command
self.hl7_operation_shell_commands = self.__get_option_list(parser, "General",
"hl7_operation_shell_command", 20)
self.ack_operation_shell_commands = self.__get_option_list(parser, "General",
"ack_operation_shell_command", 20)
for tmp_folder in (self.folder_localinbox, self.folder_localoutbox,
self.folder_remoteinbox, self.folder_remoteoutbox,
self.folder_remoteorphan, self.folder_hl7flag,
self.folder_ack1flag, self.folder_ack2flag,
self.folder_ack3flag, self.folder_tobedeleted,
self.folder_logs):
if not os.path.exists(tmp_folder):
os.makedirs(tmp_folder)
if __name__=='__main__':
my_config = monitor_configuration(r'../sample/sample_config.ini')
print(my_config.folder_localinbox)
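# Hedged sketch of the [General] section this class expects, reconstructed from the
# parser.get calls above; every folder path and value below is an illustrative
# assumption, not something shipped with the project (see sample/sample_config.ini).
#
# [General]
# folder_localinbox = /tmp/hl7/localinbox
# folder_localoutbox = /tmp/hl7/localoutbox
# folder_remoteinbox = /tmp/hl7/remoteinbox
# folder_remoteoutbox = /tmp/hl7/remoteoutbox
# folder_remoteorphan = /tmp/hl7/remoteorphan
# folder_hl7flag = /tmp/hl7/hl7flag
# folder_ack1flag = /tmp/hl7/ack1flag
# folder_ack2flag = /tmp/hl7/ack2flag
# folder_ack3flag = /tmp/hl7/ack3flag
# folder_tobedeleted = /tmp/hl7/tobedeleted
# folder_logs = /tmp/hl7/logs
# stdout_log = INFO
# all_file_log = DEBUG
# operation_method = COPY
# operation_delay = 0.1
# recheck_content = FALSE
# hl7_operation_method = COPY
# hl7_operation_delay = 0.1
# sleeptime = 100
# hl7_operation_shell_command_0 .. _19 and ack_operation_shell_command_0 .. _19 are optional.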
|
the-stack_106_18415
|
import bme280
from smbus2 import SMBus
import time
import config#config file
import RPi.GPIO as GPIO# this is used for the lights
from datetime import datetime
if config.ParticulateStatus==1:#we'll only import the sps30 stuff if it is selected in the config file.
from sps30 import SPS30
'''
The Sensors_Functions.py program is in charge of taking readings from the sensors.
For instance, printing Temperature() in the console returns a temperature reading taken at the moment it is called.
Similar functions exist for each sensor. At the end of the file, the Header and data functions format everything for a .csv file.
The Header function generates a header depending on the sensors chosen in the config.py file.
The data function generates a string of readings, also depending on the configuration file.
'''
# Functions for each sensor. when we call them, they take a reading and return that value
def Temperature():
#From the atmospheric sensor
with SMBus(1) as bus:
bme280.load_calibration_params(bus,config.addressBME280)
bme280_data = bme280.sample(bus,config.addressBME280)
ambient_temperature = bme280_data.temperature
return round(ambient_temperature,3)
def Pressure_atmos():
#From the atmospheric sensor
with SMBus(1) as bus:
bme280.load_calibration_params(bus,config.addressBME280)
bme280_data = bme280.sample(bus,config.addressBME280)
pressure = bme280_data.pressure
return round(pressure*3040/4053,3) #default gives in hPa, but after several heated discussions over which unit to use, we agreed on using Torr. 760 Torr= 1 atmosphere
def Humidity():
#From the atmospheric sensor
with SMBus(1) as bus:
bme280.load_calibration_params(bus,config.addressBME280)
bme280_data = bme280.sample(bus,config.addressBME280)
humidity = bme280_data.humidity
return round(humidity,3)
def diff_pressure():# a tiny time delay is used to prevent errors from taking place.
with SMBus(1) as bus:
bus.write_i2c_block_data(config.addressDiff_Pressure, 0x3F, [0xF9]) #Stop any continuous measurement of the sensor
time.sleep(0.5)
bus.write_i2c_block_data(config.addressDiff_Pressure, 0x36, [0X03]) # The command code 0x3603 is split into two arguments, cmd=0x36 and [val]=0x03
time.sleep(0.5)
reading=bus.read_i2c_block_data(config.addressDiff_Pressure,0,9)
pressure_value=reading[0]+float(reading[1])/255
if pressure_value>=0 and pressure_value<128:
differential_pressure=round(pressure_value*60/256,3) #scale factor adjustment
elif pressure_value>=128 and pressure_value<=256: # values >=128 wrap around to a negative reading
differential_pressure=round(-(256-pressure_value)*60/256,3) #scale factor adjustment
#it returns the differential pressure in Pa
return differential_pressure
def particulate_sensor():
#measures the different sized particles in the air, sadly cannot detect neutrinos or subatomic particles.
with SPS30(1) as sps:
sps.read_measured_values()
return [round(sps.dict_values['pm1p0'],2),round(sps.dict_values['pm2p5'],2),round(sps.dict_values['pm4p0'],2),round(sps.dict_values['pm10p0'],2),round(sps.dict_values['nc0p5'],2),round(sps.dict_values['nc1p0'],2),round(sps.dict_values['nc2p5'],2),round(sps.dict_values['nc4p0'],2),round(sps.dict_values['nc10p0'],2),round(sps.dict_values['typical'],2)]
#The header function generates a header for the .csv file. It will give the name for each column along with the units.
def Header():#The header used when uploading the data to the csv file.
statement='timestamp'#by default we'll always have the time.
if config.TemperatureStatus==1:
statement+=',Temperature_C'
if config.PressureStatus==1:
statement+=',Pressure_Torr'
if config.HumidityStatus==1:
statement+=',Percent_Humidity'
if config.Pressure_DiffStatus==1:
statement+=',Differential_Pressure_Pa'
if config.ParticulateStatus==1:
statement+=',MC1um_ug_per_m3,MC2point5um_ug_per_m3,MC4um_ug_per_m3,MC10um_ug_per_m3,0point5um_Counts_Per_cm3,1um_Counts_Per_cm3,2point5um_Counts_Per_cm3,4um_Counts_Per_cm3,10um_Counts_Per_cm3,Typical_Particle_Size_um'
return statement
#This is how all the data is collected and stringed together. It is properly formated for the bvl-MongoDB script.
def data():#The data readings from the sensor.
statement=str(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
if config.TemperatureStatus==1:
statement+=','+str(Temperature())
if config.PressureStatus==1:
statement+=','+str(Pressure_atmos())
if config.HumidityStatus==1:
statement+=','+str(Humidity())
if config.Pressure_DiffStatus==1:
statement+=','+str(diff_pressure())
if config.ParticulateStatus==1:
for x in particulate_sensor():
statement+=','+str(x)
return statement
#rgb lights that will tell us if everything is ok.
def lights(setting,period=0):#period is in minutes and is only used for the rainbow lights
if config.LightSignals==0:#if lights are disabled, it will just skip over it.
return None
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)# this means that we are referring to the BCM Pi pins and not the Board pin numbers
GPIO.setup(config.greenGPIO,GPIO.OUT)#Defining all the LED gpio pins as outputs for the Pi
GPIO.setup(config.redGPIO,GPIO.OUT)
GPIO.setup(config.blueGPIO,GPIO.OUT)
if setting == 'green':
GPIO.output(config.redGPIO,GPIO.LOW)#turns off all the other colors other than the selected one
GPIO.output(config.blueGPIO,GPIO.LOW)
GPIO.output(config.greenGPIO,GPIO.HIGH)
if setting == 'off':
GPIO.cleanup()#turns off all the pins
if setting == 'red':
GPIO.output(config.blueGPIO,GPIO.LOW)
GPIO.output(config.greenGPIO,GPIO.LOW)
GPIO.output(config.redGPIO,GPIO.HIGH)
if setting == 'rainbow':#Soud's special request!!!
RED = GPIO.PWM(config.redGPIO, 100)
GREEN = GPIO.PWM(config.greenGPIO, 100)
BLUE = GPIO.PWM(config.blueGPIO, 100)
for i in range(int(4*period)):#it takes 15 seconds to cycle through the colors, we multiply it by 4 to make a minute and the period is chosen by the user.
RED.start(100)
GREEN.start(1)
BLUE.start(1)
for x in range(1,101):#power consciencious HSV curve
GREEN.ChangeDutyCycle(x)
RED.ChangeDutyCycle(101-x)
time.sleep(0.05)
for x in range(1,101):
GREEN.ChangeDutyCycle(101-x)
BLUE.ChangeDutyCycle(x)
time.sleep(0.05)
for x in range(1,101):
RED.ChangeDutyCycle(x)
BLUE.ChangeDutyCycle(101-x)
time.sleep(0.05)
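# Minimal smoke-test sketch, assuming the sensors enabled in config.py are actually
# wired up on the expected I2C addresses; it prints one CSV header line and one data
# row using only the functions defined above.
if __name__ == '__main__':
    print(Header())
    print(data())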
|
the-stack_106_18417
|
from GB_model import *
#Paths to images of real, fake (phantom), and 'no signs' in npy format (each dir contains subdirectories, one for each expert)
# (provide the parent directory to the dataset generator's output for real, fake, and nosign)
real_path = 'data/real'
fake_path = 'data/fake'
nosign_path = 'data/real_nosign'
# Init model
GB = GhostBusters(save_path='models',device_ID="0")
# Train model (first experts then combiner)
GB.train(real_path,fake_path,nosign_path)
|
the-stack_106_18418
|
class Node:
def __init__(self, value):
self.value = value
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def unshift(self, value):
node = Node(value)
node.next = self.head
self.head = node
def create_linked_list():
llist = LinkedList()
for i in range(0, 17, 4):
llist.unshift(i)
return llist
def display(head):
print("begin")
while(head):
print(head.value)
head = head.next
print("end")
def reverse_linked_list(head):
current = head
prev = None
while(current):
temp = current.next
current.next = prev
prev = current
current = temp
return prev
llist = create_linked_list()
display(llist.head)
reversed_head = reverse_linked_list(llist.head)
display(reversed_head)
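# A small sanity check, assuming only the helpers above: create_linked_list() builds
# 16 -> 12 -> 8 -> 4 -> 0, so walking the reversed list should give 0, 4, 8, 12, 16.
def to_list(head):
    values = []
    while head:
        values.append(head.value)
        head = head.next
    return values
assert to_list(reversed_head) == [0, 4, 8, 12, 16]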
|
the-stack_106_18419
|
"""kinto_mpd - A kinto plugin to start/stop playing a playlist in MPD"""
from kinto.core.events import ResourceChanged
from mpd import MPDClient
client = MPDClient()
__version__ = '0.1.0'
__author__ = 'Mathieu Agopian <[email protected]>'
__all__ = []
def includeme(config):
print("I am the ElasticSearch MPD plugin!")
config.add_subscriber(on_resource_changed, ResourceChanged)
def on_resource_changed(event):
resource_name = event.payload['resource_name']
if resource_name != "record":
return
for change in event.impacted_records:
print("Record changed:", change)
try:
client.connect("localhost", 6600)
record = change['new']
if record.get('status', 'off') == 'on':
client.clear()
client.load(record['id'])
client.play(0)
else:
client.stop()
except Exception as e:
print("Exception:", e)
finally:
client.close()
client.disconnect()
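# Illustrative sketch only: the kind of record this subscriber reacts to. The field
# names mirror the checks above ('status' toggles playback, the record id is loaded
# as an MPD playlist name); the actual Kinto collection layout is an assumption here.
#
# {"id": "summer-playlist", "status": "on"}  -> clear the queue, load the playlist, play
# {"id": "summer-playlist", "status": "off"} -> stop playback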
|
the-stack_106_18421
|
import json
import re
from collections import OrderedDict
from redash.query_runner import *
# TODO: make this more general and move into __init__.py
class ResultSet(object):
def __init__(self):
self.columns = OrderedDict()
self.rows = []
def add_row(self, row):
for key in row.keys():
self.add_column(key)
self.rows.append(row)
def add_column(self, column, column_type=TYPE_STRING):
if column not in self.columns:
self.columns[column] = {'name': column, 'type': column_type, 'friendly_name': column}
def to_json(self):
return json.dumps({'rows': self.rows, 'columns': self.columns.values()})
def parse_issue(issue, field_mapping):
result = OrderedDict()
result['key'] = issue['key']
for k, v in issue['fields'].iteritems():
output_name = field_mapping.get_output_field_name(k)
member_names = field_mapping.get_dict_members(k)
if isinstance(v, dict):
if len(member_names) > 0:
# if field mapping with dict member mappings defined get value of each member
for member_name in member_names:
if member_name in v:
result[field_mapping.get_dict_output_field_name(k,member_name)] = v[member_name]
else:
# these special mapping rules are kept for backwards compatibility
if 'key' in v:
result['{}_key'.format(output_name)] = v['key']
if 'name' in v:
result['{}_name'.format(output_name)] = v['name']
if k in v:
result[output_name] = v[k]
if 'watchCount' in v:
result[output_name] = v['watchCount']
elif isinstance(v, list):
if len(member_names) > 0:
# if field mapping with dict member mappings defined get value of each member
for member_name in member_names:
listValues = []
for listItem in v:
if isinstance(listItem, dict):
if member_name in listItem:
listValues.append(listItem[member_name])
if len(listValues) > 0:
result[field_mapping.get_dict_output_field_name(k,member_name)] = ','.join(listValues)
else:
# otherwise support list values only for non-dict items
listValues = []
for listItem in v:
if not isinstance(listItem, dict):
listValues.append(listItem)
if len(listValues) > 0:
result[output_name] = ','.join(listValues)
else:
result[output_name] = v
return result
def parse_issues(data, field_mapping):
results = ResultSet()
for issue in data['issues']:
results.add_row(parse_issue(issue, field_mapping))
return results
def parse_count(data):
results = ResultSet()
results.add_row({'count': data['total']})
return results
class FieldMapping:
def __init__(cls, query_field_mapping):
cls.mapping = []
for k, v in query_field_mapping.iteritems():
field_name = k
member_name = None
# check for member name contained in field name
member_parser = re.search('(\w+)\.(\w+)', k)
if (member_parser):
field_name = member_parser.group(1)
member_name = member_parser.group(2)
cls.mapping.append({
'field_name': field_name,
'member_name': member_name,
'output_field_name': v
})
def get_output_field_name(cls,field_name):
for item in cls.mapping:
if item['field_name'] == field_name and not item['member_name']:
return item['output_field_name']
return field_name
def get_dict_members(cls,field_name):
member_names = []
for item in cls.mapping:
if item['field_name'] == field_name and item['member_name']:
member_names.append(item['member_name'])
return member_names
def get_dict_output_field_name(cls,field_name, member_name):
for item in cls.mapping:
if item['field_name'] == field_name and item['member_name'] == member_name:
return item['output_field_name']
return None
class JiraJQL(BaseHTTPQueryRunner):
noop_query = '{"queryType": "count"}'
default_doc_url = ("https://confluence.atlassian.com/jirasoftwarecloud/"
"advanced-searching-764478330.html")
response_error = "JIRA returned unexpected status code"
requires_authentication = True
url_title = 'JIRA URL'
username_title = 'Username'
password_title = 'Password'
@classmethod
def name(cls):
return "JIRA (JQL)"
@classmethod
def annotate_query(cls):
return False
def __init__(self, configuration):
super(JiraJQL, self).__init__(configuration)
self.syntax = 'json'
def run_query(self, query, user):
jql_url = '{}/rest/api/2/search'.format(self.configuration["url"])
try:
query = json.loads(query)
query_type = query.pop('queryType', 'select')
field_mapping = FieldMapping(query.pop('fieldMapping', {}))
if query_type == 'count':
query['maxResults'] = 1
query['fields'] = ''
else:
query['maxResults'] = query.get('maxResults', 1000)
response, error = self.get_response(jql_url, params=query)
if error is not None:
return None, error
data = response.json()
if query_type == 'count':
results = parse_count(data)
else:
results = parse_issues(data, field_mapping)
return results.to_json(), None
except KeyboardInterrupt:
return None, "Query cancelled by user."
register(JiraJQL)
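# Illustrative query sketch (an assumption, not taken from the JIRA docs): everything
# except queryType and fieldMapping is passed as-is to JIRA's /rest/api/2/search
# endpoint, and dotted fieldMapping keys pull a single member out of a dict field.
#
# {
#     "queryType": "select",
#     "jql": "project = MYPROJ AND status = Open",
#     "maxResults": 500,
#     "fieldMapping": {
#         "summary": "summary",
#         "assignee.displayName": "assignee"
#     }
# }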
|
the-stack_106_18422
|
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from codecs import open # To use a consistent encoding
from os import path
from setuptools import setup
HERE = path.dirname(path.abspath(__file__))
# Get version info
ABOUT = {}
with open(path.join(HERE, 'datadog_checks', 'exchange_server', '__about__.py')) as f:
exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Parse requirements
def get_requirements(fpath):
with open(path.join(HERE, fpath), encoding='utf-8') as f:
return f.readlines()
CHECKS_BASE_REQ = 'datadog_checks_base>=4.2.0'
setup(
name='datadog-exchange_server',
version=ABOUT["__version__"],
description='The MS Exchange check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent exchange check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-core',
# Author details
author='Datadog',
author_email='[email protected]',
# License
license='BSD',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
# The package we're going to ship
packages=['datadog_checks.exchange_server'],
# Run-time dependencies
install_requires=[CHECKS_BASE_REQ],
# Extra files to ship with the wheel package
include_package_data=True,
)
|
the-stack_106_18424
|
import os
import random
import socket
import subprocess
from contextlib import closing
import six
from dagster_graphql.client.util import parse_raw_log_lines
from dagster_k8s.utils import get_pod_names_in_job, retrieve_pod_logs, wait_for_job_success
from dagster import check
IS_BUILDKITE = os.getenv('BUILDKITE') is not None
def image_pull_policy():
# This is because when running local tests, we need to load the image into the kind cluster (and
# then not attempt to pull it) because we don't want to require credentials for a private
# registry / pollute the private registry / set up and network a local registry as a condition
# of running tests
if IS_BUILDKITE:
return 'Always'
else:
return 'IfNotPresent'
def get_test_namespace():
namespace_suffix = hex(random.randint(0, 16 ** 6))[2:]
return 'dagster-test-%s' % namespace_suffix
def within_docker():
'''detect if we're running inside of a docker container
from: https://stackoverflow.com/a/48710609/11295366
'''
cgroup_path = '/proc/self/cgroup'
return (
os.path.exists('/.dockerenv')
or os.path.isfile(cgroup_path)
and any('docker' in line for line in open(cgroup_path))
)
def which_(exe):
'''Uses distutils to look for an executable, mimicking unix which'''
from distutils import spawn # pylint: disable=no-name-in-module
# https://github.com/PyCQA/pylint/issues/73
return spawn.find_executable(exe)
def check_output(*args, **kwargs):
try:
return subprocess.check_output(*args, **kwargs)
except subprocess.CalledProcessError as exc:
output = exc.output.decode()
six.raise_from(Exception(output), exc)
def find_free_port():
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(('', 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
def remove_none_recursively(obj):
'''Remove none values from a dict. This is used here to support comparing provided config vs.
config we retrieve from kubernetes, which returns all fields, even those which have no value
configured.
'''
if isinstance(obj, (list, tuple, set)):
return type(obj)(remove_none_recursively(x) for x in obj if x is not None)
elif isinstance(obj, dict):
return type(obj)(
(remove_none_recursively(k), remove_none_recursively(v))
for k, v in obj.items()
if k is not None and v is not None
)
else:
return obj
def wait_for_job_and_get_logs(job_name, namespace):
'''Wait for a dagster-k8s job to complete, ensure it launched only one pod,
and then grab the logs from the pod it launched.
'''
check.str_param(job_name, 'job_name')
check.str_param(namespace, 'namespace')
wait_for_job_success(job_name, namespace=namespace)
pod_names = get_pod_names_in_job(job_name, namespace)
assert len(pod_names) == 1
pod_name = pod_names[0]
raw_logs = retrieve_pod_logs(pod_name, namespace=namespace)
return parse_raw_log_lines(raw_logs.split('\n'))
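# Quick sketch of remove_none_recursively (the values below are made up): None-valued
# keys disappear at every nesting level, which is what makes comparing user-supplied
# config against the fully-populated objects returned by kubernetes a fair comparison.
if __name__ == '__main__':
    cleaned = remove_none_recursively(
        {'image': 'dagster', 'env': None, 'resources': {'limits': None, 'requests': {'cpu': '250m'}}}
    )
    assert cleaned == {'image': 'dagster', 'resources': {'requests': {'cpu': '250m'}}}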
|
the-stack_106_18425
|
from __future__ import annotations
import warnings
import weakref
from collections import defaultdict
from dataclasses import dataclass
from inspect import signature
from typing import TYPE_CHECKING, Callable, Dict, List, Set, Union
from .interactions import Shortcut
from .key_bindings import KeymapProvider
from .translations import trans
if TYPE_CHECKING:
from qtpy.QtWidgets import QPushButton
def call_with_context(function, context):
"""
call function `function` with the corresponding values taken from the context
This is used in the action manager to pass the right instances to the actions,
without them needing to take **kwargs, and is mostly needed when
triggering actions via buttons, or to record.
If we want a declarative way to trigger actions we cannot refer to instances
or objects that must be passed to the action, or at least this is
problematic.
We circumvent this by having a context (dictionary of str:instance) in
the action manager, and anything can tell the action manager "this is the
current instance for a key". When an action is triggered, we inspect the
signature, look at which instances it may need and pass those as parameters.
"""
context_keys = [
k
for k, v in signature(function).parameters.items()
if v.kind not in (v.VAR_POSITIONAL, v.VAR_KEYWORD)
]
ctx = {k: v for k, v in context.items() if k in context_keys}
return function(**ctx)
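# Sketch of the mechanism described above (the names are illustrative, not napari API):
# with context = {'qtv': qt_viewer, 'number': 1} and
#     def bump(qtv, number): qtv.dims[number] += 1
# call_with_context(bump, context) inspects bump's signature, keeps only the 'qtv' and
# 'number' entries from the context, and effectively calls bump(qtv=qt_viewer, number=1).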
@dataclass
class Action:
command: Callable
description: str
keymapprovider: KeymapProvider # subclassclass or instance of a subclass
def callable(self, context):
if not hasattr(self, '_command_with_context'):
self._command_with_context = lambda: call_with_context(
self.command, context
)
return self._command_with_context
class ButtonWrapper:
"""
PySide seems to have an issue where calling disconnect/connect
can segfault.
We wrap our buttons in this to avoid disconnecting and reconnecting the same callback
when not necessary.
This also makes it simpler to make connecting a callback idempotent, and makes
it easier to re-update all gui elements when a shortcut or description is
changed.
"""
def __init__(
self,
button: QPushButton,
extra_tooltip_text,
action_name,
action_manager,
):
"""
wrapper around button to disconnect an action only
if it has been connected before.
"""
self._button = weakref.ref(button)
self._connected = None
self._extra_tooltip_text: str = extra_tooltip_text
self._action_name = action_name
self._action_manager = weakref.ref(action_manager)
button.destroyed.connect(self._destroyed)
def _destroyed(self):
if self._action_manager() is None:
return
self._action_manager()._buttons[self._action_name].remove(self)
def setToolTip(self, value):
if self._button() is None:
# destroyed signal not handled yet
return
return self._button().setToolTip(
value + ' ' + self._extra_tooltip_text
)
def clicked_maybe_connect(self, callback):
if callback is not self._connected:
if self._connected is not None:
self._button().clicked.disconnect(self._connected)
self._button().clicked.connect(callback)
self._connected = callback
else:
# do nothing it's the same callback.
pass
@property
def destroyed(self):
return self._button() and self._button().destroyed
class Context:
def __init__(self):
self._attr = {}
self._callables = {}
def __getitem__(self, key):
if key in self._attr:
return self._attr[key]
elif key in self._callables:
return self._callables[key]()
def items(self):
for k, v in self._attr.items():
yield k, v
for k, v in self._callables.items():
yield k, v()
def __setitem__(self, key, value):
if callable(value):
self._callables[key] = value
else:
self._attr[key] = value
class ActionManager:
"""
Manage the bindings between buttons, shortcuts, callbacks and gui elements...
The action manager is aware of the various buttons, keybindings and other
elements that may trigger an action and is able to synchronise all of those.
Thus when a shortcut is bound, this should be capable of updating the
buttons' tooltips, menus etc. to show the shortcuts, descriptions...
In most cases this should also allow binding not-yet-existing shortcuts,
actions, buttons, in which case they will be bound only once the actions are
registered.
For actions that need access to a global element (a viewer, a plugin, ... ),
you want to give this item a unique name, and add it to the action manager
`context` object.
>>> action_manager.context['number'] = 1
... action_manager.context['qtv'] = viewer.qt_viewer
>>> def callback(qtv, number):
... qtv.dims[number] +=1
>>> action_manager.register_action('napari:bump_one', callback,
... 'Add one to dims',
... None)
The callback signature is going to be inspected and required globals passed
in.
"""
_actions: Dict[str, Action]
def __init__(self):
# map associating a name/id with a Comm
self._actions: Dict[str, Action] = {}
self._buttons: Dict[str, Set[ButtonWrapper]] = defaultdict(set)
self._shortcuts: Dict[str, Set[str]] = defaultdict(set)
self.context = Context() # Dict[str, Any] = {}
self._stack: List[str] = []
self._tooltip_include_action_name = False
def _debug(self, val):
self._tooltip_include_action_name = val
for name in self._buttons.keys():
self._update_gui_elements(name)
def _validate_action_name(self, name):
if len(name.split(':')) != 2:
raise ValueError(
trans._(
'Action names need to be in the form `package:name`, got {name!r}',
name=name,
deferred=True,
)
)
def register_action(
self,
name: str,
command: callable,
description: str,
keymapprovider: KeymapProvider,
):
"""
Register an action for future usage
An action is generally a callback associated with
- a name (unique), usually `packagename:name`
- a description
- A keymap provider (easier for focus and backward compatibility).
Actions can then be later bound/unbound from button elements, and
shortcuts; and the action manager will take care of modifying the keymap
of instances to handle shortcuts; and UI elements to have tooltips with
descriptions and shortcuts;
Parameters
----------
name : str
unique name/id of the command that can be used to refer to this
command
command : callable
take 0, or 1 parameter; if `keymapprovider` is not None, will be
called with `keymapprovider` as first parameter.
description : str
Long string to describe what the command does, will be used in
tooltips.
keymapprovider : KeymapProvider
KeymapProvider class or instance to use to bind the shortcut(s) when
registered. This make sure the shortcut is active only when an
instance of this is in focus.
Notes
-----
Registering an action, binding buttons and shortcuts can happen in any
order and should have the same effect. In particular registering an
action can happen later (plugin loading), while user preferences
(keyboard shortcuts) have already been set. When this is the case, the
button and shortcut binding is delayed until an action with the
corresponding name is registered.
See Also
--------
bind_button, bind_shortcut
"""
self._validate_action_name(name)
self._actions[name] = Action(command, description, keymapprovider)
self._update_shortcut_bindings(name)
self._update_gui_elements(name)
def _update_buttons(self, buttons, tooltip: str, callback):
for button in buttons:
# test if only tooltip makes crash
button.setToolTip(tooltip)
button.clicked_maybe_connect(callback)
def _update_gui_elements(self, name: str):
"""
Update the description and shortcuts of all the (known) gui elements.
"""
if name not in self._actions:
return
buttons = self._buttons.get(name, set())
desc = self._actions[name].description
# update buttons with shortcut and description
if name in self._shortcuts:
shortcuts = self._shortcuts[name]
joinstr = (
' '
+ trans._('or', msgctxt='<keysequence> or <keysequence>')
+ ' '
)
shortcut_str = (
'('
+ joinstr.join(
f"{Shortcut(shortcut).platform}" for shortcut in shortcuts
)
+ ')'
)
else:
shortcut_str = ''
callable_ = self._actions[name].callable(self.context)
append = '[' + name + ']' if self._tooltip_include_action_name else ''
self._update_buttons(buttons, desc + shortcut_str + append, callable_)
def _update_shortcut_bindings(self, name: str):
"""
Update the key bindings for the given action name
so that its registered shortcuts trigger the action on the keymap provider.
"""
if name not in self._actions:
return
action = self._actions[name]
if name not in self._shortcuts:
return
shortcuts = self._shortcuts.get(name)
keymapprovider = action.keymapprovider
if hasattr(keymapprovider, 'bind_key'):
for shortcut in shortcuts:
keymapprovider.bind_key(shortcut, overwrite=True)(
action.command
)
def bind_button(self, name: str, button, extra_tooltip_text='') -> None:
"""
Bind `button` to trigger Action `name` on click.
Parameters
----------
name : str
name of the corresponding action in the form ``packagename:name``
button : QtStateButton | QPushButton
An object presenting a qt-button-like interface that when clicked
should trigger the action. The tooltip will be set to the action
description and the corresponding shortcut if available.
extra_tooltip_text : str
Extra text to add at the end of the tooltip. This is useful to
convey more information about this action as the action manager may
update the tooltip based on the action name.
Notes
-----
calling `bind_button` can be done before an action with the
corresponding name is registered, in which case the effect will be
delayed until the corresponding action is registered.
"""
self._validate_action_name(name)
if hasattr(button, 'change'):
button.clicked.disconnect(button.change)
button_wrap = ButtonWrapper(button, extra_tooltip_text, name, self)
assert button not in [x._button() for x in self._buttons[name]]
self._buttons[name].add(button_wrap)
self._update_gui_elements(name)
def bind_shortcut(self, name: str, shortcut: str) -> None:
"""
bind shortcut `shortcut` to trigger action `name`
Parameters
----------
name : str
name of the corresponding action in the form ``packagename:name``
shortcut : str
Shortcut to assign to this action use dash as separator. See
`Shortcut` for known modifiers.
Notes
-----
calling `bind_shortcut` can be done before an action with the
corresponding name is registered, in which case the effect will be
delayed until the corresponding action is registered.
"""
self._validate_action_name(name)
self._shortcuts[name].add(shortcut)
self._update_shortcut_bindings(name)
self._update_gui_elements(name)
def unbind_shortcut(self, name: str) -> Union[Set[str], None]:
"""
Unbind all shortcuts for a given action name.
Parameters
----------
name : str
name of the action in the form `packagename:name` to unbind.
Returns
-------
shortcuts: set of str | None
Previously bound shortcuts or None if not such shortcuts was bound,
or no such action exists.
Warns
-----
UserWarning:
When trying to unbind an action unknown form the action manager,
this warning will be emitted.
"""
action = self._actions.get(name, None)
if action is None:
warnings.warn(
trans._(
"Attempting to unbind an action which does not exists ({name}), "
"this may have no effects. This can happen if your settings are out of "
"date, if you upgraded napari, upgraded or deactivated a plugin, or made "
"a typo in in your custom keybinding.",
name=name,
),
UserWarning,
stacklevel=2,
)
shortcuts = self._shortcuts.get(name)
if shortcuts:
if action and hasattr(action.keymapprovider, 'bind_key'):
for shortcut in shortcuts:
action.keymapprovider.bind_key(shortcut)(None)
del self._shortcuts[name]
self._update_gui_elements(name)
return shortcuts
def _get_layer_shortcuts(self, layers):
"""
Get shortcuts filtered by the given layers.
Parameters
----------
layers : list of layers
Layers to use for shortcuts filtering.
Returns
-------
dict
Dictionary of layers with dictionaries of shortcuts to
descriptions.
"""
layer_shortcuts = {}
for layer in layers:
layer_shortcuts[layer] = {}
for name, shortcut in self._shortcuts.items():
action = self._actions.get(name, None)
if action and layer == action.keymapprovider:
layer_shortcuts[layer][str(shortcut)] = action.description
return layer_shortcuts
def _get_layer_actions(self, layer):
"""
Get actions filtered by the given layers.
Parameters
----------
layer : Layer
Layer to use for actions filtering.
Returns
-------
layer_actions: dict
Dictionary of names of actions with action values for a layer.
"""
layer_actions = {}
for name, action in self._actions.items():
if action and layer == action.keymapprovider:
layer_actions[name] = action
return layer_actions
def _get_active_shortcuts(self, active_keymap):
"""
Get active shortcuts for the given active keymap.
Parameters
----------
active_keymap : KeymapProvider
The active keymap provider.
Returns
-------
dict
Dictionary of shortcuts to descriptions.
"""
active_func_names = [i[1].__name__ for i in active_keymap.items()]
active_shortcuts = {}
for name, shortcut in self._shortcuts.items():
action = self._actions.get(name, None)
if action and action.command.__name__ in active_func_names:
active_shortcuts[str(shortcut)] = action.description
return active_shortcuts
action_manager = ActionManager()
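# Hedged usage sketch, illustrative only and never called. It assumes an action
# named "napari:hello_world" was registered elsewhere (registration is not shown
# in this excerpt), that `bind_button` takes (name, button, extra_tooltip_text),
# and that `my_button` is a Qt-style button exposing `clicked`/`setToolTip`.
def _example_action_manager_usage(my_button):
    # Bind a shortcut; this may happen before the action itself is registered.
    action_manager.bind_shortcut('napari:hello_world', 'Control-Shift-H')
    # Mirror the same action on a GUI button, with extra tooltip text.
    action_manager.bind_button(
        'napari:hello_world', my_button, extra_tooltip_text='(demo)'
    )
    # Later, drop every shortcut bound to that action.
    return action_manager.unbind_shortcut('napari:hello_world')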
|
the-stack_106_18427
|
#
# Copyright 2018 CNIT - Consorzio Nazionale Interuniversitario per le Telecomunicazioni
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from django.shortcuts import render, redirect
from sf_t3d.decorators import login_required
from django.http import HttpResponse
import json
from lib.osm.osmclient.clientv2 import Client
import authosm.utils as osmutils
import yaml
import logging
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('vimhandler.py')
@login_required
def list(request):
user = osmutils.get_user(request)
project_id = user.project_id
result = {'type': 'ns', 'project_id': project_id}
raw_content_types = request.META.get('HTTP_ACCEPT', '*/*').split(',')
if 'application/json' not in raw_content_types:
return __response_handler(request, result, 'vim_list.html')
client = Client()
result_client = client.vim_list(user.get_token())
result["datacenters"] = result_client['data'] if result_client and result_client['error'] is False else []
return __response_handler(request, result, 'vim_list.html')
@login_required
def create(request):
user = osmutils.get_user(request)
project_id = user.project_id
result = {'project_id': project_id}
if request.method == 'GET':
return __response_handler(request, result, 'vim_create.html')
else:
try:
new_vim_dict = request.POST.dict()
client = Client()
keys = ["schema_version",
"schema_type",
"name",
"vim_url",
"vim_type",
"vim_user",
"vim_password",
"vim_tenant_name",
"description"]
vim_data = dict(filter(lambda i: i[0] in keys and len(
i[1]) > 0, new_vim_dict.items()))
vim_data['config'] = {}
config_file = request.FILES.get('config_file')
if config_file is not None:
config = ''
for line in config_file:
config = config + line.decode()
vim_data['config'] = yaml.load(config)
elif 'config' in request.POST and request.POST.get('config') != '':
vim_data['config'] = yaml.load(request.POST.get('config'))
except Exception as e:
return __response_handler(request, {'status': 400, 'code': 'BAD_REQUEST', 'detail': e.message}, url=None, status=400)
result = client.vim_create(user.get_token(), vim_data)
if result['error']:
return __response_handler(request, result['data'], url=None,
status=result['data']['status'] if 'status' in result['data'] else 500)
else:
return __response_handler(request, {}, url=None, status=200)
@login_required
def delete(request, vim_id=None):
    user = osmutils.get_user(request)
    # Default result so the handler below never references an unbound name
    # when vim_delete raises.
    del_res = {}
    try:
        client = Client()
        del_res = client.vim_delete(user.get_token(), vim_id)
    except Exception as e:
        log.exception(e)
    return __response_handler(request, del_res, 'vims:list', to_redirect=True)
@login_required
def show(request, vim_id=None):
user = osmutils.get_user(request)
project_id = user.project_id
client = Client()
result = client.vim_get(user.get_token(), vim_id)
if isinstance(result, dict) and 'error' in result and result['error']:
return render(request, 'error.html')
return __response_handler(request, {
"datacenter": result['data'],
"project_id": project_id
}, 'vim_show.html')
def __response_handler(request, data_res, url=None, to_redirect=None, *args, **kwargs):
raw_content_types = request.META.get('HTTP_ACCEPT', '*/*').split(',')
if 'application/json' in raw_content_types or url is None:
return HttpResponse(json.dumps(data_res), content_type="application/json", *args, **kwargs)
elif to_redirect:
return redirect(url, *args, **kwargs)
else:
return render(request, url, data_res)
|
the-stack_106_18429
|
"""Pytorch based computation backend."""
import math
from collections.abc import Iterable
from functools import wraps
import numpy as _np
import torch
from torch import angle, arange, arccos, arccosh, arcsin, arctanh, argmin
from torch import atan2 as arctan2 # NOQA
from torch import bool as t_bool
from torch import broadcast_tensors as broadcast_arrays
from torch import (
ceil,
clip,
complex64,
complex128,
conj,
cos,
cosh,
cross,
divide,
empty_like,
eq,
erf,
exp,
eye,
flatten,
float32,
float64,
floor,
)
from torch import fmod as mod
from torch import (
greater,
hstack,
imag,
int32,
int64,
isnan,
kron,
less,
log,
logical_or,
)
from torch import max as amax
from torch import mean, meshgrid, moveaxis, nonzero, ones, ones_like, outer, polygamma
from torch import pow as power
from torch import real
from torch import repeat_interleave as repeat
from torch import (
reshape,
sign,
sin,
sinh,
stack,
std,
tan,
tanh,
trapz,
uint8,
unique,
vstack,
zeros,
zeros_like,
)
from ..constants import pytorch_atol, pytorch_rtol
from . import autodiff # NOQA
from . import linalg # NOQA
from . import random # NOQA
DTYPES = {
int32: 0,
int64: 1,
float32: 2,
float64: 3,
complex64: 4,
complex128: 5,
}
atol = pytorch_atol
rtol = pytorch_rtol
def _raise_not_implemented_error(*args, **kwargs):
raise NotImplementedError
searchsorted = _raise_not_implemented_error
def _box_scalar(function):
@wraps(function)
def wrapper(x):
if not torch.is_tensor(x):
x = torch.tensor(x)
return function(x)
return wrapper
abs = _box_scalar(abs)
ceil = _box_scalar(ceil)
cos = _box_scalar(cos)
cosh = _box_scalar(cosh)
exp = _box_scalar(exp)
imag = _box_scalar(imag)
log = _box_scalar(log)
real = _box_scalar(real)
sin = _box_scalar(sin)
sinh = _box_scalar(sinh)
tan = _box_scalar(tan)
def comb(n, k):
return math.factorial(n) // math.factorial(k) // math.factorial(n - k)
def matmul(x, y, *, out=None):
x, y = convert_to_wider_dtype([x, y])
return torch.matmul(x, y, out=out)
def to_numpy(x):
return x.numpy()
def from_numpy(x):
return torch.from_numpy(x)
def one_hot(labels, num_classes):
if not torch.is_tensor(labels):
labels = torch.LongTensor(labels)
return torch.nn.functional.one_hot(labels, num_classes).type(torch.uint8)
def argmax(a, **kwargs):
if a.dtype == torch.bool:
return torch.as_tensor(_np.argmax(a.data.numpy(), **kwargs))
return torch.argmax(a, **kwargs)
def convert_to_wider_dtype(tensor_list):
dtype_list = [DTYPES[x.dtype] for x in tensor_list]
wider_dtype_index = max(dtype_list)
wider_dtype = list(DTYPES.keys())[wider_dtype_index]
tensor_list = [cast(x, dtype=wider_dtype) for x in tensor_list]
return tensor_list
def less_equal(x, y, **kwargs):
if not torch.is_tensor(x):
x = torch.tensor(x)
if not torch.is_tensor(y):
y = torch.tensor(y)
return torch.le(x, y, **kwargs)
def empty(shape, dtype=float64):
return torch.empty(*shape, dtype=dtype)
def split(x, indices_or_sections, axis=0):
if isinstance(indices_or_sections, int):
indices_or_sections = x.shape[axis] // indices_or_sections
return torch.split(x, indices_or_sections, dim=axis)
indices_or_sections = _np.array(indices_or_sections)
intervals_length = indices_or_sections[1:] - indices_or_sections[:-1]
last_interval_length = x.shape[axis] - indices_or_sections[-1]
if last_interval_length > 0:
intervals_length = _np.append(intervals_length, last_interval_length)
intervals_length = _np.insert(intervals_length, 0, indices_or_sections[0])
return torch.split(x, tuple(intervals_length), dim=axis)
def logical_and(x, y):
if torch.is_tensor(x):
return torch.logical_and(x, y)
return x and y
def any(x, axis=None):
if not torch.is_tensor(x):
x = torch.tensor(x)
if axis is None:
return torch.any(x)
if isinstance(axis, int):
return torch.any(x.bool(), axis)
if len(axis) == 1:
return torch.any(x, *axis)
axis = list(axis)
for i_axis, one_axis in enumerate(axis):
if one_axis < 0:
axis[i_axis] = ndim(x) + one_axis
new_axis = tuple(k - 1 if k >= 0 else k for k in axis[1:])
return any(torch.any(x.bool(), axis[0]), new_axis)
def cast(x, dtype):
if torch.is_tensor(x):
return x.to(dtype=dtype)
return array(x).to(dtype=dtype)
def flip(x, axis):
if isinstance(axis, int):
axis = [axis]
if axis is None:
axis = list(range(x.ndim))
return torch.flip(x, dims=axis)
def concatenate(seq, axis=0, out=None):
seq = convert_to_wider_dtype(seq)
return torch.cat(seq, dim=axis, out=out)
def _get_largest_dtype(seq):
dtype_dict = {
0: t_bool,
1: uint8,
2: int32,
3: int64,
4: float32,
5: float64,
6: complex128,
}
reverse_dict = {dtype_dict[key]: key for key in dtype_dict}
dtype_code_set = {reverse_dict[t.dtype] for t in seq}
return dtype_dict[max(dtype_code_set)]
def array(val, dtype=None):
if isinstance(val, (list, tuple)):
if isinstance(val[0], (list, tuple)):
aux_list = [array(t, dtype) for t in val]
if dtype is None:
local_dtype = _get_largest_dtype(aux_list)
aux_list = [cast(t, local_dtype) for t in aux_list]
return stack(aux_list)
if not any([isinstance(t, torch.Tensor) for t in val]):
val = _np.copy(_np.array(val))
elif any([not isinstance(t, torch.Tensor) for t in val]):
tensor_members = [t for t in val if torch.is_tensor(t)]
local_dtype = _get_largest_dtype(tensor_members)
for index, t in enumerate(val):
if torch.is_tensor(t) and t.dtype != local_dtype:
cast(t, local_dtype)
elif torch.is_tensor(t):
val[index] = cast(t, dtype=local_dtype)
else:
val[index] = torch.tensor(t, dtype=local_dtype)
val = stack(val)
else:
val = stack(val)
if isinstance(val, (bool, int, float)):
val = _np.array(val)
if isinstance(val, _np.ndarray):
val = torch.from_numpy(val)
if not isinstance(val, torch.Tensor):
val = torch.Tensor([val])
if dtype is not None:
if val.dtype != dtype:
val = cast(val, dtype)
elif val.dtype == torch.float64:
val = val.float()
return val
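# Hedged sketch (never called): `array` accepts nested lists, plain scalars and
# mixed lists of tensors/floats, promoting everything to a common dtype before
# stacking into a single tensor.
def _example_array_construction():
    a = array([[1, 2], [3, 4]], dtype=float32)   # nested lists with explicit dtype
    b = array([torch.tensor(1.0), 2.0])          # mixed tensor / scalar list
    return a, b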
def all(x, axis=None):
if not torch.is_tensor(x):
x = torch.tensor(x)
if axis is None:
return x.bool().all()
if isinstance(axis, int):
return torch.all(x.bool(), axis)
if len(axis) == 1:
return torch.all(x, *axis)
axis = list(axis)
for i_axis, one_axis in enumerate(axis):
if one_axis < 0:
axis[i_axis] = ndim(x) + one_axis
new_axis = tuple(k - 1 if k >= 0 else k for k in axis[1:])
return all(torch.all(x.bool(), axis[0]), new_axis)
def get_slice(x, indices):
"""Return a slice of an array, following Numpy's style.
Parameters
----------
x : array-like, shape=[dim]
Initial array.
indices : iterable(iterable(int))
Indices which are kept along each axis, starting from 0.
Returns
-------
slice : array-like
Slice of x given by indices.
Notes
-----
This follows Numpy's convention: indices are grouped by axis.
Examples
--------
>>> a = torch.tensor(range(30)).reshape(3,10)
>>> get_slice(a, ((0, 2), (8, 9)))
tensor([8, 29])
"""
return x[indices]
def allclose(a, b, atol=atol, rtol=rtol):
if not isinstance(a, torch.Tensor):
a = torch.tensor(a)
if not isinstance(b, torch.Tensor):
b = torch.tensor(b)
a = to_ndarray(a.float(), to_ndim=1)
b = to_ndarray(b.float(), to_ndim=1)
n_a = a.shape[0]
n_b = b.shape[0]
nb_dim = a.dim()
if n_a > n_b:
reps = (int(n_a / n_b),) + (nb_dim - 1) * (1,)
b = tile(b, reps)
elif n_a < n_b:
reps = (int(n_b / n_a),) + (nb_dim - 1) * (1,)
a = tile(a, reps)
return torch.allclose(a, b, atol=atol, rtol=rtol)
def shape(val):
return val.shape
def dot(a, b):
return einsum("...i,...i->...", a, b)
def maximum(a, b):
return torch.max(array(a), array(b))
def minimum(a, b):
return torch.min(array(a), array(b))
def to_ndarray(x, to_ndim, axis=0):
x = array(x)
if x.dim() == to_ndim - 1:
x = torch.unsqueeze(x, dim=axis)
return x
def broadcast_to(x, shape):
if not torch.is_tensor(x):
x = torch.tensor(x)
return x.expand(shape)
def sqrt(x):
if not isinstance(x, torch.Tensor):
x = torch.tensor(x).float()
return torch.sqrt(x)
def isclose(x, y, rtol=rtol, atol=atol):
if not torch.is_tensor(x):
x = torch.tensor(x)
if not torch.is_tensor(y):
y = torch.tensor(y)
return torch.isclose(x, y, atol=atol, rtol=rtol)
def sum(x, axis=None, keepdims=None, **kwargs):
if axis is None:
if keepdims is None:
return torch.sum(x, **kwargs)
return torch.sum(x, keepdim=keepdims, **kwargs)
if keepdims is None:
return torch.sum(x, dim=axis, **kwargs)
return torch.sum(x, dim=axis, keepdim=keepdims, **kwargs)
def einsum(*args, **kwargs):
einsum_str = args[0]
input_tensors_list = args[1:]
input_tensors_list = convert_to_wider_dtype(input_tensors_list)
if len(input_tensors_list) == 1:
return torch.einsum(einsum_str, input_tensors_list)
einsum_list = einsum_str.split("->")
input_str = einsum_list[0]
if len(einsum_list) > 1:
output_str = einsum_list[1]
input_str_list = input_str.split(",")
is_ellipsis = [input_str[:3] == "..." for input_str in input_str_list]
all_ellipsis = bool(_np.prod(is_ellipsis))
if all_ellipsis:
ndims = [len(input_str[3:]) for input_str in input_str_list]
if len(input_str_list) > 2:
raise NotImplementedError(
"Ellipsis support not implemented for >2 input tensors"
)
tensor_a = input_tensors_list[0]
tensor_b = input_tensors_list[1]
initial_ndim_a = tensor_a.ndim
initial_ndim_b = tensor_b.ndim
tensor_a = to_ndarray(tensor_a, to_ndim=ndims[0] + 1)
tensor_b = to_ndarray(tensor_b, to_ndim=ndims[1] + 1)
n_tensor_a = tensor_a.shape[0]
n_tensor_b = tensor_b.shape[0]
cond = (
n_tensor_a == n_tensor_b == 1
and initial_ndim_a != tensor_a.ndim
and initial_ndim_b != tensor_b.ndim
)
if cond:
tensor_a = squeeze(tensor_a, axis=0)
tensor_b = squeeze(tensor_b, axis=0)
input_prefix_list = ["", ""]
output_prefix = ""
elif n_tensor_a != n_tensor_b:
if n_tensor_a == 1:
tensor_a = squeeze(tensor_a, axis=0)
input_prefix_list = ["", "r"]
output_prefix = "r"
elif n_tensor_b == 1:
tensor_b = squeeze(tensor_b, axis=0)
input_prefix_list = ["r", ""]
output_prefix = "r"
else:
raise ValueError("Shape mismatch for einsum.")
else:
input_prefix_list = ["r", "r"]
output_prefix = "r"
input_str_list = [
input_str.replace("...", prefix)
for input_str, prefix in zip(input_str_list, input_prefix_list)
]
input_str = input_str_list[0] + "," + input_str_list[1]
einsum_str = input_str
if len(einsum_list) > 1:
output_str = output_str.replace("...", output_prefix)
einsum_str = input_str + "->" + output_str
result = torch.einsum(einsum_str, tensor_a, tensor_b, **kwargs)
return result
return torch.einsum(*args, **kwargs)
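# Hedged illustration (never called) of the ellipsis handling above: a batched
# operand and an un-batched operand are broadcast against each other, mirroring
# numpy.einsum semantics.
def _example_einsum_broadcast():
    a = array([[[1.0, 2.0], [3.0, 4.0]]])   # shape (1, 2, 2), batched
    b = array([[1.0, 0.0], [0.0, 1.0]])     # shape (2, 2), single matrix
    # "...ij,...jk->...ik" multiplies every matrix in `a` by `b`.
    return einsum("...ij,...jk->...ik", a, b)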
def T(x):
return torch.t(x)
def transpose(x, axes=None):
if axes:
return x.permute(axes)
if x.dim() == 1:
return x
if x.dim() > 2 and axes is None:
return x.permute(tuple(range(x.ndim)[::-1]))
return x.t()
def squeeze(x, axis=None):
if axis is None:
return torch.squeeze(x)
return torch.squeeze(x, dim=axis)
def trace(x, axis1=0, axis2=1):
min_axis = min(axis1, axis2)
max_axis = max(axis1, axis2)
if min_axis == 1 and max_axis == 2:
return torch.einsum("...ii", x)
if min_axis == -2 and max_axis == -1:
return torch.einsum("...ii", x)
if min_axis == 0 and max_axis == 1:
return torch.einsum("ii...", x)
if min_axis == 0 and max_axis == 2:
return torch.einsum("i...i", x)
raise NotImplementedError()
def linspace(start, stop, num):
return torch.linspace(start=start, end=stop, steps=num)
def equal(a, b, **kwargs):
if a.dtype == torch.ByteTensor:
a = cast(a, torch.uint8).float()
if b.dtype == torch.ByteTensor:
b = cast(b, torch.uint8).float()
return torch.eq(a, b, **kwargs)
def diag_indices(*args, **kwargs):
return tuple(map(torch.from_numpy, _np.diag_indices(*args, **kwargs)))
def tril(mat, k=0):
return torch.tril(mat, diagonal=k)
def triu(mat, k=0):
return torch.triu(mat, diagonal=k)
def tril_indices(n, k=0, m=None):
if m is None:
m = n
return torch.tril_indices(row=n, col=m, offset=k)
def triu_indices(n, k=0, m=None):
if m is None:
m = n
return torch.triu_indices(row=n, col=m, offset=k)
def tile(x, y):
if not torch.is_tensor(x):
x = torch.tensor(x)
return x.repeat(y)
def expand_dims(x, axis=0):
return torch.unsqueeze(x, dim=axis)
def ndim(x):
return x.dim()
def hsplit(x, indices_or_section):
if isinstance(indices_or_section, int):
indices_or_section = x.shape[1] // indices_or_section
return torch.split(x, indices_or_section, dim=1)
def diagonal(x, offset=0, axis1=0, axis2=1):
return torch.diagonal(x, offset=offset, dim1=axis1, dim2=axis2)
def set_diag(x, new_diag):
"""Set the diagonal along the last two axis.
Parameters
----------
x : array-like, shape=[dim]
Initial array.
new_diag : array-like, shape=[dim[-2]]
Values to set on the diagonal.
    Returns
    -------
    x_new : array-like, shape=[dim]
        Copy of x with the new diagonal set along the last two axes.
    Notes
    -----
    This mimics tensorflow.linalg.set_diag(x, new_diag), when new_diag is a
    1-D array; a new array is returned and x itself is left unchanged.
"""
arr_shape = x.shape
off_diag = (1 - torch.eye(arr_shape[-1])) * x
diag = torch.einsum("ij,...i->...ij", torch.eye(new_diag.shape[-1]), new_diag)
return diag + off_diag
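# Minimal sketch (never called) showing that `set_diag` returns a new batch of
# matrices whose last-two-axes diagonal is replaced by `new_diag`, while the
# off-diagonal entries of `x` are preserved.
def _example_set_diag():
    x = zeros((2, 3, 3))
    new_diag = array([1.0, 2.0, 3.0])
    # Every 3x3 matrix in the batch gets diag(1, 2, 3).
    return set_diag(x, new_diag)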
def prod(x, axis=None):
if axis is None:
axis = 0
return torch.prod(x, axis)
def where(condition, x=None, y=None):
if x is None and y is None:
return torch.where(condition)
if not torch.is_tensor(x):
x = torch.tensor(x)
if not torch.is_tensor(y):
y = torch.tensor(y)
y = cast(y, x.dtype)
return torch.where(condition, x, y)
def get_mask_i_float(i, n):
"""Create a 1D array of zeros with one element at one, with floating type.
Parameters
----------
i : int
Index of the non-zero element.
    n : int
Length of the created array.
Returns
-------
mask_i_float : array-like, shape=[n,]
1D array of zeros except at index i, where it is one
"""
range_n = arange(cast(array(n), int32))
i_float = cast(array(i), int32)
mask_i = equal(range_n, i_float)
mask_i_float = cast(mask_i, float32)
return mask_i_float
def _is_boolean(x):
if isinstance(x, bool):
return True
if isinstance(x, (tuple, list)):
return _is_boolean(x[0])
if torch.is_tensor(x):
return x.dtype in [torch.bool, torch.uint8]
return False
def _is_iterable(x):
if isinstance(x, (list, tuple)):
return True
if torch.is_tensor(x):
return ndim(x) > 0
return False
def assignment(x, values, indices, axis=0):
"""Assign values at given indices of an array.
Parameters
----------
x: array-like, shape=[dim]
Initial array.
values: {float, list(float)}
Value or list of values to be assigned.
indices: {int, tuple, list(int), list(tuple)}
Single int or tuple, or list of ints or tuples of indices where value
is assigned.
If the length of the tuples is shorter than ndim(x), values are
assigned to each copy along axis.
axis: int, optional
Axis along which values are assigned, if vectorized.
Returns
-------
x_new : array-like, shape=[dim]
Copy of x with the values assigned at the given indices.
Notes
-----
If a single value is provided, it is assigned at all the indices.
If a list is given, it must have the same length as indices.
"""
x_new = copy(x)
use_vectorization = hasattr(indices, "__len__") and len(indices) < ndim(x)
if _is_boolean(indices):
x_new[indices] = values
return x_new
zip_indices = _is_iterable(indices) and _is_iterable(indices[0])
len_indices = len(indices) if _is_iterable(indices) else 1
if zip_indices:
indices = tuple(zip(*indices))
if not use_vectorization:
if not zip_indices:
len_indices = len(indices) if _is_iterable(indices) else 1
len_values = len(values) if _is_iterable(values) else 1
if len_values > 1 and len_values != len_indices:
raise ValueError("Either one value or as many values as indices")
x_new[indices] = values
else:
indices = tuple(list(indices[:axis]) + [slice(None)] + list(indices[axis:]))
x_new[indices] = values
return x_new
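# Illustrative-only sketch (never called) of the `assignment` semantics
# documented above.
def _example_assignment():
    x = zeros((3, 4))
    # Put 1. at the two positions (0, 1) and (2, 3).
    x1 = assignment(x, 1.0, [(0, 1), (2, 3)])
    # Indices shorter than ndim(x) are vectorized along `axis`: this sets
    # column 2 of every row to 5.
    x2 = assignment(x, 5.0, (2,), axis=0)
    return x1, x2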
def assignment_by_sum(x, values, indices, axis=0):
"""Add values at given indices of an array.
Parameters
----------
x: array-like, shape=[dim]
Initial array.
values: {float, list(float)}
Value or list of values to be assigned.
indices: {int, tuple, list(int), list(tuple)}
Single int or tuple, or list of ints or tuples of indices where value
is assigned.
If the length of the tuples is shorter than ndim(x), values are
assigned to each copy along axis.
axis: int, optional
Axis along which values are assigned, if vectorized.
Returns
-------
x_new : array-like, shape=[dim]
Copy of x with the values assigned at the given indices.
Notes
-----
If a single value is provided, it is assigned at all the indices.
If a list is given, it must have the same length as indices.
"""
x_new = copy(x)
values = array(values)
use_vectorization = hasattr(indices, "__len__") and len(indices) < ndim(x)
if _is_boolean(indices):
x_new[indices] += values
return x_new
zip_indices = _is_iterable(indices) and _is_iterable(indices[0])
if zip_indices:
indices = list(zip(*indices))
if not use_vectorization:
len_indices = len(indices) if _is_iterable(indices) else 1
len_values = len(values) if _is_iterable(values) else 1
if len_values > 1 and len_values != len_indices:
raise ValueError("Either one value or as many values as indices")
x_new[indices] += values
else:
indices = tuple(list(indices[:axis]) + [slice(None)] + list(indices[axis:]))
x_new[indices] += values
return x_new
def copy(x):
return x.clone()
def cumsum(x, axis=None):
if not torch.is_tensor(x):
x = array(x)
if axis is None:
return x.flatten().cumsum(dim=0)
return torch.cumsum(x, dim=axis)
def cumprod(x, axis=None):
if axis is None:
axis = 0
return torch.cumprod(x, axis)
def array_from_sparse(indices, data, target_shape):
"""Create an array of given shape, with values at specific indices.
The rest of the array will be filled with zeros.
Parameters
----------
indices : iterable(tuple(int))
Index of each element which will be assigned a specific value.
data : iterable(scalar)
Value associated at each index.
target_shape : tuple(int)
Shape of the output array.
Returns
-------
a : array, shape=target_shape
Array of zeros with specified values assigned to specified indices.
"""
return torch.sparse.FloatTensor(
torch.LongTensor(indices).t(),
torch.FloatTensor(cast(data, float32)),
torch.Size(target_shape),
).to_dense()
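# Illustrative-only sketch (never called): build a dense 2x3 array with two
# non-zero entries from (index, value) pairs.
def _example_array_from_sparse():
    # Expected result: [[0., 7., 0.],
    #                   [0., 0., 9.]]
    return array_from_sparse([(0, 1), (1, 2)], [7.0, 9.0], (2, 3))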
def vectorize(x, pyfunc, multiple_args=False, **kwargs):
if multiple_args:
return stack(list(map(lambda y: pyfunc(*y), zip(*x))))
return stack(list(map(pyfunc, x)))
def vec_to_diag(vec):
return torch.diag_embed(vec, offset=0)
def tril_to_vec(x, k=0):
n = x.shape[-1]
rows, cols = tril_indices(n, k=k)
return x[..., rows, cols]
def triu_to_vec(x, k=0):
n = x.shape[-1]
rows, cols = triu_indices(n, k=k)
return x[..., rows, cols]
def mat_from_diag_triu_tril(diag, tri_upp, tri_low):
"""Build matrix from given components.
Forms a matrix from diagonal, strictly upper triangular and
    strictly lower triangular parts.
Parameters
----------
diag : array_like, shape=[..., n]
tri_upp : array_like, shape=[..., (n * (n - 1)) / 2]
tri_low : array_like, shape=[..., (n * (n - 1)) / 2]
Returns
-------
mat : array_like, shape=[..., n, n]
"""
n = diag.shape[-1]
(i,) = diag_indices(n, ndim=1)
j, k = triu_indices(n, k=1)
mat = torch.zeros((diag.shape + (n,)))
mat[..., i, i] = diag
mat[..., j, k] = tri_upp
mat[..., k, j] = tri_low
return mat
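# Hedged example (never called): assemble a single 3x3 matrix from its diagonal
# and its strictly upper / strictly lower triangular entries (row-major order).
def _example_mat_from_diag_triu_tril():
    diag = array([1.0, 2.0, 3.0])
    tri_upp = array([4.0, 5.0, 6.0])   # entries (0, 1), (0, 2), (1, 2)
    tri_low = array([7.0, 8.0, 9.0])   # entries (1, 0), (2, 0), (2, 1)
    return mat_from_diag_triu_tril(diag, tri_upp, tri_low)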
def ravel_tril_indices(n, k=0, m=None):
if m is None:
size = (n, n)
else:
size = (n, m)
idxs = _np.tril_indices(n, k, m)
return torch.from_numpy(_np.ravel_multi_index(idxs, size))
def sort(a, axis=-1):
sorted_a, _ = torch.sort(a, dim=axis)
return sorted_a
def amin(a, axis=-1):
(values, _) = torch.min(a, dim=axis)
return values
def take(a, indices, axis=0):
if not torch.is_tensor(indices):
indices = torch.as_tensor(indices)
return torch.squeeze(torch.index_select(a, axis, indices))
def _unnest_iterable(ls):
out = []
if isinstance(ls, Iterable):
for inner_ls in ls:
out.extend(_unnest_iterable(inner_ls))
else:
out.append(ls)
return out
def pad(a, pad_width, constant_value=0.0):
return torch.nn.functional.pad(
a, _unnest_iterable(reversed(pad_width)), value=constant_value
)
|
the-stack_106_18430
|
from sharpy.plans.acts import ActBase
from sharpy.plans.acts.morph_building import MorphBuilding
from sc2 import UnitTypeId, AbilityId
class MorphUnit(ActBase):
def __init__(self, unit_type: UnitTypeId, ability_type: AbilityId, result_type: UnitTypeId,
cocoon_type: UnitTypeId, target_count: int):
super().__init__()
self.target_count = target_count
self.result_type = result_type
self.ability_type = ability_type
self.unit_type = unit_type
self.cocoon_type = cocoon_type
async def execute(self) -> bool:
target_count = self.cache.own(self.result_type).amount
start_units = self.cache.own(self.unit_type).ready.sorted_by_distance_to(self.knowledge.own_main_zone.center_location)
cocoon_units = self.cache.own(self.cocoon_type)
target_count += len(cocoon_units)
for target in start_units: # type: Unit
if target.orders and target.orders[0].ability.id == self.ability_type:
target_count += 1
if target_count >= self.target_count:
return True
for target in start_units:
if target.is_ready:
if self.knowledge.can_afford(self.ability_type):
self.do(target(self.ability_type))
self.knowledge.reserve_costs(self.ability_type)
target_count += 1
if target_count >= self.target_count:
return True
if start_units:
return False
return True
class MorphRavager(MorphUnit):
def __init__(self, target_count: int):
super().__init__(UnitTypeId.ROACH, AbilityId.MORPHTORAVAGER_RAVAGER, UnitTypeId.RAVAGER,
UnitTypeId.RAVAGERCOCOON, target_count)
class MorphOverseer(MorphUnit):
def __init__(self, target_count: int):
super().__init__(UnitTypeId.OVERLORD, AbilityId.MORPH_OVERSEER, UnitTypeId.OVERSEER,
UnitTypeId.OVERLORDCOCOON, target_count)
class MorphBroodLord(MorphUnit):
def __init__(self, target_count: int):
super().__init__(UnitTypeId.CORRUPTOR, AbilityId.MORPHTOBROODLORD_BROODLORD, UnitTypeId.BROODLORD,
UnitTypeId.BROODLORDCOCOON, target_count)
class MorphLurker(MorphUnit):
def __init__(self, target_count: int):
super().__init__(UnitTypeId.HYDRALISK, AbilityId.MORPH_LURKER, UnitTypeId.LURKERMP,
UnitTypeId.LURKERMPEGG, target_count)
class MorphBaneling(MorphUnit):
def __init__(self, target_count: int):
super().__init__(UnitTypeId.ZERGLING, AbilityId.MORPHZERGLINGTOBANELING_BANELING, UnitTypeId.BANELING,
UnitTypeId.BANELINGCOCOON, target_count)
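# Hedged sketch following the same pattern as the classes above. The unit and
# ability identifiers below are assumptions based on the python-sc2 id enums,
# not taken from this file.
class MorphOverlordTransport(MorphUnit):
    def __init__(self, target_count: int):
        super().__init__(UnitTypeId.OVERLORD, AbilityId.MORPH_OVERLORDTRANSPORT, UnitTypeId.OVERLORDTRANSPORT,
                         UnitTypeId.TRANSPORTOVERLORDCOCOON, target_count)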
|
the-stack_106_18433
|
"""
This script implements an incremental merge strategy, where there is a largeDB,
and a smallDB. In general new PCL dataset files are merged into the smallDB (in
order to speed up the merge). When the smallDB grows to some threshold, say 10%
of the size of the largeDB, then the small and largeDB are merged together.
This script handles the common path of merging a new set of PCL dataset files
into a smallDB.
Inputs:
dirLargeDB: directory of the largeDB - for reference only and to validate
that the datasets are disjoint and gene maps match.
dirSmallDB: directory of the smallDB - the db files and metadata that the
new PCL data will be combined into.
dirNewPCL: the directory containing the new PCL files to merge in.
newDsetFile: the list of the new datasets to merge in, the first column is
the name of the pcl files found in dirNewPCL
largeDsetFile: list of datasets in the largeDB
smallDsetFile: list of datasets in the smallDB
sleipnirBinDir: sleipnir binaries
outDir: where the final merge of smallDB and newPCL will be written (somewhere other
than the smallDB dir so that verification can happen)
Example usage:
conda activate genomics
python seekIncrementalMerge.py -p <newPclDir> -dn <newDatasetList> \
-dl <largeDatasetList> -ds <smallDatasetList> -l <largeDBDir> \
-s <smallDBDir> -b <sleipnirBinaries> -o <mergedDir> \
"""
import os
import sys
import argparse
import glob
import subprocess
from datetime import datetime
currPath = os.path.dirname(os.path.realpath(__file__))
sys.path.append(currPath)
import seekUtils as sutils
from seekCreateDB import createSeekDB
# This script will create a new database from PCL files
# It will use the parameters of an existing small and large database (which must agree).
# It will then prepare the data for combining into the small database.
# At the end it will print the command to run to merge the new database into the small database.
# Notes:
# If you just want to create a smallDB, use the seekCreateDB.py script
# TODO - make it optional to specify a newPCL directory. If none is specified
# then merge the small and large db.
def copyFile(fileName, srcDir, destDir):
src = os.path.join(srcDir, fileName)
dst = os.path.join(destDir, fileName)
ret = subprocess.run(f'cp {src} {dst}', shell=True)
assert ret.returncode == 0
def checkFilesMatch(fileName, dir1, dir2):
f1 = os.path.join(dir1, fileName)
f2 = os.path.join(dir2, fileName)
ret = subprocess.run(f'diff {f1} {f2}', shell=True)
assert ret.returncode == 0
def concatenateFiles(fileName, dir1, dir2, outDir):
f1 = os.path.join(dir1, fileName)
f2 = os.path.join(dir2, fileName)
dst = os.path.join(outDir, fileName)
ret = subprocess.run(f'cat {f1} {f2} > {dst}', shell=True)
assert ret.returncode == 0
def combineDirs(subDirName, dir1, dir2, outDir):
d1 = os.path.join(dir1, subDirName)
d2 = os.path.join(dir2, subDirName)
dst = os.path.join(outDir, subDirName)
os.makedirs(dst, exist_ok=True)
ret = subprocess.run(f'cp -a {d1}/* {dst}', shell=True)
assert ret.returncode == 0
ret = subprocess.run(f'cp -a {d2}/* {dst}', shell=True)
assert ret.returncode == 0
def main(args):
refCfg = sutils.getDefaultConfig()
refCfg.datasetsFile = os.path.basename(args.smallDsetFile)
os.makedirs(args.outDir, exist_ok=True)
# STEP 01: Load the dataset lists for the new, small and large DBs and make sure
# they are all disjoint
# check existence of the dataset map files
if not os.path.exists(args.smallDsetFile):
raise FileNotFoundError("Small DB dataset_platform map not found: " + args.smallDsetFile)
if not os.path.exists(args.largeDsetFile):
raise FileNotFoundError("Large DB dataset_platform map not found: " + args.largeDsetFile)
if not os.path.exists(args.newDsetFile):
raise FileNotFoundError("New dataset_platform map not found: " + args.newDsetFile)
smallDsets = sutils.readDatasetList(args.smallDsetFile)
largeDsets = sutils.readDatasetList(args.largeDsetFile)
newDsets = sutils.readDatasetList(args.newDsetFile)
smallDsets = set([dset[1] for dset in smallDsets])
largeDsets = set([dset[1] for dset in largeDsets])
newDsets = set([dset[1] for dset in newDsets])
large_small_overlap = largeDsets.intersection(smallDsets)
large_new_overlap = largeDsets.intersection(newDsets)
small_new_overlap = smallDsets.intersection(newDsets)
# Datasets must be disjoint between all the databases
assert not large_small_overlap, "large and small datasets overlap " + str(large_small_overlap)
assert not large_new_overlap, "large and new datasets overlap " + str(large_new_overlap)
assert not small_new_overlap, "small and new datasets overlap " + str(small_new_overlap)
# STEP 02: Do some checks
# check that large and small DBs have the same number/name of db files
assert os.path.isdir(args.dirLargeDB)
assert os.path.isdir(args.dirSmallDB)
largeDBFileDir = os.path.join(args.dirLargeDB, refCfg.dbDir)
smallDBFileDir = os.path.join(args.dirSmallDB, refCfg.dbDir)
assert os.path.isdir(largeDBFileDir)
assert os.path.isdir(smallDBFileDir)
largeDBFiles = glob.glob1(largeDBFileDir, "*.db")
largeDBFiles.sort()
smallDBFiles = glob.glob1(smallDBFileDir, "*.db")
smallDBFiles.sort()
assert largeDBFiles == smallDBFiles
# assert numDBFiles == numSmallDBFiles, "numDBFiles mismatch between large and small db"
checkFilesMatch(refCfg.geneMapFile, args.dirLargeDB, args.dirSmallDB)
checkFilesMatch(refCfg.quantFile, args.dirLargeDB, args.dirSmallDB)
# STEP 03: Create a new database from the new incremental pcl files
# make a temp directory to hold the incremental database
dateStr = datetime.now().strftime("%Y%m%d_%H%M%S")
incrDBDirName = os.path.join(args.outDir, "incr_dset_" + dateStr)
os.makedirs(incrDBDirName)
# copy over geneMap and quant files needed from refDB
copyFile(refCfg.geneMapFile, args.dirLargeDB, incrDBDirName)
copyFile(refCfg.quantFile, args.dirLargeDB, incrDBDirName)
# set the configs
newCfg = sutils.getDefaultConfig()
newCfg.binDir = args.sleipnirBinDir
newCfg.pclDir = args.dirNewPCL
newCfg.datasetsFile = args.newDsetFile
newCfg.inDir = incrDBDirName
newCfg.outDir = incrDBDirName
newCfg.numDbFiles = len(largeDBFiles)
sutils.checkConfig(newCfg)
# create the db
res = createSeekDB(newCfg, None, runAll=True, concurrency=8)
assert res == True, "createSeekDB failed"
print(f'Incremental database created in {incrDBDirName}')
# STEP 04: Combine metadata
copyFile(refCfg.geneMapFile, args.dirLargeDB, args.outDir)
copyFile(refCfg.quantFile, args.dirLargeDB, args.outDir)
dsetFileBaseName = os.path.basename(args.smallDsetFile)
concatenateFiles(dsetFileBaseName, args.dirSmallDB, incrDBDirName, args.outDir)
concatenateFiles(refCfg.dsetSizeFile, args.dirSmallDB, incrDBDirName, args.outDir)
combineDirs('prep', args.dirSmallDB, incrDBDirName, args.outDir)
combineDirs('sinfo', args.dirSmallDB, incrDBDirName, args.outDir)
combineDirs('gvar', args.dirSmallDB, incrDBDirName, args.outDir)
combineDirs('pclbin', args.dirSmallDB, incrDBDirName, args.outDir)
# STEP 05: Run the db combiner
if args.yesToPrompts is False:
reply = input('Proceed with dbCombiner command? ' + '(y/n): ')
        reply = reply.lower().strip()
        if not reply.startswith('y'):
return -1
mergedCfg = sutils.getDefaultConfig()
mergedCfg.binDir = args.sleipnirBinDir
mergedCfg.inDir = args.dirSmallDB
mergedCfg.outDir = args.outDir
mergedCfg.datasetsFile = dsetFileBaseName
mergedCfg.numDbFiles = len(largeDBFiles)
sutils.checkConfig(mergedCfg)
dbDirsToCombine = [smallDBFileDir, newCfg.dbDir]
sutils.parallelCombineThreadDBs(mergedCfg, dbDirsToCombine, concurrency=8)
# STEP 06: Run verification of the combined DB files
if args.yesToPrompts is False:
reply = input('Proceed with db verification command? ' + '(y/n): ')
        reply = reply.lower().strip()
        if not reply.startswith('y'):
return -1
sutils.verifyCombinedDBs(mergedCfg, dbDirsToCombine, concurrency=8)
print("Run SeekPrep to combine platform statistics files...")
# STEP 07: Calculate the platform averages
smallPlatDir = os.path.join(args.dirSmallDB, 'plat')
incrPlatDir = os.path.join(incrDBDirName, 'plat')
outPlatDir = os.path.join(args.outDir, 'plat')
os.makedirs(outPlatDir, exist_ok=True)
cmd = f'{args.sleipnirBinDir}/SeekPrep -f -m -i {mergedCfg.geneMapFile} ' \
f'-1 {smallPlatDir} -2 {incrPlatDir} -D {outPlatDir}'
ret = subprocess.run(cmd, shell=True)
assert ret.returncode == 0
# FINALIZATION STEPS
# Recommend after completion of merge do the following by hand:
# Rename small DB directory to prev.num
# Rename combined DB directory to small DB name
# Check size of small DB relative to large DB, and recommend combining
# at some size/percentage threshold.
return 0
if __name__ == "__main__":
argParser = argparse.ArgumentParser()
argParser.add_argument('--dirNewPCL', '-p', type=str, required=True,
help='directory containing the PCL files for the new datasets')
argParser.add_argument('--newDsetFile', '-dn', type=str, required=True,
help='text file listing the new datasets and corresponding platforms, one per line')
argParser.add_argument('--largeDsetFile', '-dl', type=str, required=True,
help='text file listing the large DB datasets and corresponding platforms, one per line')
argParser.add_argument('--smallDsetFile', '-ds', type=str, required=True,
help='text file listing the small DB datasets and corresponding platforms, one per line')
argParser.add_argument('--dirSmallDB', '-s', type=str, required=True,
help='directory of existing small DB')
argParser.add_argument('--dirLargeDB', '-l', type=str, required=True,
help='directory of existing large DB')
argParser.add_argument('--sleipnirBinDir', '-b', type=str, required=True,
help='Directory where the Sleipnir binaries are installed')
argParser.add_argument('--outDir', '-o', type=str, required=True,
help='Output directory to write new database into')
argParser.add_argument('--yesToPrompts', '-y', default=False, action='store_true',
help='Answer yes to all prompts')
args = argParser.parse_args()
res = main(args)
sys.exit(res)
|
the-stack_106_18435
|
#!/usr/bin/env python3
#
# Copyright (c) 2017, Pivotal Software Inc.
#
from gppylib.commands import base
from gppylib.commands.unix import *
from gppylib.commands.gp import *
from gppylib.db import dbconn
from gppylib.gparray import GpArray
from gppylib.gplog import get_default_logger
class GpResGroup(object):
def __init__(self):
self.logger = get_default_logger()
def validate(self):
pool = base.WorkerPool()
gp_array = GpArray.initFromCatalog(dbconn.DbURL(), utility=True)
host_list = list(set(gp_array.get_hostlist(True)))
msg = None
for h in host_list:
cmd = Command(h, "gpcheckresgroupimpl", REMOTE, h)
pool.addCommand(cmd)
pool.join()
items = pool.getCompletedItems()
failed = []
for i in items:
if not i.was_successful():
failed.append("[%s:%s]"%(i.remoteHost, i.get_stderr().rstrip()))
pool.haltWork()
pool.joinWorkers()
if failed:
msg = ",".join(failed)
return msg
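# Illustrative-only usage sketch: run the cluster-wide resource group check and
# report any failing hosts (validate() returns None when every host passes).
def _example_validate_resgroups():
    msg = GpResGroup().validate()
    if msg is not None:
        print("resource group validation failed on: %s" % msg)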
|
the-stack_106_18436
|
import os
import cv2
import torch
import datetime
import numpy as np
from torchvision.utils import save_image
import torch.nn.functional as F
from cnn_raccoon import input_images_dict
from cnn_raccoon import images_top_dir, img_relative_path
def tensor_image_converter(tensor):
"""
Converts PyTorch Tensor to Numpy Array (Image)
"""
tensor = tensor.squeeze()
if len(tensor.shape) > 2:
tensor = tensor.permute(1, 2, 0)
img = tensor.detach().cpu().numpy()
return img
def module2traced(module, inputs):
"""
Function taken from: https://github.com/FrancescoSaverioZuppichini/A-journey-into-Convolutional-Neural-Network-visualization-
"""
handles, modules = [], []
def trace(module, inputs, outputs):
modules.append(module)
def traverse(module):
for m in module.children():
traverse(m)
is_leaf = len(list(module.children())) == 0
if is_leaf:
handles.append(module.register_forward_hook(trace))
traverse(module)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
module = module.to(device)
_ = module(inputs.to(device))
[h.remove() for h in handles]
return modules
def convert_to_grayscale(cv2im):
"""
Converts 3d image to grayscale
Args:
cv2im (numpy arr): RGB image with shape (D,W,H)
returns:
grayscale_im (numpy_arr): Grayscale image with shape (1,W,D)
credits to https://github.com/utkuozbulak/pytorch-cnn-visualizations
"""
grayscale_im = np.sum(np.abs(cv2im), axis=0)
im_max = np.percentile(grayscale_im, 99)
im_min = np.min(grayscale_im)
grayscale_im = (np.clip((grayscale_im - im_min) / (im_max - im_min), 0, 1))
grayscale_im = np.expand_dims(grayscale_im, axis=0)
return grayscale_im
def image2cam(image, cam):
"""
Credits to: https://github.com/FrancescoSaverioZuppichini/A-journey-into-Convolutional-Neural-Network-visualization-
"""
h, w, c = image.shape
cam -= np.min(cam)
cam /= np.max(cam)
cam = cv2.resize(cam, (w, h))
cam = np.uint8(cam * 255.0)
img_with_cam = cv2.applyColorMap(cam, cv2.COLORMAP_JET)
img_with_cam = cv2.cvtColor(img_with_cam, cv2.COLOR_BGR2RGB)
img_with_cam = img_with_cam + (image * 255)
img_with_cam /= np.max(img_with_cam)
return img_with_cam
def tensor2cam(image, cam):
"""
Credits to: https://github.com/FrancescoSaverioZuppichini/A-journey-into-Convolutional-Neural-Network-visualization-
"""
image_with_heatmap = image2cam(image.squeeze().permute(1, 2, 0).cpu().numpy(), cam.detach().cpu().numpy())
return torch.from_numpy(image_with_heatmap).permute(2, 0, 1)
def save_input_images(images):
"""
Saves input images and expose them to the Flask server.
:param images: Input images taken from Inspector
"""
input_images = images_top_dir + "/input_images"
if not os.path.exists(input_images):
os.mkdir(input_images)
for i in range(images.shape[0]):
ts = datetime.datetime.now().timestamp()
img_path = input_images + "/img_{}_{}.jpg".format(str(ts).replace(".", ""), i)
img_relative = img_relative_path + "/input_images" + "/img_{}_{}.jpg".format(str(ts).replace(".", ""), i)
image = images[i]
image = F.interpolate(image, size=128)
save_image(image, img_path)
if i in input_images_dict.keys():
input_images_dict[i].append(img_relative)
else:
input_images_dict[i] = img_relative
|
the-stack_106_18437
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import ABC, abstractmethod
from collections import namedtuple
from typing import Any, Optional
from magma.enodebd.data_models.data_model import InvalidTrParamPath
from magma.enodebd.data_models.data_model_parameters import ParameterName
from magma.enodebd.device_config.configuration_init import build_desired_config
from magma.enodebd.exceptions import ConfigurationError, Tr069Error
from magma.enodebd.logger import EnodebdLogger as logger
from magma.enodebd.state_machines.acs_state_utils import (
does_inform_have_event,
get_all_objects_to_add,
get_all_objects_to_delete,
get_all_param_values_to_set,
get_obj_param_values_to_set,
get_object_params_to_get,
get_optional_param_to_check,
get_param_values_to_set,
get_params_to_get,
parse_get_parameter_values_response,
process_inform_message,
)
from magma.enodebd.state_machines.enb_acs import EnodebAcsStateMachine
from magma.enodebd.state_machines.timer import StateMachineTimer
from magma.enodebd.tr069 import models
AcsMsgAndTransition = namedtuple(
'AcsMsgAndTransition', ['msg', 'next_state'],
)
AcsReadMsgResult = namedtuple(
'AcsReadMsgResult', ['msg_handled', 'next_state'],
)
class EnodebAcsState(ABC):
"""
State class for the Enodeb state machine
States can transition after reading a message from the eNB, sending a
message out to the eNB, or when a timer completes. As such, some states
are only responsible for message sending, and others are only responsible
for reading incoming messages.
In the constructor, set up state transitions.
"""
def __init__(self):
self._acs = None
def enter(self) -> None:
"""
Set up your timers here. Call transition(..) on the ACS when the timer
completes or throw an error
"""
pass
def exit(self) -> None:
"""Destroy timers here"""
pass
def read_msg(self, message: Any) -> AcsReadMsgResult:
"""
Args: message: tr069 message
Returns: name of the next state, if transition required
"""
raise ConfigurationError(
'%s should implement read_msg() if it '
'needs to handle message reading' % self.__class__.__name__,
)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
"""
Produce a message to send back to the eNB.
Args:
message: TR-069 message which was already processed by read_msg
Returns: Message and possible transition
"""
raise ConfigurationError(
'%s should implement get_msg() if it '
'needs to produce messages' % self.__class__.__name__,
)
@property
def acs(self) -> EnodebAcsStateMachine:
return self._acs
@acs.setter
def acs(self, val: EnodebAcsStateMachine) -> None:
self._acs = val
@abstractmethod
def state_description(self) -> str:
""" Provide a few words about what the state represents """
pass
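# Hedged sketch, not part of the production state machine: a minimal concrete
# state that only acknowledges an Inform and then hands control to a
# caller-supplied next state, illustrating the read_msg/get_msg contract
# described in the EnodebAcsState docstring above.
class _ExampleEchoInformState(EnodebAcsState):
    def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
        super().__init__()
        self.acs = acs
        self.done_transition = when_done

    def read_msg(self, message: Any) -> AcsReadMsgResult:
        # Anything other than an Inform is left for other states to handle.
        if not isinstance(message, models.Inform):
            return AcsReadMsgResult(False, None)
        return AcsReadMsgResult(True, None)

    def get_msg(self, message: Any) -> AcsMsgAndTransition:
        resp = models.InformResponse()
        resp.MaxEnvelopes = 1  # per TR-069 spec
        return AcsMsgAndTransition(resp, self.done_transition)

    def state_description(self) -> str:
        return 'Example state: acknowledge Inform and transition'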
class WaitInformState(EnodebAcsState):
"""
This state indicates that no Inform message has been received yet, or
that no Inform message has been received for a long time.
This state is used to handle an Inform message that arrived when enodebd
already believes that the eNB is connected. As such, it is unclear to
enodebd whether the eNB is just sending another Inform, or if a different
eNB was plugged into the same interface.
"""
def __init__(
self,
acs: EnodebAcsStateMachine,
when_done: str,
when_boot: Optional[str] = None,
):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.boot_transition = when_boot
self.has_enb_just_booted = False
def read_msg(self, message: Any) -> AcsReadMsgResult:
"""
Args:
message: models.Inform Tr069 Inform message
"""
if not isinstance(message, models.Inform):
return AcsReadMsgResult(False, None)
process_inform_message(
message, self.acs.data_model,
self.acs.device_cfg,
)
if does_inform_have_event(message, '1 BOOT'):
return AcsReadMsgResult(True, self.boot_transition)
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
""" Reply with InformResponse """
response = models.InformResponse()
# Set maxEnvelopes to 1, as per TR-069 spec
response.MaxEnvelopes = 1
return AcsMsgAndTransition(response, self.done_transition)
def state_description(self) -> str:
return 'Waiting for an Inform'
class GetRPCMethodsState(EnodebAcsState):
"""
After the first Inform message from boot, it is expected that the eNB
will try to learn the RPC methods of the ACS.
"""
def __init__(self, acs: EnodebAcsStateMachine, when_done: str, when_skip: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.skip_transition = when_skip
def read_msg(self, message: Any) -> AcsReadMsgResult:
# If this is a regular Inform, not after a reboot we'll get an empty
if isinstance(message, models.DummyInput):
return AcsReadMsgResult(True, self.skip_transition)
if not isinstance(message, models.GetRPCMethods):
return AcsReadMsgResult(False, self.done_transition)
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
resp = models.GetRPCMethodsResponse()
resp.MethodList = models.MethodList()
RPC_METHODS = ['Inform', 'GetRPCMethods', 'TransferComplete']
resp.MethodList.arrayType = 'xsd:string[%d]' \
% len(RPC_METHODS)
resp.MethodList.string = RPC_METHODS
return AcsMsgAndTransition(resp, self.done_transition)
def state_description(self) -> str:
return 'Waiting for incoming GetRPC Methods after boot'
class BaicellsRemWaitState(EnodebAcsState):
"""
We've already received an Inform message. This state is to handle a
Baicells eNodeB issue.
After eNodeB is rebooted, hold off configuring it for some time to give
    time for REM to run. This works around a BaiCells eNodeB limitation: the
    eNodeB cannot be enabled while its initial REM scan is still running.
In this state, just hang at responding to Inform, and then ending the
TR-069 session.
"""
CONFIG_DELAY_AFTER_BOOT = 600
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.rem_timer = None
def enter(self):
self.rem_timer = StateMachineTimer(self.CONFIG_DELAY_AFTER_BOOT)
logger.info(
'Holding off of eNB configuration for %s seconds. '
'Will resume after eNB REM process has finished. ',
self.CONFIG_DELAY_AFTER_BOOT,
)
def exit(self):
self.rem_timer = None
def read_msg(self, message: Any) -> AcsReadMsgResult:
if not isinstance(message, models.Inform):
return AcsReadMsgResult(False, None)
process_inform_message(
message, self.acs.data_model,
self.acs.device_cfg,
)
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
if self.rem_timer.is_done():
return AcsMsgAndTransition(
models.DummyInput(),
self.done_transition,
)
return AcsMsgAndTransition(models.DummyInput(), None)
def state_description(self) -> str:
remaining = self.rem_timer.seconds_remaining()
return 'Waiting for eNB REM to run for %d more seconds before ' \
'resuming with configuration.' % remaining
class WaitEmptyMessageState(EnodebAcsState):
def __init__(
self,
acs: EnodebAcsStateMachine,
when_done: str,
when_missing: Optional[str] = None,
):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.unknown_param_transition = when_missing
def read_msg(self, message: Any) -> AcsReadMsgResult:
"""
It's expected that we transition into this state right after receiving
an Inform message and replying with an InformResponse. At that point,
the eNB sends an empty HTTP request (aka DummyInput) to initiate the
rest of the provisioning process
"""
if not isinstance(message, models.DummyInput):
logger.debug("Ignoring message %s", str(type(message)))
return AcsReadMsgResult(msg_handled=False, next_state=None)
if self.unknown_param_transition:
if get_optional_param_to_check(self.acs.data_model):
return AcsReadMsgResult(
msg_handled=True,
next_state=self.unknown_param_transition,
)
return AcsReadMsgResult(
msg_handled=True,
next_state=self.done_transition,
)
    def get_msg(self, message: Any) -> AcsMsgAndTransition:
"""
Return a dummy message waiting for the empty message from CPE
"""
request = models.DummyInput()
return AcsMsgAndTransition(msg=request, next_state=None)
def state_description(self) -> str:
return 'Waiting for empty message from eNodeB'
class CheckOptionalParamsState(EnodebAcsState):
def __init__(
self,
acs: EnodebAcsStateMachine,
when_done: str,
):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.optional_param = None
def get_msg(self, message: Any) -> AcsMsgAndTransition:
self.optional_param = get_optional_param_to_check(self.acs.data_model)
if self.optional_param is None:
raise Tr069Error('Invalid State')
# Generate the request
request = models.GetParameterValues()
request.ParameterNames = models.ParameterNames()
request.ParameterNames.arrayType = 'xsd:string[1]'
request.ParameterNames.string = []
path = self.acs.data_model.get_parameter(self.optional_param).path
request.ParameterNames.string.append(path)
return AcsMsgAndTransition(request, None)
def read_msg(self, message: Any) -> AcsReadMsgResult:
""" Process either GetParameterValuesResponse or a Fault """
if type(message) == models.Fault:
self.acs.data_model.set_parameter_presence(
self.optional_param,
False,
)
elif type(message) == models.GetParameterValuesResponse:
name_to_val = parse_get_parameter_values_response(
self.acs.data_model,
message,
)
logger.debug(
'Received CPE parameter values: %s',
str(name_to_val),
)
for name, val in name_to_val.items():
self.acs.data_model.set_parameter_presence(
self.optional_param,
True,
)
magma_val = self.acs.data_model.transform_for_magma(name, val)
self.acs.device_cfg.set_parameter(name, magma_val)
else:
return AcsReadMsgResult(False, None)
if get_optional_param_to_check(self.acs.data_model) is not None:
return AcsReadMsgResult(True, None)
return AcsReadMsgResult(True, self.done_transition)
def state_description(self) -> str:
return 'Checking if some optional parameters exist in data model'
class SendGetTransientParametersState(EnodebAcsState):
"""
Periodically read eNodeB status. Note: keep frequency low to avoid
backing up large numbers of read operations if enodebd is busy.
Some eNB parameters are read only and updated by the eNB itself.
"""
PARAMETERS = [
ParameterName.OP_STATE,
ParameterName.RF_TX_STATUS,
ParameterName.GPS_STATUS,
ParameterName.PTP_STATUS,
ParameterName.MME_STATUS,
ParameterName.GPS_LAT,
ParameterName.GPS_LONG,
]
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
def read_msg(self, message: Any) -> AcsReadMsgResult:
if not isinstance(message, models.DummyInput):
return AcsReadMsgResult(False, None)
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
request = models.GetParameterValues()
request.ParameterNames = models.ParameterNames()
request.ParameterNames.string = []
for name in self.PARAMETERS:
# Not all data models have these parameters
if self.acs.data_model.is_parameter_present(name):
path = self.acs.data_model.get_parameter(name).path
request.ParameterNames.string.append(path)
request.ParameterNames.arrayType = \
'xsd:string[%d]' % len(request.ParameterNames.string)
return AcsMsgAndTransition(request, self.done_transition)
def state_description(self) -> str:
return 'Getting transient read-only parameters'
class WaitGetTransientParametersState(EnodebAcsState):
"""
Periodically read eNodeB status. Note: keep frequency low to avoid
backing up large numbers of read operations if enodebd is busy
"""
def __init__(
self,
acs: EnodebAcsStateMachine,
when_get: str,
when_get_obj_params: str,
when_delete: str,
when_add: str,
when_set: str,
when_skip: str,
request_all_params: bool = False,
):
super().__init__()
self.acs = acs
self.done_transition = when_get
self.get_obj_params_transition = when_get_obj_params
self.rm_obj_transition = when_delete
self.add_obj_transition = when_add
self.set_transition = when_set
self.skip_transition = when_skip
self.request_all_params = request_all_params
def read_msg(self, message: Any) -> AcsReadMsgResult:
if not isinstance(message, models.GetParameterValuesResponse):
return AcsReadMsgResult(False, None)
# Current values of the fetched parameters
name_to_val = parse_get_parameter_values_response(
self.acs.data_model,
message,
)
logger.debug('Fetched Transient Params: %s', str(name_to_val))
# Update device configuration
for name in name_to_val:
magma_val = \
self.acs.data_model.transform_for_magma(
name,
name_to_val[name],
)
self.acs.device_cfg.set_parameter(name, magma_val)
return AcsReadMsgResult(True, self.get_next_state())
def get_next_state(self) -> str:
should_get_params = \
len(
get_params_to_get(
self.acs.device_cfg,
self.acs.data_model,
request_all_params=self.request_all_params,
),
) > 0
if should_get_params:
return self.done_transition
should_get_obj_params = \
len(
get_object_params_to_get(
self.acs.desired_cfg,
self.acs.device_cfg,
self.acs.data_model,
),
) > 0
if should_get_obj_params:
return self.get_obj_params_transition
elif len(
get_all_objects_to_delete(
self.acs.desired_cfg,
self.acs.device_cfg,
),
) > 0:
return self.rm_obj_transition
elif len(
get_all_objects_to_add(
self.acs.desired_cfg,
self.acs.device_cfg,
),
) > 0:
return self.add_obj_transition
return self.skip_transition
def state_description(self) -> str:
return 'Getting transient read-only parameters'
class GetParametersState(EnodebAcsState):
"""
Get the value of most parameters of the eNB that are defined in the data
model. Object parameters are excluded.
"""
def __init__(
self,
acs: EnodebAcsStateMachine,
when_done: str,
request_all_params: bool = False,
):
super().__init__()
self.acs = acs
self.done_transition = when_done
# Set to True if we want to request values of all parameters, even if
# the ACS state machine already has recorded values of them.
self.request_all_params = request_all_params
def read_msg(self, message: Any) -> AcsReadMsgResult:
"""
It's expected that we transition into this state right after receiving
an Inform message and replying with an InformResponse. At that point,
the eNB sends an empty HTTP request (aka DummyInput) to initiate the
rest of the provisioning process
"""
if not isinstance(message, models.DummyInput):
return AcsReadMsgResult(False, None)
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
"""
Respond with GetParameterValuesRequest
Get the values of all parameters defined in the data model.
Also check which addable objects are present, and what the values of
parameters for those objects are.
"""
# Get the names of regular parameters
names = get_params_to_get(
self.acs.device_cfg, self.acs.data_model,
self.request_all_params,
)
# Generate the request
request = models.GetParameterValues()
request.ParameterNames = models.ParameterNames()
request.ParameterNames.arrayType = 'xsd:string[%d]' \
% len(names)
request.ParameterNames.string = []
for name in names:
path = self.acs.data_model.get_parameter(name).path
if path is not InvalidTrParamPath:
# Only get data elements backed by tr69 path
request.ParameterNames.string.append(path)
return AcsMsgAndTransition(request, self.done_transition)
def state_description(self) -> str:
return 'Getting non-object parameters'
class WaitGetParametersState(EnodebAcsState):
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
def read_msg(self, message: Any) -> AcsReadMsgResult:
""" Process GetParameterValuesResponse """
if not isinstance(message, models.GetParameterValuesResponse):
return AcsReadMsgResult(False, None)
name_to_val = parse_get_parameter_values_response(
self.acs.data_model,
message,
)
logger.debug('Received CPE parameter values: %s', str(name_to_val))
for name, val in name_to_val.items():
magma_val = self.acs.data_model.transform_for_magma(name, val)
self.acs.device_cfg.set_parameter(name, magma_val)
return AcsReadMsgResult(True, self.done_transition)
def state_description(self) -> str:
return 'Getting non-object parameters'
class GetObjectParametersState(EnodebAcsState):
def __init__(self, acs: EnodebAcsStateMachine, when_done: str, request_all_params: bool = False):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.request_all_params = request_all_params
def get_msg(self, message: Any) -> AcsMsgAndTransition:
""" Respond with GetParameterValuesRequest """
names = get_object_params_to_get(
self.acs.desired_cfg,
self.acs.device_cfg,
self.acs.data_model,
self.request_all_params,
)
# Generate the request
request = models.GetParameterValues()
request.ParameterNames = models.ParameterNames()
request.ParameterNames.arrayType = 'xsd:string[%d]' \
% len(names)
request.ParameterNames.string = []
for name in names:
path = self.acs.data_model.get_parameter(name).path
request.ParameterNames.string.append(path)
return AcsMsgAndTransition(request, self.done_transition)
def state_description(self) -> str:
return 'Getting object parameters'
class WaitGetObjectParametersState(EnodebAcsState):
def __init__(
self,
acs: EnodebAcsStateMachine,
when_delete: str,
when_add: str,
when_set: str,
when_skip: str,
):
super().__init__()
self.acs = acs
self.rm_obj_transition = when_delete
self.add_obj_transition = when_add
self.set_params_transition = when_set
self.skip_transition = when_skip
def read_msg(self, message: Any) -> AcsReadMsgResult:
""" Process GetParameterValuesResponse """
if not isinstance(message, models.GetParameterValuesResponse):
return AcsReadMsgResult(False, None)
path_to_val = {}
if hasattr(message.ParameterList, 'ParameterValueStruct') and \
message.ParameterList.ParameterValueStruct is not None:
for param_value_struct in message.ParameterList.ParameterValueStruct:
path_to_val[param_value_struct.Name] = \
param_value_struct.Value.Data
logger.debug('Received object parameters: %s', str(path_to_val))
# Number of PLMN objects reported can be incorrect. Let's count them
num_plmns = 0
obj_to_params = self.acs.data_model.get_numbered_param_names()
while True:
obj_name = ParameterName.PLMN_N % (num_plmns + 1)
if obj_name not in obj_to_params or len(obj_to_params[obj_name]) == 0:
logger.warning(
"eNB has PLMN %s but not defined in model",
obj_name,
)
break
param_name_list = obj_to_params[obj_name]
obj_path = self.acs.data_model.get_parameter(param_name_list[0]).path
if obj_path not in path_to_val:
break
if not self.acs.device_cfg.has_object(obj_name):
self.acs.device_cfg.add_object(obj_name)
num_plmns += 1
for name in param_name_list:
path = self.acs.data_model.get_parameter(name).path
value = path_to_val[path]
magma_val = \
self.acs.data_model.transform_for_magma(name, value)
self.acs.device_cfg.set_parameter_for_object(
name, magma_val,
obj_name,
)
num_plmns_reported = \
int(self.acs.device_cfg.get_parameter(ParameterName.NUM_PLMNS))
if num_plmns != num_plmns_reported:
logger.warning(
"eNB reported %d PLMNs but found %d",
num_plmns_reported, num_plmns,
)
self.acs.device_cfg.set_parameter(
ParameterName.NUM_PLMNS,
num_plmns,
)
# Now we can have the desired state
if self.acs.desired_cfg is None:
self.acs.desired_cfg = build_desired_config(
self.acs.mconfig,
self.acs.service_config,
self.acs.device_cfg,
self.acs.data_model,
self.acs.config_postprocessor,
)
if len(
get_all_objects_to_delete(
self.acs.desired_cfg,
self.acs.device_cfg,
),
) > 0:
return AcsReadMsgResult(True, self.rm_obj_transition)
elif len(
get_all_objects_to_add(
self.acs.desired_cfg,
self.acs.device_cfg,
),
) > 0:
return AcsReadMsgResult(True, self.add_obj_transition)
elif len(
get_all_param_values_to_set(
self.acs.desired_cfg,
self.acs.device_cfg,
self.acs.data_model,
),
) > 0:
return AcsReadMsgResult(True, self.set_params_transition)
return AcsReadMsgResult(True, self.skip_transition)
def state_description(self) -> str:
return 'Getting object parameters'
class DeleteObjectsState(EnodebAcsState):
def __init__(
self,
acs: EnodebAcsStateMachine,
when_add: str,
when_skip: str,
):
super().__init__()
self.acs = acs
self.deleted_param = None
self.add_obj_transition = when_add
self.skip_transition = when_skip
def get_msg(self, message: Any) -> AcsMsgAndTransition:
"""
Send DeleteObject message to TR-069 and poll for response(s).
Input:
- Object name (string)
"""
request = models.DeleteObject()
self.deleted_param = get_all_objects_to_delete(
self.acs.desired_cfg,
self.acs.device_cfg,
)[0]
request.ObjectName = \
self.acs.data_model.get_parameter(self.deleted_param).path
return AcsMsgAndTransition(request, None)
def read_msg(self, message: Any) -> AcsReadMsgResult:
"""
Send DeleteObject message to TR-069 and poll for response(s).
Input:
- Object name (string)
"""
if type(message) == models.DeleteObjectResponse:
if message.Status != 0:
raise Tr069Error(
'Received DeleteObjectResponse with '
'Status=%d' % message.Status,
)
elif type(message) == models.Fault:
raise Tr069Error(
'Received Fault in response to DeleteObject '
'(faultstring = %s)' % message.FaultString,
)
else:
return AcsReadMsgResult(False, None)
self.acs.device_cfg.delete_object(self.deleted_param)
obj_list_to_delete = get_all_objects_to_delete(
self.acs.desired_cfg,
self.acs.device_cfg,
)
if len(obj_list_to_delete) > 0:
return AcsReadMsgResult(True, None)
if len(
get_all_objects_to_add(
self.acs.desired_cfg,
self.acs.device_cfg,
),
) == 0:
return AcsReadMsgResult(True, self.skip_transition)
return AcsReadMsgResult(True, self.add_obj_transition)
def state_description(self) -> str:
return 'Deleting objects'
class AddObjectsState(EnodebAcsState):
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.added_param = None
def get_msg(self, message: Any) -> AcsMsgAndTransition:
request = models.AddObject()
self.added_param = get_all_objects_to_add(
self.acs.desired_cfg,
self.acs.device_cfg,
)[0]
desired_param = self.acs.data_model.get_parameter(self.added_param)
desired_path = desired_param.path
path_parts = desired_path.split('.')
        # If adding an enumerated object, i.e. XX.N., we should add it to the
        # parent object XX., so strip the index
if len(path_parts) > 2 and \
path_parts[-1] == '' and path_parts[-2].isnumeric():
logger.debug('Stripping index from path=%s', desired_path)
desired_path = '.'.join(path_parts[:-2]) + '.'
request.ObjectName = desired_path
return AcsMsgAndTransition(request, None)
def read_msg(self, message: Any) -> AcsReadMsgResult:
if type(message) == models.AddObjectResponse:
if message.Status != 0:
raise Tr069Error(
'Received AddObjectResponse with '
'Status=%d' % message.Status,
)
elif type(message) == models.Fault:
raise Tr069Error(
'Received Fault in response to AddObject '
'(faultstring = %s)' % message.FaultString,
)
else:
return AcsReadMsgResult(False, None)
instance_n = message.InstanceNumber
self.acs.device_cfg.add_object(self.added_param % instance_n)
obj_list_to_add = get_all_objects_to_add(
self.acs.desired_cfg,
self.acs.device_cfg,
)
if len(obj_list_to_add) > 0:
return AcsReadMsgResult(True, None)
return AcsReadMsgResult(True, self.done_transition)
def state_description(self) -> str:
return 'Adding objects'
class SetParameterValuesState(EnodebAcsState):
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
def get_msg(self, message: Any) -> AcsMsgAndTransition:
request = models.SetParameterValues()
request.ParameterList = models.ParameterValueList()
param_values = get_all_param_values_to_set(
self.acs.desired_cfg,
self.acs.device_cfg,
self.acs.data_model,
)
request.ParameterList.arrayType = 'cwmp:ParameterValueStruct[%d]' \
% len(param_values)
request.ParameterList.ParameterValueStruct = []
logger.debug(
'Sending TR069 request to set CPE parameter values: %s',
str(param_values),
)
# TODO: Match key response when we support having multiple outstanding
# calls.
if self.acs.has_version_key:
request.ParameterKey = models.ParameterKeyType()
request.ParameterKey.Data =\
"SetParameter-{:10.0f}".format(self.acs.parameter_version_key)
request.ParameterKey.type = 'xsd:string'
for name, value in param_values.items():
param_info = self.acs.data_model.get_parameter(name)
type_ = param_info.type
name_value = models.ParameterValueStruct()
name_value.Value = models.anySimpleType()
name_value.Name = param_info.path
enb_value = self.acs.data_model.transform_for_enb(name, value)
if type_ in ('int', 'unsignedInt'):
name_value.Value.type = 'xsd:%s' % type_
name_value.Value.Data = str(enb_value)
elif type_ == 'boolean':
# Boolean values have integral representations in spec
name_value.Value.type = 'xsd:boolean'
name_value.Value.Data = str(int(enb_value))
elif type_ == 'string':
name_value.Value.type = 'xsd:string'
name_value.Value.Data = str(enb_value)
else:
raise Tr069Error(
'Unsupported type for %s: %s' %
(name, type_),
)
if param_info.is_invasive:
self.acs.are_invasive_changes_applied = False
request.ParameterList.ParameterValueStruct.append(name_value)
return AcsMsgAndTransition(request, self.done_transition)
def state_description(self) -> str:
return 'Setting parameter values'
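# For reference, the SetParameterValues request assembled above corresponds
# roughly to the following CWMP body. This is an illustrative sketch only:
# the SOAP envelope and namespaces are omitted, and the parameter path/value
# shown here are placeholders, not values taken from this code.
#
#   <cwmp:SetParameterValues>
#     <ParameterList soap-enc:arrayType="cwmp:ParameterValueStruct[1]">
#       <ParameterValueStruct>
#         <Name>Device.Example.SomeParam</Name>
#         <Value xsi:type="xsd:unsignedInt">60</Value>
#       </ParameterValueStruct>
#     </ParameterList>
#     <ParameterKey xsi:type="xsd:string">SetParameter-1234567890</ParameterKey>
#   </cwmp:SetParameterValues>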
class SetParameterValuesNotAdminState(EnodebAcsState):
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
def get_msg(self, message: Any) -> AcsMsgAndTransition:
request = models.SetParameterValues()
request.ParameterList = models.ParameterValueList()
param_values = get_all_param_values_to_set(
self.acs.desired_cfg,
self.acs.device_cfg,
self.acs.data_model,
exclude_admin=True,
)
request.ParameterList.arrayType = 'cwmp:ParameterValueStruct[%d]' \
% len(param_values)
request.ParameterList.ParameterValueStruct = []
logger.debug(
'Sending TR069 request to set CPE parameter values: %s',
str(param_values),
)
for name, value in param_values.items():
param_info = self.acs.data_model.get_parameter(name)
type_ = param_info.type
name_value = models.ParameterValueStruct()
name_value.Value = models.anySimpleType()
name_value.Name = param_info.path
enb_value = self.acs.data_model.transform_for_enb(name, value)
if type_ in ('int', 'unsignedInt'):
name_value.Value.type = 'xsd:%s' % type_
name_value.Value.Data = str(enb_value)
elif type_ == 'boolean':
# Boolean values have integral representations in spec
name_value.Value.type = 'xsd:boolean'
name_value.Value.Data = str(int(enb_value))
elif type_ == 'string':
name_value.Value.type = 'xsd:string'
name_value.Value.Data = str(enb_value)
else:
raise Tr069Error(
'Unsupported type for %s: %s' %
(name, type_),
)
if param_info.is_invasive:
self.acs.are_invasive_changes_applied = False
request.ParameterList.ParameterValueStruct.append(name_value)
return AcsMsgAndTransition(request, self.done_transition)
def state_description(self) -> str:
return 'Setting parameter values excluding Admin Enable'
class WaitSetParameterValuesState(EnodebAcsState):
def __init__(
self,
acs: EnodebAcsStateMachine,
when_done: str,
when_apply_invasive: str,
status_non_zero_allowed: bool = False,
):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.apply_invasive_transition = when_apply_invasive
        # Set Params can legally return zero and non-zero status.
        # Per TR-196, if there are errors the method should return a fault.
        # Make the flag optional to compensate for existing radios returning
        # non-zero status on error.
self.status_non_zero_allowed = status_non_zero_allowed
def read_msg(self, message: Any) -> AcsReadMsgResult:
if type(message) == models.SetParameterValuesResponse:
if not self.status_non_zero_allowed:
if message.Status != 0:
raise Tr069Error(
'Received SetParameterValuesResponse with '
'Status=%d' % message.Status,
)
self._mark_as_configured()
if not self.acs.are_invasive_changes_applied:
return AcsReadMsgResult(True, self.apply_invasive_transition)
return AcsReadMsgResult(True, self.done_transition)
elif type(message) == models.Fault:
logger.error(
'Received Fault in response to SetParameterValues, '
'Code (%s), Message (%s)', message.FaultCode,
message.FaultString,
)
if message.SetParameterValuesFault is not None:
for fault in message.SetParameterValuesFault:
logger.error(
'SetParameterValuesFault Param: %s, '
'Code: %s, String: %s', fault.ParameterName,
fault.FaultCode, fault.FaultString,
)
return AcsReadMsgResult(False, None)
def _mark_as_configured(self) -> None:
"""
A successful attempt at setting parameter values means that we need to
update what we think the eNB's configuration is to match what we just
set the parameter values to.
"""
# Values of parameters
name_to_val = get_param_values_to_set(
self.acs.desired_cfg,
self.acs.device_cfg,
self.acs.data_model,
)
for name, val in name_to_val.items():
magma_val = self.acs.data_model.transform_for_magma(name, val)
self.acs.device_cfg.set_parameter(name, magma_val)
# Values of object parameters
obj_to_name_to_val = get_obj_param_values_to_set(
self.acs.desired_cfg,
self.acs.device_cfg,
self.acs.data_model,
)
for obj_name, name_to_val in obj_to_name_to_val.items():
for name, val in name_to_val.items():
logger.debug(
'Set obj: %s, name: %s, val: %s', str(obj_name),
str(name), str(val),
)
magma_val = self.acs.data_model.transform_for_magma(name, val)
self.acs.device_cfg.set_parameter_for_object(
name, magma_val,
obj_name,
)
logger.info('Successfully configured CPE parameters!')
def state_description(self) -> str:
return 'Setting parameter values'
class EndSessionState(EnodebAcsState):
""" To end a TR-069 session, send an empty HTTP response """
def __init__(self, acs: EnodebAcsStateMachine):
super().__init__()
self.acs = acs
def read_msg(self, message: Any) -> AcsReadMsgResult:
"""
No message is expected after enodebd sends the eNodeB
an empty HTTP response.
If a device sends an empty HTTP request, we can just
ignore it and send another empty response.
"""
if isinstance(message, models.DummyInput):
return AcsReadMsgResult(True, None)
return AcsReadMsgResult(False, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
request = models.DummyInput()
return AcsMsgAndTransition(request, None)
def state_description(self) -> str:
return 'Completed provisioning eNB. Awaiting new Inform.'
class EnbSendRebootState(EnodebAcsState):
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.prev_msg_was_inform = False
def read_msg(self, message: Any) -> AcsReadMsgResult:
"""
This state can be transitioned into through user command.
All messages received by enodebd will be ignored in this state.
"""
if self.prev_msg_was_inform \
and not isinstance(message, models.DummyInput):
return AcsReadMsgResult(False, None)
elif isinstance(message, models.Inform):
self.prev_msg_was_inform = True
process_inform_message(
message, self.acs.data_model,
self.acs.device_cfg,
)
return AcsReadMsgResult(True, None)
self.prev_msg_was_inform = False
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
if self.prev_msg_was_inform:
response = models.InformResponse()
# Set maxEnvelopes to 1, as per TR-069 spec
response.MaxEnvelopes = 1
return AcsMsgAndTransition(response, None)
logger.info('Sending reboot request to eNB')
request = models.Reboot()
request.CommandKey = ''
self.acs.are_invasive_changes_applied = True
return AcsMsgAndTransition(request, self.done_transition)
def state_description(self) -> str:
return 'Rebooting eNB'
class SendRebootState(EnodebAcsState):
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.prev_msg_was_inform = False
def read_msg(self, message: Any) -> AcsReadMsgResult:
"""
This state can be transitioned into through user command.
All messages received by enodebd will be ignored in this state.
"""
if self.prev_msg_was_inform \
and not isinstance(message, models.DummyInput):
return AcsReadMsgResult(False, None)
elif isinstance(message, models.Inform):
self.prev_msg_was_inform = True
process_inform_message(
message, self.acs.data_model,
self.acs.device_cfg,
)
return AcsReadMsgResult(True, None)
self.prev_msg_was_inform = False
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
if self.prev_msg_was_inform:
response = models.InformResponse()
# Set maxEnvelopes to 1, as per TR-069 spec
response.MaxEnvelopes = 1
return AcsMsgAndTransition(response, None)
logger.info('Sending reboot request to eNB')
request = models.Reboot()
request.CommandKey = ''
return AcsMsgAndTransition(request, self.done_transition)
def state_description(self) -> str:
return 'Rebooting eNB'
class WaitRebootResponseState(EnodebAcsState):
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
def read_msg(self, message: Any) -> AcsReadMsgResult:
if not isinstance(message, models.RebootResponse):
return AcsReadMsgResult(False, None)
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
""" Reply with empty message """
return AcsMsgAndTransition(models.DummyInput(), self.done_transition)
def state_description(self) -> str:
return 'Rebooting eNB'
class WaitInformMRebootState(EnodebAcsState):
"""
After sending a reboot request, we expect an Inform request with a
specific 'inform event code'
"""
# Time to wait for eNodeB reboot. The measured time
# (on BaiCells indoor eNodeB)
# is ~110secs, so add healthy padding on top of this.
REBOOT_TIMEOUT = 300 # In seconds
# We expect that the Inform we receive tells us the eNB has rebooted
INFORM_EVENT_CODE = 'M Reboot'
def __init__(
self,
acs: EnodebAcsStateMachine,
when_done: str,
when_timeout: str,
):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.timeout_transition = when_timeout
self.timeout_timer = None
self.timer_handle = None
def enter(self):
self.timeout_timer = StateMachineTimer(self.REBOOT_TIMEOUT)
def check_timer() -> None:
if self.timeout_timer.is_done():
self.acs.transition(self.timeout_transition)
raise Tr069Error(
'Did not receive Inform response after '
'rebooting',
)
self.timer_handle = \
self.acs.event_loop.call_later(
self.REBOOT_TIMEOUT,
check_timer,
)
def exit(self):
self.timer_handle.cancel()
self.timeout_timer = None
def read_msg(self, message: Any) -> AcsReadMsgResult:
if not isinstance(message, models.Inform):
return AcsReadMsgResult(False, None)
if not does_inform_have_event(message, self.INFORM_EVENT_CODE):
raise Tr069Error(
'Did not receive M Reboot event code in '
'Inform',
)
process_inform_message(
message, self.acs.data_model,
self.acs.device_cfg,
)
return AcsReadMsgResult(True, self.done_transition)
def state_description(self) -> str:
return 'Waiting for M Reboot code from Inform'
class WaitRebootDelayState(EnodebAcsState):
"""
After receiving the Inform notifying us that the eNodeB has successfully
rebooted, wait a short duration to prevent unspecified race conditions
that may occur w.r.t reboot
"""
# Short delay timer to prevent race conditions w.r.t. reboot
SHORT_CONFIG_DELAY = 10
def __init__(self, acs: EnodebAcsStateMachine, when_done: str):
super().__init__()
self.acs = acs
self.done_transition = when_done
self.config_timer = None
self.timer_handle = None
def enter(self):
self.config_timer = StateMachineTimer(self.SHORT_CONFIG_DELAY)
def check_timer() -> None:
if self.config_timer.is_done():
self.acs.transition(self.done_transition)
self.timer_handle = \
self.acs.event_loop.call_later(
self.SHORT_CONFIG_DELAY,
check_timer,
)
def exit(self):
self.timer_handle.cancel()
self.config_timer = None
def read_msg(self, message: Any) -> AcsReadMsgResult:
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
return AcsMsgAndTransition(models.DummyInput(), None)
def state_description(self) -> str:
return 'Waiting after eNB reboot to prevent race conditions'
class ErrorState(EnodebAcsState):
"""
The eNB handler will enter this state when an unhandled Fault is received.
If the inform_transition_target constructor parameter is non-null, this
state will attempt to autoremediate by transitioning to the specified
target state when an Inform is received.
"""
def __init__(
self, acs: EnodebAcsStateMachine,
inform_transition_target: Optional[str] = None,
):
super().__init__()
self.acs = acs
self.inform_transition_target = inform_transition_target
def read_msg(self, message: Any) -> AcsReadMsgResult:
return AcsReadMsgResult(True, None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
if not self.inform_transition_target:
return AcsMsgAndTransition(models.DummyInput(), None)
if isinstance(message, models.Inform):
return AcsMsgAndTransition(
models.DummyInput(),
self.inform_transition_target,
)
return AcsMsgAndTransition(models.DummyInput(), None)
def state_description(self) -> str:
return 'Error state - awaiting manual restart of enodebd service or ' \
'an Inform to be received from the eNB'
class NotifyDPState(EnodebAcsState, ABC):
""" Notify DP ...
For Baicells QRTB we can expect an inform message on
End Session state, either a queued one or a periodic one
"""
def __init__(
self,
acs: EnodebAcsStateMachine,
when_inform: str,
):
super().__init__()
self.acs = acs
self.inform_transition = when_inform
@abstractmethod
def enter(self):
"""
Perform additional actions on state enter
"""
pass
def read_msg(self, message: Any) -> AcsReadMsgResult:
"""
Send an empty response if a device sends an empty HTTP message
        If it's an Inform, try to process it. It could be a queued
inform or a periodic one.
Args:
message (Any): TR069 message
Returns:
AcsReadMsgResult
"""
if isinstance(message, models.DummyInput):
return AcsReadMsgResult(msg_handled=True, next_state=None)
elif isinstance(message, models.Inform):
return AcsReadMsgResult(msg_handled=True, next_state=self.inform_transition)
return AcsReadMsgResult(msg_handled=False, next_state=None)
def get_msg(self, message: Any) -> AcsMsgAndTransition:
"""
Send back a message to enb
Args:
message (Any): TR069 message
Returns:
AcsMsgAndTransition
"""
request = models.DummyInput()
return AcsMsgAndTransition(msg=request, next_state=None)
def state_description(self) -> str:
"""
Describe the state
Returns:
str
"""
return 'Notifying DP. Awaiting new Inform.'
|
the-stack_106_18438
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Setup.py module for the workflow's worker utilities.
All the workflow related code is gathered in a package that will be built as a
source distribution, staged in the staging area for the workflow being run and
then installed in the workers when they start running.
This behavior is triggered by specifying the --setup_file command line option
when running the workflow for remote execution.
"""
from __future__ import print_function
import subprocess
from distutils.command.build import build as _build
import os
import setuptools
# This class handles the pip install mechanism.
class build(_build): # pylint: disable=invalid-name
"""A build command class that will be invoked during package install.
The package built using the current setup.py will be staged and later
installed in the worker using `pip install package'. This class will be
instantiated during install for this specific scenario and will trigger
running the custom commands specified.
"""
sub_commands = _build.sub_commands + [('CustomCommands', None)]
# Some custom commands to run during setup. The commands are not essential for this
# workflow. They are used here as an example. Each command will spawn a child
# process. Typically, these commands will include steps to install non-Python
# packages. For instance, to install a C++-based library libjpeg62 the following
# two commands will have to be added:
#
# ['apt-get', 'update'],
# ['apt-get', '--assume-yes', 'install', 'libjpeg62'],
#
# First, note that there is no need to use the sudo command because the setup
# script runs with appropriate access.
# Second, if the apt-get tool is used then the first command needs to be 'apt-get
# update' so the tool refreshes itself and initializes links to download
# repositories. Without this initial step the other apt-get install commands
# will fail with package not found errors. Note also --assume-yes option which
# shortcuts the interactive confirmation.
#
# Note that in this example custom commands will run after installing required
# packages. If you have a PyPI package that depends on one of the custom
# commands, move installation of the dependent package to the list of custom
# commands, e.g.:
#
# ['pip', 'install', 'my_package'],
#
CUSTOM_COMMANDS = [
# 'apt-get update'.split(),
# 'apt-get --assume-yes install python-tk'.split()
]
class CustomCommands(setuptools.Command):
"""A setuptools Command class able to run arbitrary commands."""
def initialize_options(self):
pass
def finalize_options(self):
pass
def RunCustomCommand(self, command_list):
print('Running command: %s' % command_list)
p = subprocess.Popen(
command_list,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
# Can use communicate(input='y\n'.encode()) if the command run requires
# some confirmation.
stdout_data, _ = p.communicate()
print('Command output: %s' % stdout_data)
if p.returncode != 0:
raise RuntimeError(
'Command %s failed: exit code: %s' % (command_list, p.returncode))
def run(self):
for command in CUSTOM_COMMANDS:
self.RunCustomCommand(command)
# Configure the required packages and scripts to install.
# Note that the Python Dataflow containers come with numpy already installed
# so this dependency will not trigger anything to be installed unless a version
# restriction is specified.
REQUIRED_PACKAGES = (
    'pyresample netcdf4 google-cloud-storage '
    'retrying cloudml-hypertune'.split()
)
setuptools.setup(
name='ltgpred',
version='0.0.1',
description='Predict lightning activity',
install_requires=REQUIRED_PACKAGES,
packages=setuptools.find_packages(),
cmdclass={
# Command class instantiated and run during pip install scenarios.
'build': build,
'CustomCommands': CustomCommands,
})
|
the-stack_106_18440
|
import json
import re
from ProxyPool.Utls import getPage
from pyquery import PyQuery as pq
class ProxyMetaClass(type):
def __new__(cls, name, bases, attrs):
count = 0
attrs['__CrawlFunc__'] = []
for k, v in attrs.items():
if 'crawl' in k:
attrs['__CrawlFunc__'].append(k)
count += 1
attrs['__CrawlFuncCount__'] = count
return type.__new__(cls, name, bases, attrs)
class Crawler(object, metaclass=ProxyMetaClass):
def getProxies(self, callback):
proxies = []
        for proxy in getattr(self, callback)():
            print('successfully got proxy:', proxy)
proxies.append(proxy)
return proxies
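    # A minimal usage sketch (variable names are illustrative only):
    #
    #   crawler = Crawler()
    #   for callback in crawler.__CrawlFunc__:
    #       proxies = crawler.getProxies(callback)
    #
    # ProxyMetaClass records every attribute whose name contains 'crawl' in
    # __CrawlFunc__, so getProxies can be driven purely from that list.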
# def crawl_daili66(self, page_count=4):
# """
    #     Fetch proxies from daili66 (www.66ip.cn)
    #     :param page_count: number of pages to crawl
    #     :return: proxies
# """
# start_url = 'http://www.66ip.cn/{}.html'
# urls = [start_url.format(page) for page in range(1, page_count + 1)]
# for url in urls:
# print('Crawling', url)
# html = getPage(url)
# if html:
# doc = pq(html)
# trs = doc('.containerbox table tr:gt(0)').items()
# for tr in trs:
# ip = tr.find('td:nth-child(1)').text()
# port = tr.find('td:nth-child(2)').text()
# yield ':'.join([ip, port])
# def crawl_ip181(self):
# start_url = 'http://www.ip181.com/'
# html = getPage(start_url)
# ip_address = re.compile('<tr.*?>\s*<td>(.*?)</td>\s*<td>(.*?)</td>')
    #     # \s* matches whitespace so the pattern spans line breaks
# re_ip_address = ip_address.findall(html)
# for address,port in re_ip_address:
# result = address + ':' + port
# yield result.replace(' ', '')
def crawl_ip3366(self):
for page in range(1, 4):
start_url = 'http://www.ip3366.net/free/?stype=1&page={}'.format(page)
html = getPage(start_url)
ip_address = re.compile('<tr>\s*<td>(.*?)</td>\s*<td>(.*?)</td>')
            # \s* matches whitespace so the pattern spans line breaks
re_ip_address = ip_address.findall(html)
for address, port in re_ip_address:
            result = address + ':' + port
yield result.replace(' ', '')
def crawl_kxdaili(self):
start_url = 'https://www.kuaidaili.com/free/'
html = getPage(start_url)
ip_address = re.compile('<tr.*?>\s*<td>(.*?)</td>\s*<td>(.*?)</td>')
        # \s* matches whitespace so the pattern spans line breaks
re_ip_address = ip_address.findall(html)
for address, port in re_ip_address:
result = address + ':' + port
yield result.replace(' ', '')
# def crawl_premproxy(self):
# for i in ['China-01','China-02','China-03','China-04','Taiwan-01']:
# start_url = 'https://premproxy.com/proxy-by-country/{}.htm'.format(i)
# html = getPage(start_url)
# if html:
# ip_address = re.compile('<td data-label="IP:port ">(.*?)</td>')
# re_ip_address = ip_address.findall(html)
# for address_port in re_ip_address:
# yield address_port.replace(' ','')
# def crawl_xroxy(self):
# for i in ['CN','TW']:
# start_url = 'http://www.xroxy.com/proxylist.php?country={}'.format(i)
# html = getPage(start_url)
# if html:
# ip_address1 = re.compile("title='View this Proxy details'>\s*(.*).*")
# re_ip_address1 = ip_address1.findall(html)
# ip_address2 = re.compile("title='Select proxies with port number .*'>(.*)</a>")
# re_ip_address2 = ip_address2.findall(html)
# for address,port in zip(re_ip_address1,re_ip_address2):
# address_port = address+':'+port
# yield address_port.replace(' ','')
# def crawl_kuaidaili(self):
# for i in range(1, 4):
# start_url = 'http://www.kuaidaili.com/free/inha/{}/'.format(i)
# html = getPage(start_url)
# if html:
# ip_address = re.compile('<td data-title="IP">(.*?)</td>')
# re_ip_address = ip_address.findall(html)
# port = re.compile('<td data-title="PORT">(.*?)</td>')
# re_port = port.findall(html)
# for address,port in zip(re_ip_address, re_port):
# address_port = address+':'+port
# yield address_port.replace(' ','')
# def crawl_xicidaili(self):
# for i in range(1, 3):
# start_url = 'http://www.xicidaili.com/nn/{}'.format(i)
# headers = {
# 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
# 'Cookie':'_free_proxy_session=BAh7B0kiD3Nlc3Npb25faWQGOgZFVEkiJWRjYzc5MmM1MTBiMDMzYTUzNTZjNzA4NjBhNWRjZjliBjsAVEkiEF9jc3JmX3Rva2VuBjsARkkiMUp6S2tXT3g5a0FCT01ndzlmWWZqRVJNek1WanRuUDBCbTJUN21GMTBKd3M9BjsARg%3D%3D--2a69429cb2115c6a0cc9a86e0ebe2800c0d471b3',
# 'Host':'www.xicidaili.com',
# 'Referer':'http://www.xicidaili.com/nn/3',
# 'Upgrade-Insecure-Requests':'1',
# }
# html = getPage(start_url, options=headers)
# if html:
# find_trs = re.compile('<tr class.*?>(.*?)</tr>', re.S)
# trs = find_trs.findall(html)
# for tr in trs:
# find_ip = re.compile('<td>(\d+\.\d+\.\d+\.\d+)</td>')
# re_ip_address = find_ip.findall(tr)
# find_port = re.compile('<td>(\d+)</td>')
# re_port = find_port.findall(tr)
# for address,port in zip(re_ip_address, re_port):
# address_port = address+':'+port
# yield address_port.replace(' ','')
# def crawl_ip3366(self):
# for i in range(1, 4):
# start_url = 'http://www.ip3366.net/?stype=1&page={}'.format(i)
# html = getPage(start_url)
# if html:
# find_tr = re.compile('<tr>(.*?)</tr>', re.S)
# trs = find_tr.findall(html)
# for s in range(1, len(trs)):
# find_ip = re.compile('<td>(\d+\.\d+\.\d+\.\d+)</td>')
# re_ip_address = find_ip.findall(trs[s])
# find_port = re.compile('<td>(\d+)</td>')
# re_port = find_port.findall(trs[s])
# for address,port in zip(re_ip_address, re_port):
# address_port = address+':'+port
# yield address_port.replace(' ','')
# def crawl_iphai(self):
# start_url = 'http://www.iphai.com/'
# html = getPage(start_url)
# if html:
# find_tr = re.compile('<tr>(.*?)</tr>', re.S)
# trs = find_tr.findall(html)
# for s in range(1, len(trs)):
# find_ip = re.compile('<td>\s+(\d+\.\d+\.\d+\.\d+)\s+</td>', re.S)
# re_ip_address = find_ip.findall(trs[s])
# find_port = re.compile('<td>\s+(\d+)\s+</td>', re.S)
# re_port = find_port.findall(trs[s])
# for address,port in zip(re_ip_address, re_port):
# address_port = address+':'+port
# yield address_port.replace(' ','')
# def crawl_89ip(self):
# start_url = 'http://www.89ip.cn/apijk/?&tqsl=1000&sxa=&sxb=&tta=&ports=&ktip=&cf=1'
# html = getPage(start_url)
# if html:
# find_ips = re.compile('(\d+\.\d+\.\d+\.\d+:\d+)', re.S)
# ip_ports = find_ips.findall(html)
# for address_port in ip_ports:
# yield address_port
# def crawl_data5u(self):
# start_url = 'http://www.data5u.com/free/gngn/index.shtml'
# headers = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
# 'Accept-Encoding': 'gzip, deflate',
# 'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
# 'Cache-Control': 'max-age=0',
# 'Connection': 'keep-alive',
# 'Cookie': 'JSESSIONID=47AA0C887112A2D83EE040405F837A86',
# 'Host': 'www.data5u.com',
# 'Referer': 'http://www.data5u.com/free/index.shtml',
# 'Upgrade-Insecure-Requests': '1',
# 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36',
# }
# html = getPage(start_url, options=headers)
# if html:
# ip_address = re.compile('<span><li>(\d+\.\d+\.\d+\.\d+)</li>.*?<li class=\"port.*?>(\d+)</li>', re.S)
# re_ip_address = ip_address.findall(html)
# for address, port in re_ip_address:
# result = address + ':' + port
# yield result.replace(' ', '')
|
the-stack_106_18443
|
import pandas as pd
from .helpers import pandas_to_json
from .consts import profile_col_names
pd.set_option('display.max_columns', 40)
import sys
# data processing
def process_data(inf_dict, friends_dict, profile_dict, lk_dict, final_data_dict):
# convert dicts to pandas dfs
inf_df = pd.DataFrame(inf_dict, index=[0])
friends_df = pd.DataFrame(friends_dict)
profile_df = pd.DataFrame(profile_dict, index=[0])
lk_df = pd.DataFrame(lk_dict)
# rename columns in profile df
profile_df.columns = profile_col_names
# calculate additional infos and clean up dfs
# inf_df:
inf_df['info_rank'] = inf_df['info_rank'].str.split()
inf_df['info_rank'] = inf_df['info_rank'].values.tolist()[0][1]
# profile_df:
profile_df = split_wins_and_losses(profile_df, ':')
# lk_df:
lk_df = lk_df[lk_df['result'] != 'irrelevant']
lk_df['match_type'] = lk_df.apply(lambda x: 'doubles' if x['lk_points'] == '-' else 'singles', axis=1)
# return final data dict
final_data_dict['Info Data'][0]['rows'] = inf_df.values.tolist()
final_data_dict['Friends Data'][0]['rows'] = friends_df.values.tolist()
final_data_dict['Profile Data'][0]['rows'] = profile_df.values.tolist()
final_data_dict['LK Data'][0]['rows'] = lk_df.values.tolist()
return final_data_dict
def split_wins_and_losses(df, separator):
"""
splits a column by given separator and
creates new columns
args:
df: a data frame
separator: separator to split by
    returns: new df with split columns
"""
for col in df:
lst_entry = df[col].str.split(separator)[0]
len_lst_entry = len(lst_entry)
if len_lst_entry > 1:
df[col + '_win'] = lst_entry[0]
df[col + '_loss'] = lst_entry[1]
del df[col]
return df
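# A small illustrative example of split_wins_and_losses; the column names and
# values below are made up for demonstration:
#
#   df = pd.DataFrame({'singles': ['12:5'], 'doubles': ['3:4']})
#   df = split_wins_and_losses(df, ':')
#   # resulting columns: singles_win, singles_loss, doubles_win, doubles_loss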
|
the-stack_106_18447
|
# --------------------------------------------------------
# Pytorch multi-GPU Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import sys
import numpy as np
import argparse
import pprint
import pdb
import time
import copy
import itertools
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data.sampler import Sampler
from roi_data_layer.roidb import combined_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from roi_data_layer.randcrop_roibatchLoader import randcrop_roibatchLoader
from utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from utils.net_utils import weights_normal_init, save_net, load_net, \
adjust_learning_rate, save_checkpoint, clip_gradient
from fast_rcnn.vgg16 import vgg16
from fast_rcnn.resnet import resnet
from fast_rcnn.alexnet import alexnet
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument('--dataset', dest='dataset',
help='training dataset',
default='pascal_voc', type=str)
parser.add_argument('--net', dest='net',
help='vgg16, res101, alexnet',
default='vgg16', type=str)
parser.add_argument('--imdb_name', dest='imdb_name',
help='train imdb name',
default='vocweak_2007_trainval/trainvalmini', type=str)
parser.add_argument('--imdbval_name', dest='imdbval_name',
help='validation imdb name',
default='vocweak_2007_test', type=str)
parser.add_argument('--start_epoch', dest='start_epoch',
help='starting epoch',
default=1, type=int)
parser.add_argument('--epochs', dest='max_epochs',
help='number of epochs to train',
default=30, type=int)
parser.add_argument('--save_epoch', dest='save_epoch',
help='step of epochs to save',
default=5, type=int)
parser.add_argument('--model_id', dest='model_id',
help='model id to save',
default=13, type=int)
parser.add_argument('--disp_interval', dest='disp_interval',
help='number of iterations to display',
default=100, type=int)
parser.add_argument('--checkpoint_interval', dest='checkpoint_interval',
help='number of iterations to display',
default=10000, type=int)
parser.add_argument('--save_dir', dest='save_dir',
help='directory to save models', default="snapshots",
type=str)
parser.add_argument('--pretrained_weight', dest='pretrained_weight',
help='pretrained weight', default="",
type=str)
parser.add_argument('--nw', dest='num_workers',
help='number of worker to load data',
default=0, type=int)
parser.add_argument('--cuda', dest='cuda',
help='whether use CUDA',
action='store_true')
parser.add_argument('--ls', dest='large_scale',
help='whether use large imag scale',
action='store_true')
parser.add_argument('--mGPUs', dest='mGPUs',
help='whether use multiple GPUs',
action='store_true')
parser.add_argument('--bs', dest='batch_size',
help='batch_size',
default=1, type=int)
parser.add_argument('--cag', dest='class_agnostic',
help='whether perform class_agnostic bbox regression',
action='store_true')
# config optimization
parser.add_argument('--o', dest='optimizer',
help='training optimizer',
default="sgd", type=str)
parser.add_argument('--lr', dest='lr',
help='starting learning rate',
default=0.001, type=float)
parser.add_argument('--lr_decay_step', dest='lr_decay_step',
help='step to do learning rate decay, unit is epoch',
default=20, type=int)
parser.add_argument('--lr_decay_gamma', dest='lr_decay_gamma',
help='learning rate decay ratio',
default=0.1, type=float)
# set training session
parser.add_argument('--s', dest='session',
help='training session',
default=1, type=int)
# resume trained model
parser.add_argument('--r', dest='resume',
help='resume checkpoint or not',
default=False, type=bool)
parser.add_argument('--checksession', dest='checksession',
help='checksession to load model',
default=1, type=int)
parser.add_argument('--checkepoch', dest='checkepoch',
help='checkepoch to load model',
default=1, type=int)
parser.add_argument('--checkpoint', dest='checkpoint',
help='checkpoint to load model',
default=0, type=int)
# log and diaplay
parser.add_argument('--use_tfb', dest='use_tfboard',
help='whether use tensorboard',
action='store_true')
args = parser.parse_args()
return args
def iterate_once(iterable):
return np.random.permutation(iterable)
def iterate_eternally(indices):
def infinite_shuffles():
while True:
yield np.random.permutation(indices)
return itertools.chain.from_iterable(infinite_shuffles())
class sampler(Sampler):
def __init__(self, strong_inds, weak_inds):
self.strong_inds = strong_inds
self.weak_inds = weak_inds
def __iter__(self):
strong_iter = iterate_once(self.strong_inds)
weak_iter = iterate_eternally(self.weak_inds)
return (
[s_ind, w_ind]
for (s_ind, w_ind) in zip(strong_iter, weak_iter)
)
def __len__(self):
return len(self.strong_inds)
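# Note on the sampler above: each batch pairs one strongly-annotated index
# (ground-truth boxes available) with one weakly-annotated index (image-level
# labels only); the strong set is permuted once per epoch while the weak set
# is reshuffled endlessly. A rough illustration with made-up indices:
#
#   s = sampler(strong_inds=[0, 1], weak_inds=[7, 8, 9])
#   list(iter(s))  # e.g. [[1, 8], [0, 9]] -- random order each epoch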
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.dataset == "pascal_voc":
# args.imdb_name = "vocweak_2007_trainval/trainvalmid"
# args.imdbval_name = "vocweak_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]',
'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
elif args.dataset == "pascal_voc_0712":
args.imdb_name = "voc_2007_trainval+voc_2012_trainval"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]',
'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
elif args.dataset == "coco":
args.imdb_name = "coco_2014_train+coco_2014_valminusminival"
args.imdbval_name = "coco_2014_minival"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]',
'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '50']
elif args.dataset == "imagenet":
args.imdb_name = "imagenet_train"
args.imdbval_name = "imagenet_val"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]',
'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '30']
elif args.dataset == "vg":
# train sizes: train, smalltrain, minitrain
# train scale: ['150-50-20', '150-50-50', '500-150-80', '750-250-150', '1750-700-450', '1600-400-20']
args.imdb_name = "vg_150-50-50_minitrain"
args.imdbval_name = "vg_150-50-50_minival"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]',
'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '50']
args.cfg_file = "cfgs/{}_ls.yml".format(
args.net) if args.large_scale else "cfgs/{}.yml".format(args.net)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
print('Using config:')
pprint.pprint(cfg)
np.random.seed(cfg.RNG_SEED)
# torch.backends.cudnn.benchmark = True
if torch.cuda.is_available() and not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
# train set
# -- Note: Use validation set and disable the flipped to enable faster loading.
cfg.TRAIN.USE_FLIPPED = True
cfg.USE_GPU_NMS = args.cuda
imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdb_name)
print('{:d} roidb entries'.format(len(roidb)))
weak_inds = []
strong_inds = []
for i in range(len(roidb)):
if roidb[i]['boxes'].shape[0] > 0:
strong_inds.append(i)
for i in range(len(roidb)):
if roidb[i]['boxes'].shape[0] == 0:
weak_inds.append(i)
train_size = len(strong_inds)
print('{:d} strong entries and {:d} weak entries'.format(len(strong_inds), len(weak_inds)))
output_dir = args.save_dir + "/" + args.net + "/" + args.dataset
if not os.path.exists(output_dir):
os.makedirs(output_dir)
batch_sampler = sampler(strong_inds, weak_inds)
dataset = randcrop_roibatchLoader(roidb, ratio_list, ratio_index, args.batch_size,
imdb.num_classes, training=True)
dataloader = torch.utils.data.DataLoader(dataset, batch_sampler=batch_sampler, num_workers=args.num_workers)
# initilize the tensor holder here.
im_data = torch.FloatTensor(1)
im_info = torch.FloatTensor(1)
num_boxes = torch.LongTensor(1)
gt_boxes = torch.FloatTensor(1)
wnum_boxes = torch.LongTensor(1)
wgt_boxes = torch.FloatTensor(1)
rois = torch.FloatTensor(1)
image_classes = torch.FloatTensor(1)
# ship to cuda
if args.cuda:
im_data = im_data.cuda()
im_info = im_info.cuda()
num_boxes = num_boxes.cuda()
gt_boxes = gt_boxes.cuda()
wnum_boxes = wnum_boxes.cuda()
wgt_boxes = wgt_boxes.cuda()
rois = rois.cuda()
image_classes = image_classes.cuda()
# make variable
im_data = Variable(im_data)
im_info = Variable(im_info)
num_boxes = Variable(num_boxes)
gt_boxes = Variable(gt_boxes)
wnum_boxes = Variable(wnum_boxes)
wgt_boxes = Variable(wgt_boxes)
rois = Variable(rois)
image_classes = Variable(image_classes)
if args.cuda:
cfg.CUDA = True
# initilize the network here.
if args.net == 'vgg16':
fastRCNN = vgg16(imdb.classes, pretrained=True,
class_agnostic=args.class_agnostic,
pretrained_weight=args.pretrained_weight)
elif args.net == 'res101':
fastRCNN = resnet(imdb.classes, 101, pretrained=True,
class_agnostic=args.class_agnostic,
pretrained_weight=args.pretrained_weight)
elif args.net == 'alexnet':
fastRCNN = alexnet(imdb.classes, pretrained=True,
class_agnostic=args.class_agnostic,
pretrained_weight=args.pretrained_weight)
else:
print("network is not defined")
pdb.set_trace()
fastRCNN.create_architecture()
lr = cfg.TRAIN.LEARNING_RATE
lr = args.lr
params = []
for key, value in dict(fastRCNN.named_parameters()).items():
if value.requires_grad:
if 'bias' in key:
params += [{'params': [value], 'lr':lr * (cfg.TRAIN.DOUBLE_BIAS + 1),
'weight_decay': cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0}]
else:
params += [{'params': [value], 'lr':lr,
'weight_decay': cfg.TRAIN.WEIGHT_DECAY}]
if args.optimizer == "adam":
lr = lr * 0.1
optimizer = torch.optim.Adam(params)
elif args.optimizer == "sgd":
optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.MOMENTUM)
if args.cuda:
fastRCNN.cuda()
if args.resume:
load_name = os.path.join(output_dir,
'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))
print("loading checkpoint %s" % (load_name))
checkpoint = torch.load(load_name)
args.session = checkpoint['session']
args.start_epoch = checkpoint['epoch']
fastRCNN.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
lr = optimizer.param_groups[0]['lr']
if 'pooling_mode' in checkpoint.keys():
cfg.POOLING_MODE = checkpoint['pooling_mode']
print("loaded checkpoint %s" % (load_name))
if args.mGPUs:
fastRCNN = nn.DataParallel(fastRCNN)
iters_per_epoch = int(train_size / args.batch_size)
if args.use_tfboard:
from tensorboardX import SummaryWriter
logger = SummaryWriter("logs")
for epoch in range(args.start_epoch, args.max_epochs + 1):
# setting to train mode
fastRCNN.train()
loss_temp = 0
start = time.time()
if epoch % (args.lr_decay_step + 1) == 0:
adjust_learning_rate(optimizer, args.lr_decay_gamma)
lr *= args.lr_decay_gamma
data_iter = iter(dataloader)
for step in range(iters_per_epoch):
data = next(data_iter)
im_data.data.resize_(data[0].size()).copy_(data[0])
im_info.data.resize_(data[1].size()).copy_(data[1])
gt_boxes.data.resize_(data[2].size()).copy_(data[2])
num_boxes.data.resize_(data[3].size()).copy_(data[3])
wgt_boxes.data.resize_(data[4].size()).copy_(data[4])
wnum_boxes.data.resize_(data[5].size()).copy_(data[5])
rois.data.resize_(data[6].size()).copy_(data[6])
image_classes.data.resize_(data[7].size()).copy_(data[7])
fastRCNN.zero_grad()
rcnn_out_1, rcnn_out_2, rcnn_out_3, rcnn_out_4, rcnn_out_5 = fastRCNN(
im_data, im_info, gt_boxes, num_boxes, wgt_boxes, wnum_boxes, rois, image_classes)
out_rois_1, cls_prob_1, bbox_pred_1, RCNN_loss_cls_1, RCNN_loss_bbox_1, rois_label_1, image_loss_1 = rcnn_out_1
# out_rois_2, cls_prob_2, bbox_pred_2, RCNN_loss_cls_2, RCNN_loss_bbox_2, rois_label_2, image_loss_2 = rcnn_out_2
out_rois_3, cls_prob_3, bbox_pred_3, RCNN_loss_cls_3, RCNN_loss_bbox_3, rois_label_3, image_loss_3 = rcnn_out_3
out_rois_4, cls_prob_4, bbox_pred_4, RCNN_loss_cls_4, RCNN_loss_bbox_4, rois_label_4, image_loss_4 = rcnn_out_4
out_rois_5, cls_prob_5, bbox_pred_5, RCNN_loss_cls_5, RCNN_loss_bbox_5, rois_label_5, image_loss_5 = rcnn_out_5
loss = RCNN_loss_cls_1.mean() + RCNN_loss_bbox_1.mean()
# loss += RCNN_loss_cls_2.mean() + RCNN_loss_bbox_2.mean()
loss += RCNN_loss_cls_3.mean() + RCNN_loss_bbox_3.mean()
loss += RCNN_loss_cls_4.mean() + RCNN_loss_bbox_4.mean()
loss += RCNN_loss_cls_5.mean() + RCNN_loss_bbox_5.mean()
loss += image_loss_1.mean() + image_loss_5.mean()
RCNN_loss_cls = RCNN_loss_cls_1
RCNN_loss_bbox = RCNN_loss_bbox_1
rois_label = rois_label_1
loss_temp += loss.item()
# backward
optimizer.zero_grad()
loss.backward()
if args.net == "vgg16" or args.net == 'alexnet':
clip_gradient(fastRCNN, 10.)
# if args.net == 'res101':
# clip_gradient(fastRCNN, 10.)
optimizer.step()
if step % args.disp_interval == 0:
end = time.time()
if step > 0:
loss_temp /= (args.disp_interval + 1)
if args.mGPUs:
loss_rcnn_cls = RCNN_loss_cls.mean().item()
loss_rcnn_box = RCNN_loss_bbox.mean().item()
loss_image = image_loss_3.mean().item()
fg_cnt = torch.sum(rois_label.data.ne(0))
bg_cnt = rois_label.data.numel() - fg_cnt
else:
loss_rcnn_cls = RCNN_loss_cls.item()
loss_rcnn_box = RCNN_loss_bbox.item()
loss_image = image_loss_3.item()
fg_cnt = torch.sum(rois_label.data.ne(0))
bg_cnt = rois_label.data.numel() - fg_cnt
print("[session %d][epoch %2d][iter %4d/%4d] loss: %.4f, lr: %.2e"
% (args.session, epoch, step, iters_per_epoch, loss_temp, lr))
print("\t\t\tfg/bg=(%d/%d), time cost: %f" %
(fg_cnt, bg_cnt, end - start))
print("\t\t\trcnn_cls: %.4f, rcnn_box %.4f"
% (loss_rcnn_cls, loss_rcnn_box))
print("\t\t\timage_cls: %.4f"
% (loss_image))
if args.use_tfboard:
info = {
'loss': loss_temp,
'loss_rcnn_cls': loss_rcnn_cls,
'loss_rcnn_box': loss_rcnn_box
}
logger.add_scalars(
"logs_s_{}/losses".format(args.session), info, (epoch - 1) * iters_per_epoch + step)
loss_temp = 0
start = time.time()
if epoch % args.save_epoch == 0:
save_name = os.path.join(
output_dir, 'model{}_{}_{}_{}.pth'.format(args.model_id, args.session, epoch, step))
save_checkpoint({
'session': args.session,
'epoch': epoch + 1,
'model': fastRCNN.module.state_dict() if args.mGPUs else fastRCNN.state_dict(),
'optimizer': optimizer.state_dict(),
'pooling_mode': cfg.POOLING_MODE,
'class_agnostic': args.class_agnostic,
}, save_name)
print('save model: {}'.format(save_name))
if args.use_tfboard:
logger.close()
|
the-stack_106_18448
|
from __future__ import print_function
import os
import re
import sys
import json
import time
import argparse
import threading
import subprocess
import traceback
from time import sleep
import datetime
from distutils.version import LooseVersion
import pytz
from google.cloud import storage
from google.api_core.exceptions import PreconditionFailed
from queue import Queue
from contextlib import contextmanager
import urllib3
import requests
import demisto_client.demisto_api
from demisto_client.demisto_api.rest import ApiException
from slackclient import SlackClient
from Tests.mock_server import MITMProxy, AMIConnection
from Tests.test_integration import Docker, test_integration, disable_all_integrations
from Tests.test_dependencies import get_used_integrations, get_tests_allocation_for_threads
from demisto_sdk.commands.common.constants import RUN_ALL_TESTS_FORMAT, FILTER_CONF, PB_Status
from demisto_sdk.commands.common.tools import print_color, print_error, print_warning, \
LOG_COLORS, str2bool
# Disable insecure warnings
urllib3.disable_warnings()
SERVER_URL = "https://{}"
INTEGRATIONS_CONF = "./Tests/integrations_file.txt"
FAILED_MATCH_INSTANCE_MSG = "{} Failed to run.\n There are {} instances of {}, please select one of them by using " \
"the instance_name argument in conf.json. The options are:\n{}"
SERVICE_RESTART_TIMEOUT = 300
SERVICE_RESTART_POLLING_INTERVAL = 5
LOCKS_PATH = 'content-locks'
BUCKET_NAME = os.environ.get('GCS_ARTIFACTS_BUCKET')
CIRCLE_BUILD_NUM = os.environ.get('CIRCLE_BUILD_NUM')
WORKFLOW_ID = os.environ.get('CIRCLE_WORKFLOW_ID')
CIRCLE_STATUS_TOKEN = os.environ.get('CIRCLECI_STATUS_TOKEN')
SLACK_MEM_CHANNEL_ID = 'CM55V7J8K'
PROXY_LOG_FILE_NAME = 'proxy_metrics.csv'
ENV_RESULTS_PATH = './env_results.json'
def options_handler():
parser = argparse.ArgumentParser(description='Utility for batch action on incidents')
parser.add_argument('-k', '--apiKey', help='The Demisto API key for the server', required=True)
parser.add_argument('-s', '--server', help='The server URL to connect to')
parser.add_argument('-c', '--conf', help='Path to conf file', required=True)
parser.add_argument('-e', '--secret', help='Path to secret conf file')
parser.add_argument('-n', '--nightly', type=str2bool, help='Run nightly tests')
parser.add_argument('-t', '--slack', help='The token for slack', required=True)
parser.add_argument('-a', '--circleci', help='The token for circleci', required=True)
parser.add_argument('-b', '--buildNumber', help='The build number', required=True)
parser.add_argument('-g', '--buildName', help='The build name', required=True)
parser.add_argument('-i', '--isAMI', type=str2bool, help='is AMI build or not', default=False)
parser.add_argument('-m', '--memCheck', type=str2bool,
help='Should trigger memory checks or not. The slack channel to check the data is: '
'dmst_content_nightly_memory_data', default=False)
    parser.add_argument('-d', '--serverVersion', help='Which server version to run the '
                                                      'tests on (valid only when using AMI)', default="NonAMI")
    parser.add_argument('-l', '--testsList', help='List of specific, comma separated '
                                                  'tests to run')
options = parser.parse_args()
tests_settings = TestsSettings(options)
return tests_settings
class TestsSettings:
def __init__(self, options):
self.api_key = options.apiKey
self.server = options.server
self.conf_path = options.conf
self.secret_conf_path = options.secret
self.nightly = options.nightly
self.slack = options.slack
self.circleci = options.circleci
self.buildNumber = options.buildNumber
self.buildName = options.buildName
self.isAMI = options.isAMI
self.memCheck = options.memCheck
self.serverVersion = options.serverVersion
self.serverNumericVersion = None
self.specific_tests_to_run = self.parse_tests_list_arg(options.testsList)
self.is_local_run = (self.server is not None)
@staticmethod
def parse_tests_list_arg(tests_list):
tests_to_run = tests_list.split(",") if tests_list else []
return tests_to_run
class PrintJob:
def __init__(self, message_to_print, print_function_to_execute, message_color=None):
self.print_function_to_execute = print_function_to_execute
self.message_to_print = message_to_print
self.message_color = message_color
def execute_print(self):
if self.message_color:
self.print_function_to_execute(self.message_to_print, self.message_color)
else:
self.print_function_to_execute(self.message_to_print)
class ParallelPrintsManager:
def __init__(self, number_of_threads):
self.threads_print_jobs = [[] for i in range(number_of_threads)]
self.print_lock = threading.Lock()
self.threads_last_update_times = [time.time() for i in range(number_of_threads)]
def should_update_thread_status(self, thread_index):
current_time = time.time()
thread_last_update = self.threads_last_update_times[thread_index]
return current_time - thread_last_update > 300
def add_print_job(self, message_to_print, print_function_to_execute, thread_index, message_color=None,
include_timestamp=False):
if include_timestamp:
message_to_print = f'[{datetime.datetime.now(datetime.timezone.utc)}] {message_to_print}'
print_job = PrintJob(message_to_print, print_function_to_execute, message_color=message_color)
self.threads_print_jobs[thread_index].append(print_job)
if self.should_update_thread_status(thread_index):
print("Thread {} is still running.".format(thread_index))
self.threads_last_update_times[thread_index] = time.time()
def execute_thread_prints(self, thread_index):
self.print_lock.acquire()
prints_to_execute = self.threads_print_jobs[thread_index]
for print_job in prints_to_execute:
print_job.execute_print()
self.print_lock.release()
self.threads_print_jobs[thread_index] = []
class TestsDataKeeper:
def __init__(self):
self.succeeded_playbooks = []
self.failed_playbooks = []
self.skipped_tests = []
self.skipped_integrations = []
self.rerecorded_tests = []
self.empty_files = []
self.unmockable_integrations = {}
def add_tests_data(self, succeed_playbooks, failed_playbooks, skipped_tests, skipped_integration,
unmockable_integrations):
# Using multiple appends and not extend since append is guaranteed to be thread safe
for playbook in succeed_playbooks:
self.succeeded_playbooks.append(playbook)
for playbook in failed_playbooks:
self.failed_playbooks.append(playbook)
for playbook in skipped_tests:
self.skipped_tests.append(playbook)
for playbook in skipped_integration:
self.skipped_integrations.append(playbook)
for playbook_id, reason in unmockable_integrations.items():
self.unmockable_integrations[playbook_id] = reason
def add_proxy_related_test_data(self, proxy):
# Using multiple appends and not extend since append is guaranteed to be thread safe
for playbook_id in proxy.rerecorded_tests:
self.rerecorded_tests.append(playbook_id)
for playbook_id in proxy.empty_files:
self.empty_files.append(playbook_id)
def print_test_summary(tests_data_keeper, is_ami=True):
succeed_playbooks = tests_data_keeper.succeeded_playbooks
failed_playbooks = tests_data_keeper.failed_playbooks
skipped_tests = tests_data_keeper.skipped_tests
unmocklable_integrations = tests_data_keeper.unmockable_integrations
skipped_integration = tests_data_keeper.skipped_integrations
rerecorded_tests = tests_data_keeper.rerecorded_tests
empty_files = tests_data_keeper.empty_files
succeed_count = len(succeed_playbooks)
failed_count = len(failed_playbooks)
skipped_count = len(skipped_tests)
rerecorded_count = len(rerecorded_tests) if is_ami else 0
empty_mocks_count = len(empty_files) if is_ami else 0
unmocklable_integrations_count = len(unmocklable_integrations)
print('\nTEST RESULTS:')
tested_playbooks_message = '\t Number of playbooks tested - ' + str(succeed_count + failed_count)
print(tested_playbooks_message)
succeeded_playbooks_message = '\t Number of succeeded tests - ' + str(succeed_count)
print_color(succeeded_playbooks_message, LOG_COLORS.GREEN)
if failed_count > 0:
failed_tests_message = '\t Number of failed tests - ' + str(failed_count) + ':'
print_error(failed_tests_message)
for playbook_id in failed_playbooks:
print_error('\t - ' + playbook_id)
if rerecorded_count > 0:
recording_warning = '\t Tests with failed playback and successful re-recording - ' + str(rerecorded_count) + ':'
print_warning(recording_warning)
for playbook_id in rerecorded_tests:
print_warning('\t - ' + playbook_id)
if empty_mocks_count > 0:
empty_mock_successes_msg = '\t Successful tests with empty mock files - ' + str(empty_mocks_count) + ':'
print(empty_mock_successes_msg)
proxy_explanation = '\t (either there were no http requests or no traffic is passed through the proxy.\n' \
'\t Investigate the playbook and the integrations.\n' \
'\t If the integration has no http traffic, add to unmockable_integrations in conf.json)'
print(proxy_explanation)
for playbook_id in empty_files:
print('\t - ' + playbook_id)
if len(skipped_integration) > 0:
        skipped_integrations_warning = '\t Number of skipped integrations - ' + str(len(skipped_integration)) + ':'
print_warning(skipped_integrations_warning)
for playbook_id in skipped_integration:
print_warning('\t - ' + playbook_id)
if skipped_count > 0:
skipped_tests_warning = '\t Number of skipped tests - ' + str(skipped_count) + ':'
print_warning(skipped_tests_warning)
for playbook_id in skipped_tests:
print_warning('\t - ' + playbook_id)
    if unmockable_integrations_count > 0:
        unmockable_warning = '\t Number of unmockable integrations - ' + str(unmockable_integrations_count) + ':'
        print_warning(unmockable_warning)
        for playbook_id, reason in unmockable_integrations.items():
print_warning('\t - ' + playbook_id + ' - ' + reason)
def update_test_msg(integrations, test_message):
if integrations:
integrations_names = [integration['name'] for integration in
integrations]
test_message = test_message + ' with integration(s): ' + ','.join(
integrations_names)
return test_message
def turn_off_telemetry(xsoar_client):
"""
Turn off telemetry on the AMI instance
:param xsoar_client: Preconfigured client for the XSOAR instance
:return: None
"""
body, status_code, _ = demisto_client.generic_request_func(self=xsoar_client, method='POST',
path='/telemetry?status=notelemetry')
if status_code != 200:
print_error('Request to turn off telemetry failed with status code "{}"\n{}'.format(status_code, body))
sys.exit(1)
def reset_containers(server, demisto_user, demisto_pass, prints_manager, thread_index):
prints_manager.add_print_job('Resetting containers', print, thread_index)
client = demisto_client.configure(base_url=server, username=demisto_user, password=demisto_pass, verify_ssl=False)
body, status_code, _ = demisto_client.generic_request_func(self=client, method='POST',
path='/containers/reset')
if status_code != 200:
error_msg = 'Request to reset containers failed with status code "{}"\n{}'.format(status_code, body)
prints_manager.add_print_job(error_msg, print_error, thread_index)
prints_manager.execute_thread_prints(thread_index)
sys.exit(1)
sleep(10)
def has_unmockable_integration(integrations, unmockable_integrations):
return list(set(x['name'] for x in integrations).intersection(unmockable_integrations.keys()))
def get_docker_limit():
process = subprocess.Popen(['cat', '/sys/fs/cgroup/memory/memory.limit_in_bytes'], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
return stdout, stderr
def get_docker_processes_data():
process = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
return stdout, stderr
def get_docker_memory_data():
process = subprocess.Popen(['cat', '/sys/fs/cgroup/memory/memory.usage_in_bytes'], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
return stdout, stderr
def send_slack_message(slack, chanel, text, user_name, as_user):
sc = SlackClient(slack)
sc.api_call(
"chat.postMessage",
channel=chanel,
username=user_name,
as_user=as_user,
text=text,
mrkdwn='true'
)
def run_test_logic(conf_json_test_details, tests_queue, tests_settings, c, failed_playbooks, integrations, playbook_id,
succeed_playbooks, test_message, test_options, slack, circle_ci, build_number, server_url,
build_name, prints_manager, thread_index=0, is_mock_run=False):
with acquire_test_lock(integrations,
test_options.get('timeout'),
prints_manager,
thread_index,
tests_settings.conf_path) as lock:
if lock:
status, inc_id = test_integration(c, server_url, integrations, playbook_id, prints_manager, test_options,
is_mock_run, thread_index=thread_index)
# c.api_client.pool.close()
if status == PB_Status.COMPLETED:
prints_manager.add_print_job('PASS: {} succeed'.format(test_message), print_color, thread_index,
message_color=LOG_COLORS.GREEN)
succeed_playbooks.append(playbook_id)
elif status == PB_Status.NOT_SUPPORTED_VERSION:
not_supported_version_message = 'PASS: {} skipped - not supported version'.format(test_message)
prints_manager.add_print_job(not_supported_version_message, print, thread_index)
succeed_playbooks.append(playbook_id)
else:
error_message = 'Failed: {} failed'.format(test_message)
prints_manager.add_print_job(error_message, print_error, thread_index)
playbook_id_with_mock = playbook_id
if not is_mock_run:
playbook_id_with_mock += " (Mock Disabled)"
failed_playbooks.append(playbook_id_with_mock)
if not tests_settings.is_local_run:
notify_failed_test(slack, circle_ci, playbook_id, build_number, inc_id, server_url, build_name)
succeed = status in (PB_Status.COMPLETED, PB_Status.NOT_SUPPORTED_VERSION)
else:
tests_queue.put(conf_json_test_details)
succeed = False
return succeed
# run the test using a real instance, record traffic.
def run_and_record(conf_json_test_details, tests_queue, tests_settings, c, proxy, failed_playbooks, integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number,
server_url, build_name, prints_manager, thread_index=0):
proxy.set_tmp_folder()
proxy.start(playbook_id, record=True, thread_index=thread_index, prints_manager=prints_manager)
succeed = run_test_logic(conf_json_test_details, tests_queue, tests_settings, c, failed_playbooks, integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number,
server_url, build_name, prints_manager, thread_index=thread_index, is_mock_run=True)
proxy.stop(thread_index=thread_index, prints_manager=prints_manager)
if succeed:
proxy.successful_rerecord_count += 1
proxy.clean_mock_file(playbook_id, thread_index=thread_index, prints_manager=prints_manager)
proxy.move_mock_file_to_repo(playbook_id, thread_index=thread_index, prints_manager=prints_manager)
else:
proxy.failed_rerecord_count += 1
proxy.failed_rerecord_tests.append(playbook_id)
proxy.set_repo_folder()
return succeed
def mock_run(conf_json_test_details, tests_queue, tests_settings, c, proxy, failed_playbooks, integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number, server_url,
build_name, start_message, prints_manager, thread_index=0):
rerecord = False
if proxy.has_mock_file(playbook_id):
start_mock_message = '{} (Mock: Playback)'.format(start_message)
prints_manager.add_print_job(start_mock_message, print, thread_index, include_timestamp=True)
proxy.start(playbook_id, thread_index=thread_index, prints_manager=prints_manager)
# run test
status, _ = test_integration(c, server_url, integrations, playbook_id, prints_manager, test_options,
is_mock_run=True, thread_index=thread_index)
# use results
proxy.stop(thread_index=thread_index, prints_manager=prints_manager)
if status == PB_Status.COMPLETED:
proxy.successful_tests_count += 1
succeed_message = 'PASS: {} succeed'.format(test_message)
prints_manager.add_print_job(succeed_message, print_color, thread_index, LOG_COLORS.GREEN)
succeed_playbooks.append(playbook_id)
end_mock_message = f'------ Test {test_message} end ------\n'
prints_manager.add_print_job(end_mock_message, print, thread_index, include_timestamp=True)
return
if status == PB_Status.NOT_SUPPORTED_VERSION:
not_supported_version_message = 'PASS: {} skipped - not supported version'.format(test_message)
prints_manager.add_print_job(not_supported_version_message, print, thread_index)
succeed_playbooks.append(playbook_id)
end_mock_message = f'------ Test {test_message} end ------\n'
prints_manager.add_print_job(end_mock_message, print, thread_index, include_timestamp=True)
return
if status == PB_Status.FAILED_DOCKER_TEST:
error_message = 'Failed: {} failed'.format(test_message)
prints_manager.add_print_job(error_message, print_error, thread_index)
failed_playbooks.append(playbook_id)
end_mock_message = f'------ Test {test_message} end ------\n'
prints_manager.add_print_job(end_mock_message, print, thread_index, include_timestamp=True)
return
proxy.failed_tests_count += 1
mock_failed_message = "Test failed with mock, recording new mock file. (Mock: Recording)"
prints_manager.add_print_job(mock_failed_message, print, thread_index)
rerecord = True
else:
mock_recording_message = start_message + ' (Mock: Recording)'
prints_manager.add_print_job(mock_recording_message, print, thread_index, include_timestamp=True)
# Mock recording - no mock file or playback failure.
c = demisto_client.configure(base_url=c.api_client.configuration.host,
api_key=c.api_client.configuration.api_key, verify_ssl=False)
succeed = run_and_record(conf_json_test_details, tests_queue, tests_settings, c, proxy, failed_playbooks,
integrations, playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci,
build_number, server_url, build_name, prints_manager, thread_index=thread_index)
if rerecord and succeed:
proxy.rerecorded_tests.append(playbook_id)
test_end_message = f'------ Test {test_message} end ------\n'
prints_manager.add_print_job(test_end_message, print, thread_index, include_timestamp=True)
def run_test(conf_json_test_details, tests_queue, tests_settings, demisto_user, demisto_pass, proxy, failed_playbooks,
integrations, unmockable_integrations, playbook_id, succeed_playbooks, test_message, test_options,
slack, circle_ci, build_number, server_url, build_name, prints_manager, is_ami=True, thread_index=0):
start_message = f'------ Test {test_message} start ------'
client = demisto_client.configure(base_url=server_url, username=demisto_user, password=demisto_pass, verify_ssl=False)
if not is_ami or (not integrations or has_unmockable_integration(integrations, unmockable_integrations)):
prints_manager.add_print_job(start_message + ' (Mock: Disabled)', print, thread_index, include_timestamp=True)
run_test_logic(conf_json_test_details, tests_queue, tests_settings, client, failed_playbooks, integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number,
server_url, build_name, prints_manager, thread_index=thread_index)
prints_manager.add_print_job('------ Test %s end ------\n' % (test_message,), print, thread_index,
include_timestamp=True)
return
mock_run(conf_json_test_details, tests_queue, tests_settings, client, proxy, failed_playbooks, integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number,
server_url, build_name, start_message, prints_manager, thread_index=thread_index)
def http_request(url, params_dict=None):
try:
res = requests.request("GET",
url,
verify=True,
params=params_dict,
)
res.raise_for_status()
return res.json()
except Exception as e:
raise e
def get_user_name_from_circle(circleci_token, build_number):
url = "https://circleci.com/api/v1.1/project/github/demisto/content/{0}?circle-token={1}".format(build_number,
circleci_token)
res = http_request(url)
user_details = res.get('user', {})
return user_details.get('name', '')
def notify_failed_test(slack, circle_ci, playbook_id, build_number, inc_id, server_url, build_name):
circle_user_name = get_user_name_from_circle(circle_ci, build_number)
sc = SlackClient(slack)
user_id = retrieve_id(circle_user_name, sc)
text = "{0} - {1} Failed\n{2}".format(build_name, playbook_id, server_url) if inc_id == -1 \
else "{0} - {1} Failed\n{2}/#/WorkPlan/{3}".format(build_name, playbook_id, server_url, inc_id)
if user_id:
sc.api_call(
"chat.postMessage",
channel=user_id,
username="Content CircleCI",
as_user="False",
text=text
)
def retrieve_id(circle_user_name, sc):
user_id = ''
res = sc.api_call('users.list')
user_list = res.get('members', [])
for user in user_list:
profile = user.get('profile', {})
name = profile.get('real_name_normalized', '')
if name == circle_user_name:
user_id = user.get('id', '')
return user_id
def create_result_files(tests_data_keeper):
failed_playbooks = tests_data_keeper.failed_playbooks
skipped_integration = tests_data_keeper.skipped_integrations
skipped_tests = tests_data_keeper.skipped_tests
with open("./Tests/failed_tests.txt", "w") as failed_tests_file:
failed_tests_file.write('\n'.join(failed_playbooks))
with open('./Tests/skipped_tests.txt', "w") as skipped_tests_file:
skipped_tests_file.write('\n'.join(skipped_tests))
with open('./Tests/skipped_integrations.txt', "w") as skipped_integrations_file:
skipped_integrations_file.write('\n'.join(skipped_integration))
def change_placeholders_to_values(placeholders_map, config_item):
"""Replaces placeholders in the object to their real values
Args:
placeholders_map: (dict)
Dict that holds the real values to be replaced for each placeholder.
config_item: (json object)
Integration configuration object.
Returns:
dict. json object with the real configuration.
"""
item_as_string = json.dumps(config_item)
for key, value in placeholders_map.items():
item_as_string = item_as_string.replace(key, value)
return json.loads(item_as_string)
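# A small, hypothetical example of the placeholder substitution above (illustrative only):
# %%SERVER_HOST%% is the placeholder this runner actually uses; the config item and host
# value below are made up.
def _example_change_placeholders():
    """Illustrative sketch only; returns {'params': {'url': 'https://10.0.0.1:443'}}."""
    config_item = {'params': {'url': 'https://%%SERVER_HOST%%:443'}}
    placeholders_map = {'%%SERVER_HOST%%': '10.0.0.1'}
    return change_placeholders_to_values(placeholders_map, config_item)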
def set_integration_params(demisto_api_key, integrations, secret_params, instance_names, playbook_id,
prints_manager, placeholders_map, thread_index=0):
for integration in integrations:
integration_params = [change_placeholders_to_values(placeholders_map, item) for item
in secret_params if item['name'] == integration['name']]
if integration_params:
matched_integration_params = integration_params[0]
if len(integration_params) != 1:
found_matching_instance = False
for item in integration_params:
if item.get('instance_name', 'Not Found') in instance_names:
matched_integration_params = item
found_matching_instance = True
if not found_matching_instance:
optional_instance_names = [optional_integration.get('instance_name', 'None')
for optional_integration in integration_params]
error_msg = FAILED_MATCH_INSTANCE_MSG.format(playbook_id, len(integration_params),
integration['name'],
'\n'.join(optional_instance_names))
prints_manager.add_print_job(error_msg, print_error, thread_index)
return False
integration['params'] = matched_integration_params.get('params', {})
integration['byoi'] = matched_integration_params.get('byoi', True)
integration['instance_name'] = matched_integration_params.get('instance_name', integration['name'])
integration['validate_test'] = matched_integration_params.get('validate_test', True)
elif integration['name'] == 'Demisto REST API':
integration['params'] = {
'url': 'https://localhost',
'apikey': demisto_api_key,
'insecure': True,
}
return True
def collect_integrations(integrations_conf, skipped_integration, skipped_integrations_conf, nightly_integrations):
integrations = []
is_nightly_integration = False
test_skipped_integration = []
for integration in integrations_conf:
if integration in skipped_integrations_conf.keys():
skipped_integration.add("{0} - reason: {1}".format(integration, skipped_integrations_conf[integration]))
test_skipped_integration.append(integration)
if integration in nightly_integrations:
is_nightly_integration = True
# string description
integrations.append({
'name': integration,
'params': {}
})
return test_skipped_integration, integrations, is_nightly_integration
def extract_filtered_tests(is_nightly):
if is_nightly:
# TODO: verify this response
return [], False, True
with open(FILTER_CONF, 'r') as filter_file:
filtered_tests = filter_file.readlines()
filtered_tests = [line.strip('\n') for line in filtered_tests]
is_filter_configured = bool(filtered_tests)
run_all = RUN_ALL_TESTS_FORMAT in filtered_tests
return filtered_tests, is_filter_configured, run_all
def load_conf_files(conf_path, secret_conf_path):
with open(conf_path) as data_file:
conf = json.load(data_file)
secret_conf = None
if secret_conf_path:
with open(secret_conf_path) as data_file:
secret_conf = json.load(data_file)
return conf, secret_conf
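# For orientation only: a made-up, minimal structure holding the conf.json keys this
# module reads (load_conf_files itself just parses whatever file it is given; none of
# the values below come from a real conf.json).
def _example_minimal_conf():
    """Illustrative sketch only; every value here is invented."""
    return {
        'testTimeout': 30,
        'tests': [
            {'playbookID': 'ExamplePlaybook', 'integrations': ['ExampleIntegration'],
             'instance_names': [], 'timeout': 300, 'nightly': False,
             'fromversion': '5.0.0', 'toversion': '99.99.99'},
        ],
        'skipped_tests': {},
        'skipped_integrations': {},
        'nightly_integrations': [],
        'unmockable_integrations': {},
        'parallel_integrations': [],
    }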
def run_test_scenario(tests_queue, tests_settings, t, proxy, default_test_timeout, skipped_tests_conf,
nightly_integrations, skipped_integrations_conf, skipped_integration, is_nightly,
run_all_tests, is_filter_configured, filtered_tests, skipped_tests, secret_params,
failed_playbooks, playbook_skipped_integration, unmockable_integrations,
succeed_playbooks, slack, circle_ci, build_number, server, build_name,
server_numeric_version, demisto_user, demisto_pass, demisto_api_key,
prints_manager, thread_index=0, is_ami=True):
playbook_id = t['playbookID']
nightly_test = t.get('nightly', False)
integrations_conf = t.get('integrations', [])
instance_names_conf = t.get('instance_names', [])
test_message = 'playbook: ' + playbook_id
test_options = {
'timeout': t.get('timeout', default_test_timeout),
'memory_threshold': t.get('memory_threshold', Docker.DEFAULT_CONTAINER_MEMORY_USAGE),
'pid_threshold': t.get('pid_threshold', Docker.DEFAULT_CONTAINER_PIDS_USAGE)
}
if not isinstance(integrations_conf, list):
integrations_conf = [integrations_conf, ]
if not isinstance(instance_names_conf, list):
instance_names_conf = [instance_names_conf, ]
test_skipped_integration, integrations, is_nightly_integration = collect_integrations(
integrations_conf, skipped_integration, skipped_integrations_conf, nightly_integrations)
if playbook_id in filtered_tests:
playbook_skipped_integration.update(test_skipped_integration)
skip_nightly_test = (nightly_test or is_nightly_integration) and not is_nightly
# Skip nightly test
if skip_nightly_test:
prints_manager.add_print_job(f'\n------ Test {test_message} start ------', print, thread_index,
include_timestamp=True)
prints_manager.add_print_job('Skip test', print, thread_index)
prints_manager.add_print_job(f'------ Test {test_message} end ------\n', print, thread_index,
include_timestamp=True)
return
if not run_all_tests:
# Skip filtered test
if is_filter_configured and playbook_id not in filtered_tests:
return
# Skip bad test
if playbook_id in skipped_tests_conf:
skipped_tests.add(f'{playbook_id} - reason: {skipped_tests_conf[playbook_id]}')
return
# Skip integration
if test_skipped_integration:
return
# Skip version mismatch test
test_from_version = t.get('fromversion', '0.0.0')
test_to_version = t.get('toversion', '99.99.99')
if not (LooseVersion(test_from_version) <= LooseVersion(server_numeric_version) <= LooseVersion(test_to_version)):
prints_manager.add_print_job(f'\n------ Test {test_message} start ------', print, thread_index,
include_timestamp=True)
warning_message = 'Test {} ignored due to version mismatch (test versions: {}-{})'.format(test_message,
test_from_version,
test_to_version)
prints_manager.add_print_job(warning_message, print_warning, thread_index)
prints_manager.add_print_job(f'------ Test {test_message} end ------\n', print, thread_index,
include_timestamp=True)
return
placeholders_map = {'%%SERVER_HOST%%': server}
are_params_set = set_integration_params(demisto_api_key, integrations, secret_params, instance_names_conf,
playbook_id, prints_manager, placeholders_map, thread_index=thread_index)
if not are_params_set:
failed_playbooks.append(playbook_id)
return
test_message = update_test_msg(integrations, test_message)
options = options_handler()
stdout, stderr = get_docker_memory_data()
text = 'Memory Usage: {}'.format(stdout) if not stderr else stderr
if options.nightly and options.memCheck and not tests_settings.is_local_run:
send_slack_message(slack, SLACK_MEM_CHANNEL_ID, text, 'Content CircleCI', 'False')
stdout, stderr = get_docker_processes_data()
text = stdout if not stderr else stderr
send_slack_message(slack, SLACK_MEM_CHANNEL_ID, text, 'Content CircleCI', 'False')
run_test(t, tests_queue, tests_settings, demisto_user, demisto_pass, proxy, failed_playbooks,
integrations, unmockable_integrations, playbook_id, succeed_playbooks, test_message,
test_options, slack, circle_ci, build_number, server, build_name, prints_manager,
is_ami, thread_index=thread_index)
def load_env_results_json():
if not os.path.isfile(ENV_RESULTS_PATH):
return {}
with open(ENV_RESULTS_PATH, 'r') as json_file:
return json.load(json_file)
def get_server_numeric_version(ami_env, is_local_run=False):
"""
Gets the current server version
Arguments:
ami_env: (str)
AMI version name.
is_local_run: (bool)
when running locally, assume latest version.
Returns:
(str) Server numeric version
"""
default_version = '99.99.98'
if is_local_run:
print_color(f'Local run, assuming server version is {default_version}', LOG_COLORS.GREEN)
return default_version
env_json = load_env_results_json()
if not env_json:
print_warning(f'Did not find {ENV_RESULTS_PATH} file, assuming server version is {default_version}.')
return default_version
instances_ami_names = {env.get('AmiName') for env in env_json if ami_env in env.get('Role', '')}
if len(instances_ami_names) != 1:
print_warning(f'Did not get one AMI Name, got {instances_ami_names}.'
f' Assuming server version is {default_version}')
return default_version
instances_ami_name = list(instances_ami_names)[0]
extracted_version = re.findall(r'Demisto-(?:Circle-CI|MarketPlace)-Content-[\w-]+-([\d.]+)-[\d]{5}',
instances_ami_name)
if extracted_version:
server_numeric_version = extracted_version[0]
else:
server_numeric_version = default_version
# make sure version is three-part version
if server_numeric_version.count('.') == 1:
server_numeric_version += ".0"
print_color(f'Server version: {server_numeric_version}', LOG_COLORS.GREEN)
return server_numeric_version
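# A hypothetical illustration of the AMI-name parsing above (the AMI name below is made
# up): the regex captures the version part and single-dot versions are padded to three
# parts, matching what get_server_numeric_version does.
def _example_extract_server_version():
    """Illustrative sketch only; returns '6.0.0' for the invented name below."""
    ami_name = 'Demisto-Circle-CI-Content-Master-6.0-54321'
    extracted = re.findall(r'Demisto-(?:Circle-CI|MarketPlace)-Content-[\w-]+-([\d.]+)-[\d]{5}', ami_name)
    version = extracted[0] if extracted else '99.99.98'
    if version.count('.') == 1:
        version += '.0'
    return version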
def get_instances_ips_and_names(tests_settings):
if tests_settings.server:
return [tests_settings.server]
env_json = load_env_results_json()
instances_ips = [(env.get('Role'), env.get('InstanceDNS')) for env in env_json]
return instances_ips
def get_test_records_of_given_test_names(tests_settings, tests_names_to_search):
conf, secret_conf = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
tests_records = conf['tests']
test_records_with_supplied_names = []
for test_record in tests_records:
test_name = test_record.get("playbookID")
if test_name and test_name in tests_names_to_search:
test_records_with_supplied_names.append(test_record)
return test_records_with_supplied_names
def get_json_file(path):
with open(path, 'r') as json_file:
return json.loads(json_file.read())
def execute_testing(tests_settings, server_ip, mockable_tests_names, unmockable_tests_names,
tests_data_keeper, prints_manager, thread_index=0, is_ami=True):
server = SERVER_URL.format(server_ip)
server_numeric_version = tests_settings.serverNumericVersion
start_message = "Executing tests with the server {} - and the server ip {}".format(server, server_ip)
prints_manager.add_print_job(start_message, print, thread_index)
is_nightly = tests_settings.nightly
is_memory_check = tests_settings.memCheck
slack = tests_settings.slack
circle_ci = tests_settings.circleci
build_number = tests_settings.buildNumber
build_name = tests_settings.buildName
conf, secret_conf = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
demisto_api_key = tests_settings.api_key
demisto_user = secret_conf['username']
demisto_pass = secret_conf['userPassword']
default_test_timeout = conf.get('testTimeout', 30)
tests = conf['tests']
skipped_tests_conf = conf['skipped_tests']
nightly_integrations = conf['nightly_integrations']
skipped_integrations_conf = conf['skipped_integrations']
unmockable_integrations = conf['unmockable_integrations']
secret_params = secret_conf['integrations'] if secret_conf else []
filtered_tests, is_filter_configured, run_all_tests = extract_filtered_tests(tests_settings.nightly)
if is_filter_configured and not run_all_tests:
is_nightly = True
if not tests or len(tests) == 0:
        prints_manager.add_print_job('no tests are configured in conf.json', print, thread_index)
prints_manager.execute_thread_prints(thread_index)
return
xsoar_client = demisto_client.configure(base_url=server, username=demisto_user,
password=demisto_pass, verify_ssl=False)
# turn off telemetry
turn_off_telemetry(xsoar_client)
proxy = None
if is_ami:
ami = AMIConnection(server_ip)
ami.clone_mock_data()
proxy = MITMProxy(server_ip)
failed_playbooks = []
succeed_playbooks = []
skipped_tests = set([])
skipped_integration = set([])
playbook_skipped_integration = set([])
disable_all_integrations(xsoar_client, prints_manager, thread_index=thread_index)
prints_manager.execute_thread_prints(thread_index)
mockable_tests = get_test_records_of_given_test_names(tests_settings, mockable_tests_names)
unmockable_tests = get_test_records_of_given_test_names(tests_settings, unmockable_tests_names)
if is_nightly and is_memory_check:
mem_lim, err = get_docker_limit()
send_slack_message(slack, SLACK_MEM_CHANNEL_ID,
f'Build Number: {build_number}\n Server Address: {server}\nMemory Limit: {mem_lim}',
'Content CircleCI', 'False')
try:
# first run the mock tests to avoid mockless side effects in container
if is_ami and mockable_tests:
proxy.configure_proxy_in_demisto(proxy=proxy.ami.docker_ip + ':' + proxy.PROXY_PORT,
username=demisto_user, password=demisto_pass,
server=server)
executed_in_current_round, mockable_tests_queue = initialize_queue_and_executed_tests_set(mockable_tests)
while not mockable_tests_queue.empty():
t = mockable_tests_queue.get()
executed_in_current_round = update_round_set_and_sleep_if_round_completed(executed_in_current_round,
prints_manager,
t,
thread_index,
mockable_tests_queue)
run_test_scenario(mockable_tests_queue, tests_settings, t, proxy, default_test_timeout, skipped_tests_conf,
nightly_integrations, skipped_integrations_conf, skipped_integration, is_nightly,
run_all_tests, is_filter_configured, filtered_tests,
skipped_tests, secret_params, failed_playbooks, playbook_skipped_integration,
unmockable_integrations, succeed_playbooks, slack, circle_ci, build_number, server,
build_name, server_numeric_version, demisto_user, demisto_pass,
demisto_api_key, prints_manager, thread_index=thread_index)
proxy.configure_proxy_in_demisto(username=demisto_user, password=demisto_pass, server=server)
# reset containers after clearing the proxy server configuration
reset_containers(server, demisto_user, demisto_pass, prints_manager, thread_index)
prints_manager.add_print_job("\nRunning mock-disabled tests", print, thread_index)
executed_in_current_round, unmockable_tests_queue = initialize_queue_and_executed_tests_set(unmockable_tests)
while not unmockable_tests_queue.empty():
t = unmockable_tests_queue.get()
executed_in_current_round = update_round_set_and_sleep_if_round_completed(executed_in_current_round,
prints_manager,
t,
thread_index,
unmockable_tests_queue)
run_test_scenario(unmockable_tests_queue, tests_settings, t, proxy, default_test_timeout,
skipped_tests_conf, nightly_integrations, skipped_integrations_conf, skipped_integration,
is_nightly, run_all_tests, is_filter_configured, filtered_tests, skipped_tests,
secret_params, failed_playbooks, playbook_skipped_integration, unmockable_integrations,
succeed_playbooks, slack, circle_ci, build_number, server, build_name,
server_numeric_version, demisto_user, demisto_pass, demisto_api_key,
prints_manager, thread_index, is_ami)
prints_manager.execute_thread_prints(thread_index)
except Exception as exc:
if exc.__class__ == ApiException:
error_message = exc.body
else:
error_message = f'~~ Thread {thread_index + 1} failed ~~\n{str(exc)}\n{traceback.format_exc()}'
prints_manager.add_print_job(error_message, print_error, thread_index)
prints_manager.execute_thread_prints(thread_index)
failed_playbooks.append(f'~~ Thread {thread_index + 1} failed ~~')
raise
finally:
tests_data_keeper.add_tests_data(succeed_playbooks, failed_playbooks, skipped_tests,
skipped_integration, unmockable_integrations)
if is_ami:
tests_data_keeper.add_proxy_related_test_data(proxy)
if build_name == 'master':
updating_mocks_msg = "Pushing new/updated mock files to mock git repo."
prints_manager.add_print_job(updating_mocks_msg, print, thread_index)
ami.upload_mock_files(build_name, build_number)
if playbook_skipped_integration and build_name == 'master':
comment = 'The following integrations are skipped and critical for the test:\n {}'. \
format('\n- '.join(playbook_skipped_integration))
add_pr_comment(comment)
# Sending proxy metrics to GCP
try:
storage_client = storage.Client()
now = datetime.datetime.now().replace(microsecond=0).isoformat()
# each log line will be comprised of the following metrics:
# - Date
# - Count of successful tests
# - Count of failed tests
# - Count of successful rerecords
# - Count of failed rerecords
# - IDs of the playbooks that were rerecorded successfully
# - Ids of the playbooks that have failed rerecording
new_proxy_line = f'{now},' \
f'{proxy.successful_tests_count},' \
f'{proxy.failed_tests_count},' \
f'{proxy.successful_rerecord_count},' \
f'{proxy.failed_rerecord_count},' \
f'{";".join(proxy.rerecorded_tests)},' \
f'{";".join(proxy.failed_rerecord_tests)}\n'
bucket = storage_client.bucket(BUCKET_NAME)
# Google storage objects are immutable and there is no way to append to them.
# The workaround is to create a new temp file and then compose the log file with the new created file
# see here for more info https://cloud.google.com/storage/docs/json_api/v1/objects/compose
new_file_blob = bucket.blob(f'{LOCKS_PATH}/{WORKFLOW_ID}.txt')
new_file_blob.upload_from_string(new_proxy_line)
current_file_blob = bucket.blob(f'{LOCKS_PATH}/{PROXY_LOG_FILE_NAME}')
current_file_blob.compose([current_file_blob, new_file_blob])
new_file_blob.delete()
except Exception:
prints_manager.add_print_job("Failed to save proxy metrics", print, thread_index)
def update_round_set_and_sleep_if_round_completed(executed_in_current_round: set,
prints_manager: ParallelPrintsManager,
t: dict,
thread_index: int,
unmockable_tests_queue: Queue) -> set:
"""
Checks if the string representation of the current test configuration is already in
the executed_in_current_round set.
    If it is - it means this test was already executed in the current round, i.e. a full round over the queue was
    completed and there are still tests that this execution could not lock.
    In that case a new round of monitoring is started by emptying the 'executed_in_current_round' set and sleeping
    in order to let the locked tests be unlocked
Args:
executed_in_current_round: A set containing the string representation of all tests configuration as they appear
in conf.json file that were already executed in the current round
prints_manager: ParallelPrintsManager object
t: test configuration as it appears in conf.json file
thread_index: Currently executing thread
unmockable_tests_queue: The queue of remaining tests
Returns:
A new executed_in_current_round set which contains only the current tests configuration if a round was completed
else it just adds the new test to the set.
"""
if str(t) in executed_in_current_round:
prints_manager.add_print_job(
'all tests in the queue were executed, sleeping for 30 seconds to let locked tests get unlocked.',
print,
thread_index)
executed_in_current_round = set()
time.sleep(30)
executed_in_current_round.add(str(t))
return executed_in_current_round
def initialize_queue_and_executed_tests_set(tests):
tests_queue = Queue()
already_executed_test_playbooks = set()
for t in tests:
tests_queue.put(t)
return already_executed_test_playbooks, tests_queue
def get_unmockable_tests(tests_settings):
conf, _ = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
unmockable_integrations = conf['unmockable_integrations']
tests = conf['tests']
unmockable_tests = []
for test_record in tests:
test_name = test_record.get("playbookID")
integrations_used_in_test = get_used_integrations(test_record)
unmockable_integrations_used = [integration_name for integration_name in integrations_used_in_test if
integration_name in unmockable_integrations]
if test_name and (not integrations_used_in_test or unmockable_integrations_used):
unmockable_tests.append(test_name)
return unmockable_tests
def get_all_tests(tests_settings):
conf, _ = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
tests_records = conf['tests']
all_tests = []
for test_record in tests_records:
test_name = test_record.get("playbookID")
if test_name:
all_tests.append(test_name)
return all_tests
def manage_tests(tests_settings):
"""
This function manages the execution of Demisto's tests.
Args:
        tests_settings (TestsSettings): An object containing all the relevant data regarding how the tests should be run
"""
tests_settings.serverNumericVersion = get_server_numeric_version(tests_settings.serverVersion,
tests_settings.is_local_run)
instances_ips = get_instances_ips_and_names(tests_settings)
is_nightly = tests_settings.nightly
number_of_instances = len(instances_ips)
prints_manager = ParallelPrintsManager(number_of_instances)
tests_data_keeper = TestsDataKeeper()
if tests_settings.server:
# If the user supplied a server - all tests will be done on that server.
server_ip = tests_settings.server
print_color("Starting tests for {}".format(server_ip), LOG_COLORS.GREEN)
print("Starts tests with server url - https://{}".format(server_ip))
all_tests = get_all_tests(tests_settings)
mockable_tests = []
print(tests_settings.specific_tests_to_run)
unmockable_tests = tests_settings.specific_tests_to_run if tests_settings.specific_tests_to_run else all_tests
execute_testing(tests_settings, server_ip, mockable_tests, unmockable_tests, tests_data_keeper, prints_manager,
thread_index=0, is_ami=False)
elif tests_settings.isAMI:
# Running tests in AMI configuration.
# This is the way we run most tests, including running Circle for PRs and nightly.
if is_nightly:
# If the build is a nightly build, run tests in parallel.
test_allocation = get_tests_allocation_for_threads(number_of_instances, tests_settings.conf_path)
current_thread_index = 0
all_unmockable_tests_list = get_unmockable_tests(tests_settings)
threads_array = []
for ami_instance_name, ami_instance_ip in instances_ips:
if ami_instance_name == tests_settings.serverVersion: # Only run tests for given AMI Role
current_instance = ami_instance_ip
tests_allocation_for_instance = test_allocation[current_thread_index]
unmockable_tests = [test for test in all_unmockable_tests_list
if test in tests_allocation_for_instance]
mockable_tests = [test for test in tests_allocation_for_instance if test not in unmockable_tests]
print_color("Starting tests for {}".format(ami_instance_name), LOG_COLORS.GREEN)
print("Starts tests with server url - https://{}".format(ami_instance_ip))
if number_of_instances == 1:
execute_testing(tests_settings, current_instance, mockable_tests, unmockable_tests,
tests_data_keeper, prints_manager, thread_index=0, is_ami=True)
else:
thread_kwargs = {
"tests_settings": tests_settings,
"server_ip": current_instance,
"mockable_tests_names": mockable_tests,
"unmockable_tests_names": unmockable_tests,
"thread_index": current_thread_index,
"prints_manager": prints_manager,
"tests_data_keeper": tests_data_keeper,
}
t = threading.Thread(target=execute_testing, kwargs=thread_kwargs)
threads_array.append(t)
t.start()
current_thread_index += 1
for t in threads_array:
t.join()
else:
for ami_instance_name, ami_instance_ip in instances_ips:
if ami_instance_name == tests_settings.serverVersion:
print_color("Starting tests for {}".format(ami_instance_name), LOG_COLORS.GREEN)
print("Starts tests with server url - https://{}".format(ami_instance_ip))
all_tests = get_all_tests(tests_settings)
unmockable_tests = get_unmockable_tests(tests_settings)
mockable_tests = [test for test in all_tests if test not in unmockable_tests]
execute_testing(tests_settings, ami_instance_ip, mockable_tests, unmockable_tests,
tests_data_keeper, prints_manager, thread_index=0, is_ami=True)
sleep(8)
else:
# TODO: understand better when this occurs and what will be the settings
# This case is rare, and usually occurs on two cases:
# 1. When someone from Server wants to trigger a content build on their branch.
# 2. When someone from content wants to run tests on a specific build.
server_numeric_version = '99.99.98' # assume latest
print("Using server version: {} (assuming latest for non-ami)".format(server_numeric_version))
instance_ip = instances_ips[0][1]
all_tests = get_all_tests(tests_settings)
execute_testing(tests_settings, instance_ip, [], all_tests,
tests_data_keeper, prints_manager, thread_index=0, is_ami=False)
print_test_summary(tests_data_keeper, tests_settings.isAMI)
create_result_files(tests_data_keeper)
if tests_data_keeper.failed_playbooks:
tests_failed_msg = "Some tests have failed. Not destroying instances."
print(tests_failed_msg)
sys.exit(1)
def add_pr_comment(comment):
token = os.environ['CONTENT_GITHUB_TOKEN']
branch_name = os.environ['CIRCLE_BRANCH']
sha1 = os.environ['CIRCLE_SHA1']
query = '?q={}+repo:demisto/content+org:demisto+is:pr+is:open+head:{}+is:open'.format(sha1, branch_name)
url = 'https://api.github.com/search/issues'
headers = {'Authorization': 'Bearer ' + token}
try:
res = requests.get(url + query, headers=headers, verify=False)
res = handle_github_response(res)
if res and res.get('total_count', 0) == 1:
issue_url = res['items'][0].get('comments_url') if res.get('items', []) else None
if issue_url:
res = requests.post(issue_url, json={'body': comment}, headers=headers, verify=False)
handle_github_response(res)
else:
            print_warning('Add pull request comment failed: could not find exactly one open pull request for branch {}.'
                          .format(branch_name))
except Exception as e:
print_warning('Add pull request comment failed: {}'.format(e))
def handle_github_response(response):
res_dict = response.json()
    if not response.ok:
        print_warning('Add pull request comment failed: {}'.format(res_dict.get('message')))
return res_dict
@contextmanager
def acquire_test_lock(integrations_details: list,
test_timeout: int,
prints_manager: ParallelPrintsManager,
thread_index: int,
conf_json_path: str) -> None:
"""
This is a context manager that handles all the locking and unlocking of integrations.
Execution is as following:
* Attempts to lock the test's integrations and yields the result of this attempt
    * If the lock attempt has failed - yields False, if it succeeds - yields True
    * Once the test is done - all integrations will be unlocked
Args:
integrations_details: test integrations details
test_timeout: test timeout in seconds
prints_manager: ParallelPrintsManager object
thread_index: The index of the thread that executes the unlocking
conf_json_path: Path to conf.json file
Yields:
A boolean indicating the lock attempt result
"""
locked = safe_lock_integrations(test_timeout,
prints_manager,
integrations_details,
thread_index,
conf_json_path)
try:
yield locked
finally:
if not locked:
return
safe_unlock_integrations(prints_manager, integrations_details, thread_index)
prints_manager.execute_thread_prints(thread_index)
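# A minimal usage sketch of the context manager above (illustrative only): the
# integration name, timeout and conf.json path below are made up.
def _example_acquire_test_lock_usage(prints_manager):
    """Illustrative sketch only; not called by the test runner."""
    integrations = [{'name': 'ExampleIntegration'}]
    with acquire_test_lock(integrations, 300, prints_manager, 0, './Tests/conf.json') as locked:
        if locked:
            pass  # the test would run here; the integrations are unlocked on exit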
def safe_unlock_integrations(prints_manager: ParallelPrintsManager, integrations_details: list, thread_index: int):
"""
    This function safely unlocks the test's integrations.
    If an unexpected error occurs - this method will log its details and the execution of other tests will continue
Args:
prints_manager: ParallelPrintsManager object
integrations_details: Details of the currently executed test
thread_index: The index of the thread that executes the unlocking
"""
try:
# executing the test could take a while, re-instancing the storage client
storage_client = storage.Client()
unlock_integrations(integrations_details, prints_manager, storage_client, thread_index)
except Exception as e:
prints_manager.add_print_job(f'attempt to unlock integration failed for unknown reason.\nError: {e}',
print_warning,
thread_index,
include_timestamp=True)
def safe_lock_integrations(test_timeout: int,
prints_manager: ParallelPrintsManager,
integrations_details: list,
thread_index: int,
conf_json_path: str) -> bool:
"""
    This function safely locks the test's integrations and returns the result.
    If an unexpected error occurs - this method will log its details and return False
Args:
test_timeout: Test timeout in seconds
prints_manager: ParallelPrintsManager object
integrations_details: test integrations details
        thread_index: The index of the thread that executes the locking
conf_json_path: Path to conf.json file
Returns:
A boolean indicating the lock attempt result
"""
conf, _ = load_conf_files(conf_json_path, None)
parallel_integrations_names = conf['parallel_integrations']
filtered_integrations_details = [integration for integration in integrations_details if
integration['name'] not in parallel_integrations_names]
integration_names = get_integrations_list(filtered_integrations_details)
if integration_names:
print_msg = f'Attempting to lock integrations {integration_names}, with timeout {test_timeout}'
else:
print_msg = 'No integrations to lock'
prints_manager.add_print_job(print_msg, print, thread_index, include_timestamp=True)
try:
storage_client = storage.Client()
locked = lock_integrations(filtered_integrations_details, test_timeout, storage_client, prints_manager, thread_index)
except Exception as e:
prints_manager.add_print_job(f'attempt to lock integration failed for unknown reason.\nError: {e}',
print_warning,
thread_index,
include_timestamp=True)
locked = False
return locked
def workflow_still_running(workflow_id: str) -> bool:
"""
This method takes a workflow id and checks if the workflow is still running
    If the given workflow ID is the same as the current workflow's, it will simply return True,
    else it will query the circleci api for the workflow and return the status
Args:
workflow_id: The ID of the workflow
Returns:
True if the workflow is running, else False
"""
# If this is the current workflow_id
if workflow_id == WORKFLOW_ID:
return True
else:
try:
workflow_details_response = requests.get(f'https://circleci.com/api/v2/workflow/{workflow_id}',
headers={'Accept': 'application/json'},
auth=(CIRCLE_STATUS_TOKEN, ''))
workflow_details_response.raise_for_status()
except Exception as e:
print(f'Failed to get circleci response about workflow with id {workflow_id}, error is: {e}')
return True
return workflow_details_response.json().get('status') not in ('canceled', 'success', 'failed')
def lock_integrations(integrations_details: list,
test_timeout: int,
storage_client: storage.Client,
prints_manager: ParallelPrintsManager,
thread_index: int) -> bool:
"""
Locks all the test's integrations
Args:
integrations_details: List of current test's integrations
test_timeout: Test timeout in seconds
storage_client: The GCP storage client
prints_manager: ParallelPrintsManager object
        thread_index: The index of the thread that executes the locking
Returns:
True if all the test's integrations were successfully locked, else False
"""
integrations = get_integrations_list(integrations_details)
if not integrations:
return True
existing_integrations_lock_files = get_locked_integrations(integrations, storage_client)
for integration, lock_file in existing_integrations_lock_files.items():
        # Each file has content in the form of <workflow-id>:<circleci-build-number>:<timeout in seconds>
# If it has not expired - it means the integration is currently locked by another test.
workflow_id, build_number, lock_timeout = lock_file.download_as_string().decode().split(':')
if not lock_expired(lock_file, lock_timeout) and workflow_still_running(workflow_id):
# there is a locked integration for which the lock is not expired - test cannot be executed at the moment
prints_manager.add_print_job(
                f'Could not lock integration {integration}, another lock file already exists with '
f'build number: {build_number}, timeout: {lock_timeout}, last update at {lock_file.updated}.\n'
f'Delaying test execution',
print,
thread_index,
include_timestamp=True)
return False
integrations_generation_number = {}
# Gathering generation number with which the new file will be created,
# See https://cloud.google.com/storage/docs/generations-preconditions for details.
for integration in integrations:
if integration in existing_integrations_lock_files:
integrations_generation_number[integration] = existing_integrations_lock_files[integration].generation
else:
integrations_generation_number[integration] = 0
return create_lock_files(integrations_generation_number, prints_manager,
storage_client, integrations_details, test_timeout, thread_index)
def get_integrations_list(test_integrations: list) -> list:
"""
Since test details can have one integration as a string and sometimes a list of integrations- this methods
parses the test's integrations into a list of integration names.
Args:
test_integrations: List of current test's integrations
Returns:
the integration names in a list for all the integrations that takes place in the test
specified in test details.
"""
return [integration['name'] for integration in test_integrations]
def create_lock_files(integrations_generation_number: dict,
prints_manager: ParallelPrintsManager,
storage_client: storage.Client,
integrations_details: list,
test_timeout: int,
thread_index: int) -> bool:
"""
    This method tries to create a lock file for each integration specified in 'integrations_generation_number'.
    Each file should contain <workflow-id>:<circle-ci-build-number>:<test-timeout>
    where the <circle-ci-build-number> part is for debugging and troubleshooting
    and the <test-timeout> part is to be able to unlock revoked test files.
    If the lock file creation fails for any of the integrations - the already created files will be cleaned.
Args:
integrations_generation_number: A dict in the form of {<integration-name>:<integration-generation>}
prints_manager: ParallelPrintsManager object
storage_client: The GCP storage client
integrations_details: List of current test's integrations
        test_timeout: The test timeout in seconds (a 30-second buffer is added before it is stored)
        thread_index: The index of the thread that executes the locking
    Returns:
        True if lock files were created for all the integrations, else False
"""
locked_integrations = []
bucket = storage_client.bucket(BUCKET_NAME)
for integration, generation_number in integrations_generation_number.items():
blob = bucket.blob(f'{LOCKS_PATH}/{integration}')
try:
blob.upload_from_string(f'{WORKFLOW_ID}:{CIRCLE_BUILD_NUM}:{test_timeout + 30}',
if_generation_match=generation_number)
prints_manager.add_print_job(f'integration {integration} locked',
print,
thread_index,
include_timestamp=True)
locked_integrations.append(integration)
except PreconditionFailed:
# if this exception occurs it means that another build has locked this integration
# before this build managed to do it.
# we need to unlock all the integrations we have already locked and try again later
prints_manager.add_print_job(
                f'Could not lock integration {integration}, create-file precondition failed. '
                f'Delaying test execution.',
print_warning,
thread_index,
include_timestamp=True)
unlock_integrations(integrations_details, prints_manager, storage_client, thread_index)
return False
return True
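# A reduced sketch of the precondition pattern used above (illustrative only, assumes
# GCP credentials are available): if_generation_match=0 makes the upload succeed only
# when no blob with that name exists yet, which is what makes the lock atomic.
def _example_create_blob_if_absent(bucket_name, blob_name):
    """Illustrative sketch only; bucket and blob names are placeholders."""
    client = storage.Client()
    blob = client.bucket(bucket_name).blob(blob_name)
    try:
        blob.upload_from_string('locked', if_generation_match=0)
        return True
    except PreconditionFailed:
        return False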
def unlock_integrations(integrations_details: list,
prints_manager: ParallelPrintsManager,
storage_client: storage.Client,
thread_index: int) -> None:
"""
Delete all integration lock files for integrations specified in 'locked_integrations'
Args:
integrations_details: List of current test's integrations
prints_manager: ParallelPrintsManager object
storage_client: The GCP storage client
thread_index: The index of the thread that executes the unlocking
"""
locked_integrations = get_integrations_list(integrations_details)
locked_integration_blobs = get_locked_integrations(locked_integrations, storage_client)
for integration, lock_file in locked_integration_blobs.items():
try:
# Verifying build number is the same as current build number to avoid deleting other tests lock files
_, build_number, _ = lock_file.download_as_string().decode().split(':')
if build_number == CIRCLE_BUILD_NUM:
lock_file.delete(if_generation_match=lock_file.generation)
prints_manager.add_print_job(
f'Integration {integration} unlocked',
print,
thread_index,
include_timestamp=True)
except PreconditionFailed:
prints_manager.add_print_job(f'Could not unlock integration {integration} precondition failure',
print_warning,
thread_index,
include_timestamp=True)
def get_locked_integrations(integrations: list, storage_client: storage.Client) -> dict:
"""
Getting all locked integrations files
Args:
integrations: Integrations that we want to get lock files for
storage_client: The GCP storage client
Returns:
A dict of the form {<integration-name>:<integration-blob-object>} for all integrations that has a blob object.
"""
# Listing all files in lock folder
# Wrapping in 'list' operator because list_blobs return a generator which can only be iterated once
lock_files_ls = list(storage_client.list_blobs(BUCKET_NAME, prefix=f'{LOCKS_PATH}'))
current_integrations_lock_files = {}
# Getting all existing files details for integrations that we want to lock
for integration in integrations:
current_integrations_lock_files.update({integration: [lock_file_blob for lock_file_blob in lock_files_ls if
lock_file_blob.name == f'{LOCKS_PATH}/{integration}']})
# Filtering 'current_integrations_lock_files' from integrations with no files
current_integrations_lock_files = {integration: blob_files[0] for integration, blob_files in
current_integrations_lock_files.items() if blob_files}
return current_integrations_lock_files
def lock_expired(lock_file: storage.Blob, lock_timeout: str) -> bool:
"""
    Checks if the time that passed since the last update of the 'lock_file' is more than 'lock_timeout'.
    If not - it means that the integration represented by the lock file is currently locked and is being tested in another build
    Args:
        lock_file: The lock file blob object
        lock_timeout: The expiration timeout of the lock in seconds
    Returns:
        True if the lock has exceeded its timeout, else False
"""
return datetime.datetime.now(tz=pytz.utc) - lock_file.updated >= datetime.timedelta(seconds=int(lock_timeout))
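# The same expiry arithmetic as lock_expired, applied to plain datetimes with made-up
# numbers (illustrative only): a lock updated 120 seconds ago with a 90-second timeout
# is considered expired.
def _example_lock_expiry_arithmetic():
    """Illustrative sketch only; returns True for the invented values below."""
    updated = datetime.datetime.now(tz=pytz.utc) - datetime.timedelta(seconds=120)
    lock_timeout = '90'  # stored as a string inside the lock file
    return datetime.datetime.now(tz=pytz.utc) - updated >= datetime.timedelta(seconds=int(lock_timeout))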
def main():
print("Time is: {}\n\n\n".format(datetime.datetime.now()))
tests_settings = options_handler()
# should be removed after solving: https://github.com/demisto/etc/issues/21383
# -------------
if 'master' in tests_settings.serverVersion.lower():
        print('[{}] sleeping for 45 secs'.format(datetime.datetime.now()))
sleep(45)
# -------------
manage_tests(tests_settings)
if __name__ == '__main__':
main()
the-stack_106_18449
##
# File: ObjectTransformerTests.py
# Author: J. Westbrook
# Date: 25-Apr-2019
#
# Updates:
#
##
"""
Tests for extractor and updater or selected values from collections (limited tests from mock-data repos)
"""
__docformat__ = "google en"
__author__ = "John Westbrook"
__email__ = "[email protected]"
__license__ = "Apache 2.0"
import logging
import os
import platform
import resource
import time
import unittest
from rcsb.exdb.utils.ObjectTransformer import ObjectTransformer
from rcsb.utils.config.ConfigUtil import ConfigUtil
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s]-%(module)s.%(funcName)s: %(message)s")
logger = logging.getLogger()
HERE = os.path.abspath(os.path.dirname(__file__))
TOPDIR = os.path.dirname(os.path.dirname(os.path.dirname(HERE)))
class ObjectTransformerTests(unittest.TestCase):
def __init__(self, methodName="runTest"):
super(ObjectTransformerTests, self).__init__(methodName)
self.__verbose = True
def setUp(self):
#
self.__mockTopPath = os.path.join(TOPDIR, "rcsb", "mock-data")
configPath = os.path.join(TOPDIR, "rcsb", "mock-data", "config", "dbload-setup-example.yml")
#
configName = "site_info_configuration"
self.__cfgOb = ConfigUtil(configPath=configPath, defaultSectionName=configName, mockTopPath=self.__mockTopPath)
#
self.__fetchLimit = 5
#
self.__startTime = time.time()
logger.debug("Starting %s at %s", self.id(), time.strftime("%Y %m %d %H:%M:%S", time.localtime()))
def tearDown(self):
unitS = "MB" if platform.system() == "Darwin" else "GB"
rusageMax = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
logger.info("Maximum resident memory size %.4f %s", rusageMax / 10 ** 6, unitS)
endTime = time.time()
logger.info("Completed %s at %s (%.4f seconds)", self.id(), time.strftime("%Y %m %d %H:%M:%S", time.localtime()), endTime - self.__startTime)
    def testTransformEntityProteinContent(self):
"""Test case - transform selected entity protein documents"""
try:
databaseName = "pdbx_core"
collectionName = "pdbx_core_polymer_entity"
obTr = ObjectTransformer(self.__cfgOb)
ok = obTr.doTransform(
databaseName=databaseName, collectionName=collectionName, fetchLimit=self.__fetchLimit, selectionQuery={"entity_poly.rcsb_entity_polymer_type": "Protein"}
)
self.assertTrue(ok)
except Exception as e:
logger.exception("Failing with %s", str(e))
self.fail()
def objectTransformerSuite():
suiteSelect = unittest.TestSuite()
suiteSelect.addTest(ObjectTransformerTests("testTransformEntityProteinContent"))
return suiteSelect
if __name__ == "__main__":
mySuite = objectTransformerSuite()
unittest.TextTestRunner(verbosity=2).run(mySuite)
the-stack_106_18450
import torch.nn as nn
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from mm_modules.DCN.modules.deform_conv2d import DeformConv2dPack
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
assert img_size[0] % patch_size[0] == 0 and img_size[1] % patch_size[1] == 0, \
f"img_size {img_size} should be divided by patch_size {patch_size}."
self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1]
self.num_patches = self.H * self.W
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
self.norm = nn.LayerNorm(embed_dim)
def forward(self, x):
B, C, H, W = x.shape
x = self.proj(x).flatten(2).transpose(1, 2)
x = self.norm(x)
H, W = H // self.patch_size[0], W // self.patch_size[1]
return x, (H, W)
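# A small shape check for the patch embedding above (illustrative only, assumes the
# default 224x224 input with 16x16 patches): 224/16 = 14 patches per side, so a batch
# of images becomes 14*14 = 196 tokens of size embed_dim.
def _example_patch_embed_shapes():
    """Illustrative sketch only; not used by the model definitions below."""
    import torch
    patch_embed = PatchEmbed(img_size=224, patch_size=16, in_chans=3, embed_dim=768)
    images = torch.randn(2, 3, 224, 224)
    tokens, (h, w) = patch_embed(images)
    assert tokens.shape == (2, 196, 768) and (h, w) == (14, 14)
    return tokens.shape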
class DeformablePatchEmbed_GELU(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.c_in = in_chans
self.c_out = embed_dim
self.img_size = img_size
self.patch_size = patch_size
assert img_size[0] % patch_size[0] == 0 and img_size[1] % patch_size[1] == 0, \
f"img_size {img_size} should be divided by patch_size {patch_size}."
self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1]
self.num_patches = self.H * self.W
self.dconv = DeformConv2dPack(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size, padding=0)
self.norm_layer = nn.BatchNorm2d(embed_dim)
self.act_layer = nn.GELU()
for m in self.modules():
if isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x, return_offset=False):
B, C, H, W = x.shape
x, offset = self.dconv(x, return_offset=return_offset)
x = self.act_layer(self.norm_layer(x)).flatten(2).transpose(1, 2)
H, W = H // self.patch_size[0], W // self.patch_size[1]
if return_offset:
return x, (H, W), offset
else:
return x, (H, W)
patch_dict = {
'default': PatchEmbed,
'dcn_v1_bn_gelu': DeformablePatchEmbed_GELU,
}
the-stack_106_18451
# Copyright 2012 by Wibowo Arindrarto. All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Bio.SearchIO parser for BLAST+ XML output formats."""
# for more info: http://www.ncbi.nlm.nih.gov/dtd/NCBI_BlastOutput.mod.dtd
import re
import warnings
from itertools import chain
from xml.etree import ElementTree
from xml.sax.saxutils import XMLGenerator, escape
from Bio import BiopythonParserWarning
from Bio.SearchIO._index import SearchIndexer
from Bio.SearchIO._model import QueryResult, Hit, HSP, HSPFragment
__all__ = ("BlastXmlParser", "BlastXmlIndexer", "BlastXmlWriter")
# element - optional qresult attribute name mapping
_ELEM_QRESULT_OPT = {
"Statistics_db-num": ("stat_db_num", int),
"Statistics_db-len": ("stat_db_len", int),
"Statistics_eff-space": ("stat_eff_space", float),
"Statistics_hsp-len": ("stat_hsp_len", int),
"Statistics_kappa": ("stat_kappa", float),
"Statistics_lambda": ("stat_lambda", float),
"Statistics_entropy": ("stat_entropy", float),
}
# element - hit attribute name mapping
_ELEM_HIT = {
# 'Hit_def': ('description', str), # not set by this dict
"Hit_accession": ("accession", str),
"Hit_len": ("seq_len", int),
}
# element - hsp attribute name mapping
_ELEM_HSP = {
"Hsp_bit-score": ("bitscore", float),
"Hsp_score": ("bitscore_raw", int),
"Hsp_evalue": ("evalue", float),
"Hsp_identity": ("ident_num", int),
"Hsp_positive": ("pos_num", int),
"Hsp_gaps": ("gap_num", int),
"Hsp_density": ("density", float),
}
# element - fragment attribute name mapping
_ELEM_FRAG = {
"Hsp_query-from": ("query_start", int),
"Hsp_query-to": ("query_end", int),
"Hsp_hit-from": ("hit_start", int),
"Hsp_hit-to": ("hit_end", int),
"Hsp_query-frame": ("query_frame", int),
"Hsp_hit-frame": ("hit_frame", int),
"Hsp_align-len": ("aln_span", int),
"Hsp_pattern-from": ("pattern_start", int),
"Hsp_pattern-to": ("pattern_end", int),
"Hsp_hseq": ("hit", str),
"Hsp_qseq": ("query", str),
}
# dictionary for mapping tag name and meta key name
_ELEM_META = {
"BlastOutput_db": ("target", str),
"BlastOutput_program": ("program", str),
"BlastOutput_version": ("version", str),
"BlastOutput_reference": ("reference", str),
"Parameters_expect": ("param_evalue_threshold", float),
"Parameters_entrez-query": ("param_entrez_query", str),
"Parameters_filter": ("param_filter", str),
"Parameters_gap-extend": ("param_gap_extend", int),
"Parameters_gap-open": ("param_gap_open", int),
"Parameters_include": ("param_include", str),
"Parameters_matrix": ("param_matrix", str),
"Parameters_pattern": ("param_pattern", str),
"Parameters_sc-match": ("param_score_match", int),
"Parameters_sc-mismatch": ("param_score_mismatch", int),
}
# these are fallback tags that store information on the first query
# outside the <Iteration> tag
# only used if query_{ID,def,len} is not found in <Iteration>
# (seen in legacy Blast <2.2.14)
_ELEM_QRESULT_FALLBACK = {
"BlastOutput_query-ID": ("id", str),
"BlastOutput_query-def": ("description", str),
"BlastOutput_query-len": ("len", str),
}
# element-attribute maps, for writing
_WRITE_MAPS = {
"preamble": (
("program", "program"),
("version", "version"),
("reference", "reference"),
("db", "target"),
("query-ID", "id"),
("query-def", "description"),
("query-len", "seq_len"),
("param", None),
),
"param": (
("matrix", "param_matrix"),
("expect", "param_evalue_threshold"),
("sc-match", "param_score_match"),
("sc-mismatch", "param_score_mismatch"),
("gap-open", "param_gap_open"),
("gap-extend", "param_gap_extend"),
("filter", "param_filter"),
("pattern", "param_pattern"),
("entrez-query", "param_entrez_query"),
),
"qresult": (
("query-ID", "id"),
("query-def", "description"),
("query-len", "seq_len"),
),
"stat": (
("db-num", "stat_db_num"),
("db-len", "stat_db_len"),
("hsp-len", "stat_hsp_len"),
("eff-space", "stat_eff_space"),
("kappa", "stat_kappa"),
("lambda", "stat_lambda"),
("entropy", "stat_entropy"),
),
"hit": (
("id", "id"),
("def", "description"),
("accession", "accession"),
("len", "seq_len"),
),
"hsp": (
("bit-score", "bitscore"),
("score", "bitscore_raw"),
("evalue", "evalue"),
("query-from", "query_start"),
("query-to", "query_end"),
("hit-from", "hit_start"),
("hit-to", "hit_end"),
("pattern-from", "pattern_start"),
("pattern-to", "pattern_end"),
("query-frame", "query_frame"),
("hit-frame", "hit_frame"),
("identity", "ident_num"),
("positive", "pos_num"),
("gaps", "gap_num"),
("align-len", "aln_span"),
("density", "density"),
("qseq", "query"),
("hseq", "hit"),
("midline", None),
),
}
# optional elements, based on the DTD
_DTD_OPT = (
"BlastOutput_query-seq",
"BlastOutput_mbstat",
"Iteration_query-def",
"Iteration_query-len",
"Iteration-hits",
"Iteration_stat",
"Iteration_message",
"Parameters_matrix",
"Parameters_include",
"Parameters_sc-match",
"Parameters_sc-mismatch",
"Parameters_filter",
"Parameters_pattern",
"Parameters_entrez-query",
"Hit_hsps",
"Hsp_pattern-from",
"Hsp_pattern-to",
"Hsp_query-frame",
"Hsp_hit-frame",
"Hsp_identity",
"Hsp_positive",
"Hsp_gaps",
"Hsp_align-len",
"Hsp_density",
"Hsp_midline",
)
# compile RE patterns
# for capturing BLAST version
_RE_VERSION = re.compile(r"\d+\.\d+\.\d+\+?")
# for splitting ID-description pairs
_RE_ID_DESC_PAIRS_PATTERN = re.compile(" +>")
# for splitting ID and description (must be used with maxsplit = 1)
_RE_ID_DESC_PATTERN = re.compile(" +")
def _extract_ids_and_descs(raw_id, raw_desc):
"""Extract IDs, descriptions, and raw ID from raw values (PRIVATE).
Given values of the ``Hit_id`` and ``Hit_def`` elements, this function
returns a tuple of three elements: all IDs, all descriptions, and the
BLAST-generated ID. The BLAST-generated ID is set to ``None`` if no
BLAST-generated IDs are present.
"""
ids = []
descs = []
blast_gen_id = raw_id
if raw_id.startswith("gnl|BL_ORD_ID|"):
id_desc_line = raw_desc
else:
id_desc_line = raw_id + " " + raw_desc
# create a list of lists, each list containing an ID and description
# or just an ID, if description is not present
id_desc_pairs = [
re.split(_RE_ID_DESC_PATTERN, x, 1)
for x in re.split(_RE_ID_DESC_PAIRS_PATTERN, id_desc_line)
]
# make sure empty descriptions are added as empty strings
# also, we return lists for compatibility reasons between Py2 and Py3
for pair in id_desc_pairs:
if len(pair) != 2:
pair.append("")
ids.append(pair[0])
descs.append(pair[1])
return (ids, descs, blast_gen_id)
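# Worked example (illustrative, not part of the original module): for a hit
# whose ID was generated by BLAST, the real ID/description are recovered from
# the Hit_def value while the BLAST-generated ID is kept separately, e.g.
#   _extract_ids_and_descs("gnl|BL_ORD_ID|17", "gi|129295|sp|P01013.1| OVALX_CHICK")
#   -> (["gi|129295|sp|P01013.1|"], ["OVALX_CHICK"], "gnl|BL_ORD_ID|17")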
class BlastXmlParser:
"""Parser for the BLAST XML format."""
def __init__(self, handle, use_raw_query_ids=False, use_raw_hit_ids=False):
"""Initialize the class."""
self.xml_iter = iter(ElementTree.iterparse(handle, events=("start", "end")))
self._use_raw_query_ids = use_raw_query_ids
self._use_raw_hit_ids = use_raw_hit_ids
self._meta, self._fallback = self._parse_preamble()
def __iter__(self):
"""Iterate over BlastXmlParser object yields query results."""
yield from self._parse_qresult()
def _parse_preamble(self):
"""Parse all tag data prior to the first query result (PRIVATE)."""
# dictionary for containing all information prior to the first query
meta = {}
# dictionary for fallback information
fallback = {}
# parse the preamble part (anything prior to the first result)
for event, elem in self.xml_iter:
# get the tag values, cast appropriately, store into meta
if event == "end" and elem.tag in _ELEM_META:
attr_name, caster = _ELEM_META[elem.tag]
if caster is not str:
meta[attr_name] = caster(elem.text)
else:
meta[attr_name] = elem.text
# delete element after we finish parsing it
elem.clear()
continue
# capture fallback values
# these are used only if the first <Iteration> does not have any
# ID, ref, or len.
elif event == "end" and elem.tag in _ELEM_QRESULT_FALLBACK:
attr_name, caster = _ELEM_QRESULT_FALLBACK[elem.tag]
if caster is not str:
fallback[attr_name] = caster(elem.text)
else:
fallback[attr_name] = elem.text
elem.clear()
continue
if event == "start" and elem.tag == "Iteration":
break
# we only want the version number, sans the program name or date
if meta.get("version") is not None:
meta["version"] = re.search(_RE_VERSION, meta["version"]).group(0)
return meta, fallback
def _parse_qresult(self):
"""Parse query results (PRIVATE)."""
# parse the queries
for event, qresult_elem in self.xml_iter:
# </Iteration> marks the end of a single query
# which means we can process it
if event == "end" and qresult_elem.tag == "Iteration":
# we'll use the following schema
# <!ELEMENT Iteration (
# Iteration_iter-num,
# Iteration_query-ID?,
# Iteration_query-def?,
# Iteration_query-len?,
# Iteration_hits?,
# Iteration_stat?,
# Iteration_message?)>
# assign query attributes with fallbacks
query_id = qresult_elem.findtext("Iteration_query-ID")
if query_id is None:
query_id = self._fallback["id"]
query_desc = qresult_elem.findtext("Iteration_query-def")
if query_desc is None:
query_desc = self._fallback["description"]
query_len = qresult_elem.findtext("Iteration_query-len")
if query_len is None:
query_len = self._fallback["len"]
blast_query_id = query_id
# handle blast searches against databases with Blast's IDs
# 'Query_' marks the beginning of a BLAST+-generated ID,
# 'lcl|' marks the beginning of a BLAST legacy-generated ID
if not self._use_raw_query_ids and (
query_id.startswith("Query_") or query_id.startswith("lcl|")
):
# store the Blast-generated query ID
id_desc = query_desc.split(" ", 1)
query_id = id_desc[0]
try:
query_desc = id_desc[1]
except IndexError:
query_desc = ""
hit_list, key_list = [], []
for hit in self._parse_hit(
qresult_elem.find("Iteration_hits"), query_id
):
if hit:
# need to keep track of hit IDs, since there could be duplicates,
if hit.id in key_list:
warnings.warn(
"Renaming hit ID %r to a BLAST-generated ID "
"%r since the ID was already matched "
"by your query %r. Your BLAST database "
"may contain duplicate entries."
% (hit.id, hit.blast_id, query_id),
BiopythonParserWarning,
)
# fallback to Blast-generated IDs, if the ID is already present
# and restore the desc, too
hit.description = "%s %s" % (hit.id, hit.description)
hit.id = hit.blast_id
# and change the hit_id of the HSPs contained
for hsp in hit:
hsp.hit_id = hit.blast_id
else:
key_list.append(hit.id)
hit_list.append(hit)
# create qresult and assign its attributes
qresult = QueryResult(hit_list, query_id)
qresult.description = query_desc
qresult.seq_len = int(query_len)
qresult.blast_id = blast_query_id
for key, value in self._meta.items():
setattr(qresult, key, value)
# statistics are stored in Iteration_stat's 'grandchildren' with the
# following DTD
# <!ELEMENT Statistics (
# Statistics_db-num,
# Statistics_db-len,
# Statistics_hsp-len,
# Statistics_eff-space,
# Statistics_kappa,
# Statistics_lambda,
# Statistics_entropy)>
stat_iter_elem = qresult_elem.find("Iteration_stat")
if stat_iter_elem is not None:
stat_elem = stat_iter_elem.find("Statistics")
for key, val_info in _ELEM_QRESULT_OPT.items():
value = stat_elem.findtext(key)
if value is not None:
caster = val_info[1]
# recast only if value is not intended to be str
if value is not None and caster is not str:
value = caster(value)
setattr(qresult, val_info[0], value)
# delete element after we finish parsing it
qresult_elem.clear()
yield qresult
def _parse_hit(self, root_hit_elem, query_id):
"""Yield a generator object that transforms Iteration_hits XML elements into Hit objects (PRIVATE).
:param root_hit_elem: root element of the Iteration_hits tag.
:type root_hit_elem: XML element tag
:param query_id: QueryResult ID of this Hit
:type query_id: string
"""
# Hit level processing
# Hits are stored in the Iteration_hits tag, with the following
# DTD
# <!ELEMENT Hit (
# Hit_num,
# Hit_id,
# Hit_def,
# Hit_accession,
# Hit_len,
# Hit_hsps?)>
# feed the loop below an empty list so iteration still works
if root_hit_elem is None:
root_hit_elem = []
for hit_elem in root_hit_elem:
# BLAST sometimes mangles the sequence IDs and descriptions, so we need
# to extract the actual values.
raw_hit_id = hit_elem.findtext("Hit_id")
raw_hit_desc = hit_elem.findtext("Hit_def")
if not self._use_raw_hit_ids:
ids, descs, blast_hit_id = _extract_ids_and_descs(
raw_hit_id, raw_hit_desc
)
else:
ids, descs, blast_hit_id = [raw_hit_id], [raw_hit_desc], raw_hit_id
hit_id, alt_hit_ids = ids[0], ids[1:]
hit_desc, alt_hit_descs = descs[0], descs[1:]
hsps = list(self._parse_hsp(hit_elem.find("Hit_hsps"), query_id, hit_id))
hit = Hit(hsps)
hit.description = hit_desc
hit._id_alt = alt_hit_ids
hit._description_alt = alt_hit_descs
hit.blast_id = blast_hit_id
for key, val_info in _ELEM_HIT.items():
value = hit_elem.findtext(key)
if value is not None:
caster = val_info[1]
# recast only if value is not intended to be str
if value is not None and caster is not str:
value = caster(value)
setattr(hit, val_info[0], value)
# delete element after we finish parsing it
hit_elem.clear()
yield hit
def _parse_hsp(self, root_hsp_frag_elem, query_id, hit_id):
"""Yield a generator object that transforms Hit_hsps XML elements into HSP objects (PRIVATE).
:param root_hsp_frag_elem: the ``Hit_hsps`` tag
:type root_hsp_frag_elem: XML element tag
:param query_id: query ID
:type query_id: string
:param hit_id: hit ID
:type hit_id: string
"""
# Hit_hsps DTD:
# <!ELEMENT Hsp (
# Hsp_num,
# Hsp_bit-score,
# Hsp_score,
# Hsp_evalue,
# Hsp_query-from,
# Hsp_query-to,
# Hsp_hit-from,
# Hsp_hit-to,
# Hsp_pattern-from?,
# Hsp_pattern-to?,
# Hsp_query-frame?,
# Hsp_hit-frame?,
# Hsp_identity?,
# Hsp_positive?,
# Hsp_gaps?,
# Hsp_align-len?,
# Hsp_density?,
# Hsp_qseq,
# Hsp_hseq,
# Hsp_midline?)>
# if value is None, feed the loop below an empty list
if root_hsp_frag_elem is None:
root_hsp_frag_elem = []
for hsp_frag_elem in root_hsp_frag_elem:
coords = {} # temporary container for coordinates
frag = HSPFragment(hit_id, query_id)
for key, val_info in _ELEM_FRAG.items():
value = hsp_frag_elem.findtext(key)
caster = val_info[1]
# adjust 'from' and 'to' coordinates to 0-based ones
if value is not None:
if key.endswith("-from") or key.endswith("-to"):
# store coordinates for further processing
coords[val_info[0]] = caster(value)
continue
# recast only if value is not intended to be str
elif caster is not str:
value = caster(value)
setattr(frag, val_info[0], value)
# set the similarity characters into aln_annotation dict
frag.aln_annotation["similarity"] = hsp_frag_elem.findtext("Hsp_midline")
# process coordinates
# since 'x-from' could be bigger than 'x-to', we need to figure
# out which one is smaller/bigger since 'x_start' is always smaller
# than 'x_end'
for coord_type in ("query", "hit", "pattern"):
start_type = coord_type + "_start"
end_type = coord_type + "_end"
try:
start = coords[start_type]
end = coords[end_type]
except KeyError:
continue
else:
# convert to python range and setattr
setattr(frag, start_type, min(start, end) - 1)
setattr(frag, end_type, max(start, end))
# set molecule type, based on program
prog = self._meta.get("program")
if prog == "blastn":
frag.molecule_type = "DNA"
elif prog in ["blastp", "blastx", "tblastn", "tblastx"]:
frag.molecule_type = "protein"
hsp = HSP([frag])
for key, val_info in _ELEM_HSP.items():
value = hsp_frag_elem.findtext(key)
caster = val_info[1]
if value is not None:
if caster is not str:
value = caster(value)
setattr(hsp, val_info[0], value)
# delete element after we finish parsing it
hsp_frag_elem.clear()
yield hsp
class BlastXmlIndexer(SearchIndexer):
"""Indexer class for BLAST XML output."""
_parser = BlastXmlParser
qstart_mark = b"<Iteration>"
qend_mark = b"</Iteration>"
block_size = 16384
def __init__(self, filename, **kwargs):
"""Initialize the class."""
SearchIndexer.__init__(self, filename)
# TODO: better way to do this?
iter_obj = self._parser(self._handle, **kwargs)
self._meta, self._fallback = iter_obj._meta, iter_obj._fallback
def __iter__(self):
"""Iterate over BlastXmlIndexer yields qstart_id, start_offset, block's length."""
qstart_mark = self.qstart_mark
qend_mark = self.qend_mark
blast_id_mark = b"Query_"
block_size = self.block_size
handle = self._handle
handle.seek(0)
re_desc = re.compile(
b"<Iteration_query-ID>(.*?)"
br"</Iteration_query-ID>\s+?"
b"<Iteration_query-def>"
b"(.*?)</Iteration_query-def>"
)
re_desc_end = re.compile(b"</Iteration_query-def>")
counter = 0
while True:
start_offset = handle.tell()
line = handle.readline()
if not line:
break
if qstart_mark not in line:
continue
# The following requirements are to make supporting BGZF compressed
# BLAST XML files simpler (avoids complex offset manipulations):
assert line.count(qstart_mark) == 1, "XML without line breaks?"
assert line.lstrip().startswith(qstart_mark), line
if qend_mark in line:
# Should cope with <Iteration>...</Iteration> on one long line
block = line
else:
# Load the rest of this block up to and including </Iteration>
block = [line]
while line and qend_mark not in line:
line = handle.readline()
assert qstart_mark not in line, line
block.append(line)
assert line.rstrip().endswith(qend_mark), line
block = b"".join(block)
assert block.count(qstart_mark) == 1, "XML without line breaks? %r" % block
assert block.count(qend_mark) == 1, "XML without line breaks? %r" % block
# Now we have a full <Iteration>...</Iteration> block, find the ID
regx = re.search(re_desc, block)
try:
qstart_desc = regx.group(2)
qstart_id = regx.group(1)
except AttributeError:
# use the fallback values
assert re.search(re_desc_end, block)
qstart_desc = self._fallback["description"].encode()
qstart_id = self._fallback["id"].encode()
if qstart_id.startswith(blast_id_mark):
qstart_id = qstart_desc.split(b" ", 1)[0]
yield qstart_id.decode(), start_offset, len(block)
counter += 1
def _parse(self, handle):
"""Overwrite SearchIndexer parse (PRIVATE).
As we need to set the meta and fallback dictionaries to the parser.
"""
generator = self._parser(handle, **self._kwargs)
generator._meta = self._meta
generator._fallback = self._fallback
return next(iter(generator))
def get_raw(self, offset):
"""Return the raw record from the file as a bytes string."""
qend_mark = self.qend_mark
handle = self._handle
handle.seek(offset)
qresult_raw = handle.readline()
assert qresult_raw.lstrip().startswith(self.qstart_mark)
while qend_mark not in qresult_raw:
qresult_raw += handle.readline()
assert qresult_raw.rstrip().endswith(qend_mark)
assert qresult_raw.count(qend_mark) == 1
# Note this will include any leading and trailing whitespace, in
# general expecting " <Iteration>\n...\n </Iteration>\n"
return qresult_raw
class _BlastXmlGenerator(XMLGenerator):
"""Event-based XML Generator."""
def __init__(self, out, encoding="utf-8", indent=" ", increment=2):
"""Initialize the class."""
XMLGenerator.__init__(self, out, encoding)
# the indentation character
self._indent = indent
# nest level
self._level = 0
# how many indentation character should we increment per level
self._increment = increment
# container for names of tags with children
self._parent_stack = []
# determine writer method
def startDocument(self):
"""Start the XML document."""
self._write(
'<?xml version="1.0"?>\n'
'<!DOCTYPE BlastOutput PUBLIC "-//NCBI//NCBI BlastOutput/EN" '
'"http://www.ncbi.nlm.nih.gov/dtd/NCBI_BlastOutput.dtd">\n'
)
def startElement(self, name, attrs=None, children=False):
"""Start an XML element.
:param name: element name
:type name: string
:param attrs: element attributes
:type attrs: dictionary {string: object}
:param children: whether the element has children or not
:type children: bool
"""
if attrs is None:
attrs = {}
self.ignorableWhitespace(self._indent * self._level)
XMLGenerator.startElement(self, name, attrs)
def endElement(self, name):
"""End and XML element of the given name."""
XMLGenerator.endElement(self, name)
self._write("\n")
def startParent(self, name, attrs=None):
"""Start an XML element which has children.
:param name: element name
:type name: string
:param attrs: element attributes
:type attrs: dictionary {string: object}
"""
if attrs is None:
attrs = {}
self.startElement(name, attrs, children=True)
self._level += self._increment
self._write("\n")
# append the element name, so we can end it later
self._parent_stack.append(name)
def endParent(self):
"""End an XML element with children."""
# the element to end is the one on top of the stack
name = self._parent_stack.pop()
self._level -= self._increment
self.ignorableWhitespace(self._indent * self._level)
self.endElement(name)
def startParents(self, *names):
"""Start XML elements without children."""
for name in names:
self.startParent(name)
def endParents(self, num):
"""End XML elements, according to the given number."""
for i in range(num):
self.endParent()
def simpleElement(self, name, content=None):
"""Create an XML element without children with the given content."""
self.startElement(name, attrs={})
if content:
self.characters(content)
self.endElement(name)
def characters(self, content):
"""Replace quotes and apostrophe."""
content = escape(str(content))
        for a, b in (('"', "&quot;"), ("'", "&apos;")):
content = content.replace(a, b)
self._write(content)
class BlastXmlWriter:
"""Stream-based BLAST+ XML Writer."""
def __init__(self, handle, use_raw_query_ids=True, use_raw_hit_ids=True):
"""Initialize the class."""
self.xml = _BlastXmlGenerator(handle, "utf-8")
self._use_raw_query_ids = use_raw_query_ids
self._use_raw_hit_ids = use_raw_hit_ids
def write_file(self, qresults):
"""Write the XML contents to the output handle."""
xml = self.xml
self.qresult_counter, self.hit_counter, self.hsp_counter, self.frag_counter = (
0,
0,
0,
0,
)
# get the first qresult, since the preamble requires its attr values
first_qresult = next(qresults)
# start the XML document, set the root element, and create the preamble
xml.startDocument()
xml.startParent("BlastOutput")
self._write_preamble(first_qresult)
# and write the qresults
xml.startParent("BlastOutput_iterations")
self._write_qresults(chain([first_qresult], qresults))
xml.endParents(2)
xml.endDocument()
return (
self.qresult_counter,
self.hit_counter,
self.hsp_counter,
self.frag_counter,
)
def _write_elem_block(self, block_name, map_name, obj, opt_dict=None):
"""Write sibling XML elements (PRIVATE).
:param block_name: common element name prefix
:type block_name: string
:param map_name: name of mapping between element and attribute names
:type map_name: string
:param obj: object whose attribute value will be used
:type obj: object
:param opt_dict: custom element-attribute mapping
:type opt_dict: dictionary {string: string}
"""
if opt_dict is None:
opt_dict = {}
for elem, attr in _WRITE_MAPS[map_name]:
elem = block_name + elem
try:
content = str(getattr(obj, attr))
except AttributeError:
                # ensure attributes that are not present are optional
if elem not in _DTD_OPT:
raise ValueError(
"Element %r (attribute %r) not found" % (elem, attr)
)
else:
# custom element-attribute mapping, for fallback values
if elem in opt_dict:
content = opt_dict[elem]
self.xml.simpleElement(elem, content)
def _write_preamble(self, qresult):
"""Write the XML file preamble (PRIVATE)."""
xml = self.xml
for elem, attr in _WRITE_MAPS["preamble"]:
elem = "BlastOutput_" + elem
if elem == "BlastOutput_param":
xml.startParent(elem)
self._write_param(qresult)
xml.endParent()
continue
try:
content = str(getattr(qresult, attr))
except AttributeError:
if elem not in _DTD_OPT:
raise ValueError(
"Element %s (attribute %s) not found" % (elem, attr)
)
else:
if elem == "BlastOutput_version":
content = "%s %s" % (qresult.program.upper(), qresult.version)
elif qresult.blast_id:
if elem == "BlastOutput_query-ID":
content = qresult.blast_id
elif elem == "BlastOutput_query-def":
content = " ".join([qresult.id, qresult.description]).strip()
xml.simpleElement(elem, content)
def _write_param(self, qresult):
"""Write the parameter block of the preamble (PRIVATE)."""
xml = self.xml
xml.startParent("Parameters")
self._write_elem_block("Parameters_", "param", qresult)
xml.endParent()
def _write_qresults(self, qresults):
"""Write QueryResult objects into iteration elements (PRIVATE)."""
xml = self.xml
for num, qresult in enumerate(qresults):
xml.startParent("Iteration")
xml.simpleElement("Iteration_iter-num", str(num + 1))
opt_dict = {}
if self._use_raw_query_ids:
query_id = qresult.blast_id
query_desc = qresult.id + " " + qresult.description
else:
query_id = qresult.id
query_desc = qresult.description
opt_dict = {
"Iteration_query-ID": query_id,
"Iteration_query-def": query_desc,
}
self._write_elem_block("Iteration_", "qresult", qresult, opt_dict)
# the Iteration_hits tag only has children if there are hits
if qresult:
xml.startParent("Iteration_hits")
self._write_hits(qresult.hits)
xml.endParent()
# otherwise it's a simple element without any contents
else:
xml.simpleElement("Iteration_hits", "")
xml.startParents("Iteration_stat", "Statistics")
self._write_elem_block("Statistics_", "stat", qresult)
xml.endParents(2)
# there's a message if no hits is present
if not qresult:
xml.simpleElement("Iteration_message", "No hits found")
self.qresult_counter += 1
xml.endParent()
def _write_hits(self, hits):
"""Write Hit objects (PRIVATE)."""
xml = self.xml
for num, hit in enumerate(hits):
xml.startParent("Hit")
xml.simpleElement("Hit_num", str(num + 1))
# use custom hit_id and hit_def mapping if the hit has a
# BLAST-generated ID
opt_dict = {}
if self._use_raw_hit_ids:
hit_id = hit.blast_id
hit_desc = " >".join(
[f"{x} {y}" for x, y in zip(hit.id_all, hit.description_all)]
)
else:
hit_id = hit.id
hit_desc = hit.description + " >".join(
[
f"{x} {y}"
for x, y in zip(hit.id_all[1:], hit.description_all[1:])
]
)
opt_dict = {"Hit_id": hit_id, "Hit_def": hit_desc}
self._write_elem_block("Hit_", "hit", hit, opt_dict)
xml.startParent("Hit_hsps")
self._write_hsps(hit.hsps)
self.hit_counter += 1
xml.endParents(2)
def _write_hsps(self, hsps):
"""Write HSP objects (PRIVATE)."""
xml = self.xml
for num, hsp in enumerate(hsps):
xml.startParent("Hsp")
xml.simpleElement("Hsp_num", str(num + 1))
for elem, attr in _WRITE_MAPS["hsp"]:
elem = "Hsp_" + elem
try:
content = self._adjust_output(hsp, elem, attr)
                # make sure any element that is not present is optional
                # in the DTD
except AttributeError:
if elem not in _DTD_OPT:
raise ValueError(
"Element %s (attribute %s) not found" % (elem, attr)
)
else:
xml.simpleElement(elem, str(content))
self.hsp_counter += 1
self.frag_counter += len(hsp.fragments)
xml.endParent()
def _adjust_output(self, hsp, elem, attr):
"""Adjust output to mimic native BLAST+ XML as much as possible (PRIVATE)."""
# adjust coordinates
if attr in (
"query_start",
"query_end",
"hit_start",
"hit_end",
"pattern_start",
"pattern_end",
):
            if "_start" in attr:
                content = getattr(hsp, attr) + 1
            else:
                content = getattr(hsp, attr)
# adjust for 'from' <--> 'to' flip if it's not a translated search
# and frames are different
# adapted from /src/algo/blast/format/blastxml_format.cpp#L216
if hsp.query_frame != 0 and hsp.hit_frame < 0:
if attr == "hit_start":
content = getattr(hsp, "hit_end")
elif attr == "hit_end":
content = getattr(hsp, "hit_start") + 1
# for seqrecord objects, we only need the sequence string
elif elem in ("Hsp_hseq", "Hsp_qseq"):
content = str(getattr(hsp, attr).seq)
elif elem == "Hsp_midline":
content = hsp.aln_annotation["similarity"]
elif elem in ("Hsp_evalue", "Hsp_bit-score"):
# adapted from src/algo/blast/format/blastxml_format.cpp#L138-140
content = "%.*g" % (6, getattr(hsp, attr))
else:
content = getattr(hsp, attr)
return content
# if not used as a module, run the doctest
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()
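# --- Illustrative usage sketch (added; not part of the original module) ---
# In practice this parser is reached through Bio.SearchIO rather than being
# instantiated directly; a minimal, hedged example (assumes a BLAST+ XML file
# named "my_blast.xml" exists locally):
#
#   from Bio import SearchIO
#   for qresult in SearchIO.parse("my_blast.xml", "blast-xml"):
#       for hit in qresult:
#           print(qresult.id, hit.id, hit.hsps[0].evalue)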
|
the-stack_106_18453
|
# main.py - makes it possible to run the program from terminal with
# python3 -m sudokusolver
# from sudokusolver import solver
# put solver.run() in main to run
# Run to solve the board
def solve():
rec_solve()
# Recursive function that solves the board
def rec_solve(row=0, col=0):
next_row, next_col = get_next_coord(row, col)
if board[row][col] == 0:
for i in range(1, 10): # Loop through all possible numbers
if is_valid(row, col, i):
board[row][col] = i
print_board(board)
if next_row == -1 and next_col == -1: # board completed
return True
if not rec_solve(next_row, next_col):
board[row][col] = 0
else:
return True
else:
return rec_solve(next_row, next_col)
# Get the next position to look at
def get_next_coord(row, col):
next_row = row
next_col = col + 1
if next_col == 9:
next_col = 0
next_row += 1
if next_row == 9:
return -1, -1
return next_row, next_col
# Check if the value n is valid on picked position
def is_valid(row, col, n):
return not (in_square(get_square(row, col), n) or (
in_row(row, n) or
in_col(col, n)))
# Get square number based on row and col
def get_square(row, col):
return (row // 3) * 3 + col // 3
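# e.g. get_square(4, 7) == (4 // 3) * 3 + 7 // 3 == 3 + 2 == 5 (middle-right box)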
# Check if value exists in square
def in_square(square, n):
row = square // 3 * 3
col = square % 3 * 3
for i in range(row, row + 3):
for j in range(col, col + 3):
if board[i][j] == n:
return True
return False
# Check if value exists in row
def in_row(row, n):
return n in board[row]
# Check if value exists in col
def in_col(col, n):
for row in board:
if row[col] == n:
return True
return False
# Print out the game board in its current state
def print_board(board):
print("####")
for row in board:
print(row)
print("####")
board_1 = [
[0, 0, 0, 2, 6, 0, 7, 0, 1],
[6, 8, 0, 0, 7, 0, 0, 9, 0],
[1, 9, 0, 0, 0, 4, 5, 0, 0],
[8, 2, 0, 1, 0, 0, 0, 4, 0],
[0, 0, 4, 6, 0, 2, 9, 0, 0],
[0, 5, 0, 0, 0, 3, 0, 2, 8],
[0, 0, 9, 3, 0, 0, 0, 7, 4],
[0, 4, 0, 0, 5, 0, 0, 3, 6],
[7, 0, 3, 0, 1, 8, 0, 0, 0]
]
board_2 = [
[0, 0, 0, 2, 6, 0, 0, 0, 1],
[0, 0, 0, 0, 7, 0, 0, 0, 0],
[0, 9, 0, 0, 0, 4, 5, 0, 0],
[0, 2, 0, 1, 0, 0, 0, 4, 0],
[0, 0, 4, 0, 0, 2, 0, 0, 0],
[0, 0, 0, 0, 0, 3, 0, 2, 8],
[0, 0, 0, 3, 0, 0, 0, 0, 4],
[0, 4, 0, 0, 0, 0, 0, 3, 0],
[7, 0, 3, 0, 1, 0, 0, 0, 0]
]
if __name__ == '__main__':
# Solve the sudoku
board = board_1
solve()
print("\n Solution")
print_board(board)
|
the-stack_106_18455
|
import random
# Tool Commands
def UserInput () :
UserIn = int(input("Enter your 4 digit guess: "))
return UserIn
def ListToString(s):
str1 = " "
return (str1.join(s))
# Initialize variables
MasterCode = list(str(random.randrange(1000, 10000)))
x = 1
# Initial user interface
print("You have 10 guesses to find the correct 4 digit number.")
print("A 'Y' indicates that the digit is correct, a 'N' indicates that the digit is incorrect")
print("-------------------------------------------------------")
# Command Loop with 10 cycles
while x < 11:
# Empty list to store output correct/incorrect answers
Answer = []
# Prompting the user for a guess
UserGuess = list(str(UserInput()))
if (len(UserGuess)) != 4:
print("Your guess must be exactly 4 digits")
pass
else:
# Loop to check if the answer is correct and adding that to the Answer list
for i in range(0,4):
if (UserGuess[i] == MasterCode[i]):
Answer.append("Y")
else:
Answer.append("N")
print(ListToString(Answer))
print("You now have", 10-x, "guesses left.")
print("-----------------------------------")
# Checking Win condition
if (ListToString(Answer).replace(" ","") == "YYYY"):
print("You Win!!")
break
else:
x+=1
            if (x == 11):
                print("You Lose!")
                break
|
the-stack_106_18457
|
'''
Experiment: 2.4-inch LCD display
Version: v1.0
Date: 2021.9
Author: 01Studio
Platform: pyWiFi-ESP32-S2P
Description: demonstrates the LCD display functions, including fill, pixel,
line, rectangle, circle, text and image display.
'''
#Import the required modules
from tftlcd import LCD24
import time
#Define commonly used colors
RED = (255,0,0)
GREEN = (0,255,0)
BLUE = (0,0,255)
BLACK = (0,0,0)
WHITE = (255,255,255)
########################
#Create the 2.4-inch LCD object and initialize it
########################
d = LCD24(portrait=1) #default orientation: portrait
#Fill the screen with white
d.fill(WHITE)
#Draw a pixel
d.drawPixel(5, 5, RED)
#Draw a line segment
d.drawLine(5, 10, 200, 10, RED)
#Draw a rectangle
d.drawRect(5, 30, 200, 40, RED, border=5)
#Draw a circle
d.drawCircle(100, 120, 30, RED, border=5)
#Draw text in four sizes
d.printStr('Hello 01Studio', 10, 200, RED, size=1)
d.printStr('Hello 01Studio', 10, 230, GREEN, size=2)
d.printStr('Hello 01Studio', 10, 270, BLUE, size=3)
time.sleep(5) #wait for 5 seconds
#Display images
d.Picture(0,0,"/picture/1.jpg")
time.sleep(3)
d.Picture(0,0,"/picture/2.jpg")
time.sleep(3)
d.Picture(0,0,"/picture/01studio.jpg")
|
the-stack_106_18459
|
import torch
from configs.experiment_config import FNNConfig as experiment_config
from experiments.deep_experiment import DeepExperiment
# Regression task of one-dimensional data
class D_1_2_1_Experiment(DeepExperiment):
def __init__(self):
super(D_1_2_1_Experiment, self).__init__()
def before_test(self):
super(D_1_2_1_Experiment, self).before_test()
self.logger.info("=" * 10 + "test start" + "=" * 10)
for i, (data, gt) in enumerate(self.test_loader):
self.test_one_batch(data, gt, "float")
self.logger.info("=" * 10 + "testing end" + "=" * 10)
def test_one_batch(self, data, gt, data_type=None):
data = self.prepare_data(data, data_type)
gt = self.prepare_data(gt, data_type)
self.optimizer.zero_grad()
out = self.net_structure(data)
loss = self.loss_function(out, gt)
self.logger.info("One Batch:test_loss is {}".format(loss))
def train_one_epoch(self, epoch):
train_loss = 0
self.net_structure.train()
for sample, label in self.train_loader:
sample = self.prepare_data(sample, 'float')
label = self.prepare_data(label, 'float')
self.optimizer.zero_grad()
out = self.net_structure(sample)
loss = self.loss_function(out, label)
loss.backward()
self.optimizer.step()
train_loss += loss.data
train_loss = train_loss / len(self.train_loader)
self.logger.info("EPOCH:{}\t train_loss:{:.6f}\t".format(epoch, train_loss))
self.scheduler.step()
return train_loss
def valid_one_epoch(self, epoch):
self.net_structure.eval()
with torch.no_grad():
valid_loss = 0
for data, label in self.valid_loader:
data = self.prepare_data(data, 'float')
label = self.prepare_data(label, 'float')
self.net_structure.zero_grad()
predict = self.net_structure(data)
valid_loss += self.loss_function(predict, label)
valid_loss /= len(self.valid_loader)
self.logger.info("Epoch:{}\t valid_loss:{:.6f}".format(epoch, valid_loss))
return valid_loss
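# --- Illustrative usage sketch (added; not part of the original file) ---
# Hedged example of how this experiment class might be driven; the `train`
# and `test` entry points are assumed to come from DeepExperiment and are
# not defined in this file.
#
#   experiment = D_1_2_1_Experiment()
#   experiment.train()  # assumed base-class method
#   experiment.test()   # assumed base-class method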
|
the-stack_106_18460
|
import itertools
import multiprocessing
import runpy
import sys
from os import path as osp
import pytest
def run_main(*args):
# patch sys.args
sys.argv = list(args)
target = args[0]
# run_path has one difference with invoking Python from command-line:
# if the target is a file (rather than a directory), it does not add its
# parent directory to sys.path. Thus, importing other modules from the
# same directory is broken unless sys.path is patched here.
if osp.isfile(target):
sys.path.insert(0, osp.dirname(target))
runpy.run_path(target, run_name="__main__")
def powerset(iterable):
s = list(iterable)
return itertools.chain.from_iterable(
itertools.combinations(s, r) for r in range(len(s) + 1)
)
def run_main_subproc(args):
    # This test needs to be done in its own process as there is a potential for
# an OpenGL context clash otherwise
mp_ctx = multiprocessing.get_context("spawn")
proc = mp_ctx.Process(target=run_main, args=args)
proc.start()
proc.join()
assert proc.exitcode == 0
@pytest.mark.gfxtest
@pytest.mark.skipif(
not osp.exists("data/scene_datasets/habitat-test-scenes/skokloster-castle.glb")
or not osp.exists("data/scene_datasets/habitat-test-scenes/van-gogh-room.glb"),
reason="Requires the habitat-test-scenes",
)
@pytest.mark.parametrize(
"args",
[
("examples/tutorials/stereo_agent.py", "--no-display"),
("examples/tutorials/lighting_tutorial.py", "--no-show-images"),
("examples/tutorials/new_actions.py",),
(
"examples/tutorials/nb_python/rigid_object_tutorial.py",
"--no-show-video",
"--no-make-video",
),
(
"examples/tutorials/nb_python/ECCV_2020_Navigation.py",
"--no-make-video",
"--no-display",
),
(
"examples/tutorials/nb_python/ECCV_2020_Interactivity.py",
"--no-make-video",
"--no-display",
),
("examples/tutorials/semantic_id_tutorial.py", "--no-show-images"),
],
)
def test_example_modules(args):
run_main_subproc(args)
@pytest.mark.gfxtest
@pytest.mark.skipif(
not osp.exists("data/scene_datasets/habitat-test-scenes/skokloster-castle.glb"),
reason="Requires the habitat-test-scenes",
)
@pytest.mark.parametrize(
"args",
[
["examples/example.py"] + list(p)
for p in powerset(
[
"--compute_shortest_path",
"--compute_action_shortest_path",
"--enable_physics",
"--semantic_sensor",
"--depth_sensor",
"--recompute_navmesh",
]
)
if not (("--compute_action_shortest_path" in p) and ("--enable_physics" in p))
],
)
def test_example_script(args):
run_main_subproc(args)
|
the-stack_106_18465
|
import abc
import os
import pygame
class Plot(abc.ABC):
def draw(self, game:"Game", surface:"pygame.Surface", plot_dir:str,
update:bool) -> None:
# Make the temporary image file path
file_path = os.path.join(plot_dir, f"{self.__class__.__name__}.png")
# Plot the data
if update:
self.plot(game, file_path, surface.get_height(),
surface.get_width())
# Draw the plot image
surface.blit(pygame.image.load(file_path), (0,0))
@abc.abstractmethod
def plot(self, game:"Game", file_path:str, height:int, width:int)\
-> None:
"""Plot the game information saving the plot to the given
file path
Parameters
----------
game: Game
The object that holds all information about the simulation.
file_path: str
The file path to save the plot to.
"""
...
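# --- Illustrative sketch (added; not part of the original file) ---
# A minimal concrete subclass showing one way `plot` could be implemented.
# It assumes matplotlib is installed; the `game.scores` attribute used below
# is hypothetical and only serves as example data.
class ScorePlot(Plot):
    def plot(self, game:"Game", file_path:str, height:int, width:int) -> None:
        import matplotlib
        matplotlib.use("Agg")  # render off-screen; no display required
        import matplotlib.pyplot as plt
        dpi = 100
        fig = plt.figure(figsize=(width / dpi, height / dpi), dpi=dpi)
        plt.plot(game.scores)  # hypothetical data attribute
        fig.savefig(file_path)
        plt.close(fig)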
|
the-stack_106_18466
|
import ee
import time
import sys
ee.Initialize()
STATUS = "Status: {}"
def wait_for_completion(task_descripsion, widget_alert):
"""Wait until the selected process is finished. Display some output information
Args:
task_descripsion (str) : name of the running task
widget_alert (v.Alert) : alert to display the output messages
"""
state = 'UNSUBMITTED'
while state != 'COMPLETED':
widget_alert.add_live_msg(STATUS.format(state))
time.sleep(5)
#search for the task in task_list
current_task = isTask(task_descripsion)
state = current_task.state
def isTask(task_descripsion):
"""Search for the described task in the user Task list return None if nothing is find
Args:
task_descripsion (str): the task descripsion
Returns
task (ee.Task) : return the found task else None
"""
tasks_list = ee.batch.Task.list()
current_task = None
for task in tasks_list:
if task.config['description'] == task_descripsion:
current_task = task
break
return current_task
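# --- Illustrative usage sketch (added; not part of the original module) ---
# Typical flow (hedged; the export below and its parameters are hypothetical):
#
#   task = ee.batch.Export.table.toDrive(collection=fc, description="my_export")
#   task.start()
#   wait_for_completion("my_export", alert_widget)  # blocks until COMPLETED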
|
the-stack_106_18467
|
pkgname = "pcre"
pkgver = "8.45"
pkgrel = 0
build_style = "gnu_configure"
configure_args = [
"--with-pic",
"--enable-utf8",
"--enable-unicode-properties",
"--enable-pcretest-libedit",
"--enable-pcregrep-libz",
"--enable-pcregrep-libbz2",
"--enable-newline-is-anycrlf",
"--enable-jit",
"--enable-static",
"--disable-stack-for-recursion",
]
hostmakedepends = ["pkgconf"]
makedepends = ["zlib-devel", "libbz2-devel", "libedit-devel"]
pkgdesc = "Perl Compatible Regular Expressions"
maintainer = "q66 <[email protected]>"
license = "BSD-3-Clause"
url = "http://www.pcre.org"
source = f"$(SOURCEFORGE_SITE)/{pkgname}/{pkgname}/{pkgver}/{pkgname}-{pkgver}.tar.bz2"
sha256 = "4dae6fdcd2bb0bb6c37b5f97c33c2be954da743985369cddac3546e3218bffb8"
options = ["!cross"]
def post_install(self):
self.install_license("LICENCE")
@subpackage("libpcrecpp")
def _libpcrecpp(self):
self.pkgdesc = f"{pkgdesc} (C++ shared libraries)"
return ["usr/lib/libpcrecpp.so.*"]
@subpackage("libpcre")
def _libpcre(self):
self.pkgdesc = f"{pkgdesc} (shared libraries)"
return self.default_libs()
@subpackage("pcre-devel")
def _devel(self):
self.depends += ["zlib-devel", "libbz2-devel"]
return self.default_devel(man = True, extra = ["usr/share/doc"])
|
the-stack_106_18468
|
import copy
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
import warnings
import numpy as np
import optuna
from optuna import distributions
from optuna import samplers
from optuna._imports import try_import
from optuna._study_direction import StudyDirection
from optuna.exceptions import ExperimentalWarning
from optuna.samplers import BaseSampler
from optuna.study import Study
from optuna.trial import FrozenTrial
from optuna.trial import TrialState
with try_import() as _imports:
import skopt
from skopt.space import space
class SkoptSampler(BaseSampler):
"""Sampler using Scikit-Optimize as the backend.
Example:
Optimize a simple quadratic function by using :class:`~optuna.integration.SkoptSampler`.
.. testcode::
import optuna
def objective(trial):
x = trial.suggest_float("x", -10, 10)
y = trial.suggest_int("y", 0, 10)
return x ** 2 + y
sampler = optuna.integration.SkoptSampler()
study = optuna.create_study(sampler=sampler)
study.optimize(objective, n_trials=10)
Args:
independent_sampler:
A :class:`~optuna.samplers.BaseSampler` instance that is used for independent
sampling. The parameters not contained in the relative search space are sampled
by this sampler.
The search space for :class:`~optuna.integration.SkoptSampler` is determined by
:func:`~optuna.samplers.intersection_search_space()`.
If :obj:`None` is specified, :class:`~optuna.samplers.RandomSampler` is used
as the default.
.. seealso::
:class:`optuna.samplers` module provides built-in independent samplers
such as :class:`~optuna.samplers.RandomSampler` and
:class:`~optuna.samplers.TPESampler`.
warn_independent_sampling:
If this is :obj:`True`, a warning message is emitted when
the value of a parameter is sampled by using an independent sampler.
Note that the parameters of the first trial in a study are always sampled
via an independent sampler, so no warning messages are emitted in this case.
skopt_kwargs:
Keyword arguments passed to the constructor of
`skopt.Optimizer <https://scikit-optimize.github.io/#skopt.Optimizer>`_
class.
Note that ``dimensions`` argument in ``skopt_kwargs`` will be ignored
because it is added by :class:`~optuna.integration.SkoptSampler` automatically.
n_startup_trials:
The independent sampling is used until the given number of trials finish in the
same study.
consider_pruned_trials:
If this is :obj:`True`, the PRUNED trials are considered for sampling.
.. note::
Added in v2.0.0 as an experimental feature. The interface may change in newer
versions without prior notice. See
https://github.com/optuna/optuna/releases/tag/v2.0.0.
.. note::
As the number of trials :math:`n` increases, each sampling takes longer and longer
on a scale of :math:`O(n^3)`. And, if this is :obj:`True`, the number of trials
will increase. So, it is suggested to set this flag :obj:`False` when each
evaluation of the objective function is relatively faster than each sampling. On
the other hand, it is suggested to set this flag :obj:`True` when each evaluation
of the objective function is relatively slower than each sampling.
"""
def __init__(
self,
independent_sampler: Optional[BaseSampler] = None,
warn_independent_sampling: bool = True,
skopt_kwargs: Optional[Dict[str, Any]] = None,
n_startup_trials: int = 1,
*,
consider_pruned_trials: bool = False,
) -> None:
_imports.check()
self._skopt_kwargs = skopt_kwargs or {}
if "dimensions" in self._skopt_kwargs:
del self._skopt_kwargs["dimensions"]
self._independent_sampler = independent_sampler or samplers.RandomSampler()
self._warn_independent_sampling = warn_independent_sampling
self._n_startup_trials = n_startup_trials
self._search_space = samplers.IntersectionSearchSpace()
self._consider_pruned_trials = consider_pruned_trials
if self._consider_pruned_trials:
warnings.warn(
"`consider_pruned_trials` option is an experimental feature."
" The interface can change in the future.",
ExperimentalWarning,
)
def reseed_rng(self) -> None:
self._independent_sampler.reseed_rng()
def infer_relative_search_space(
self, study: Study, trial: FrozenTrial
) -> Dict[str, distributions.BaseDistribution]:
search_space = {}
for name, distribution in self._search_space.calculate(study).items():
if distribution.single():
if not isinstance(distribution, distributions.CategoricalDistribution):
# `skopt` cannot handle non-categorical distributions that contain just
# a single value, so we skip this distribution.
#
# Note that `Trial` takes care of this distribution during suggestion.
continue
search_space[name] = distribution
return search_space
def sample_relative(
self,
study: Study,
trial: FrozenTrial,
search_space: Dict[str, distributions.BaseDistribution],
) -> Dict[str, Any]:
self._raise_error_if_multi_objective(study)
if len(search_space) == 0:
return {}
complete_trials = self._get_trials(study)
if len(complete_trials) < self._n_startup_trials:
return {}
optimizer = _Optimizer(search_space, self._skopt_kwargs)
optimizer.tell(study, complete_trials)
return optimizer.ask()
def sample_independent(
self,
study: Study,
trial: FrozenTrial,
param_name: str,
param_distribution: distributions.BaseDistribution,
) -> Any:
self._raise_error_if_multi_objective(study)
if self._warn_independent_sampling:
complete_trials = self._get_trials(study)
if len(complete_trials) >= self._n_startup_trials:
self._log_independent_sampling(trial, param_name)
return self._independent_sampler.sample_independent(
study, trial, param_name, param_distribution
)
def _log_independent_sampling(self, trial: FrozenTrial, param_name: str) -> None:
logger = optuna.logging.get_logger(__name__)
logger.warning(
"The parameter '{}' in trial#{} is sampled independently "
"by using `{}` instead of `SkoptSampler` "
"(optimization performance may be degraded). "
"You can suppress this warning by setting `warn_independent_sampling` "
"to `False` in the constructor of `SkoptSampler`, "
"if this independent sampling is intended behavior.".format(
param_name, trial.number, self._independent_sampler.__class__.__name__
)
)
def _get_trials(self, study: Study) -> List[FrozenTrial]:
complete_trials = []
for t in study.get_trials(deepcopy=False):
if t.state == TrialState.COMPLETE:
complete_trials.append(t)
elif (
t.state == TrialState.PRUNED
and len(t.intermediate_values) > 0
and self._consider_pruned_trials
):
_, value = max(t.intermediate_values.items())
if value is None:
continue
# We rewrite the value of the trial `t` for sampling, so we need a deepcopy.
copied_t = copy.deepcopy(t)
copied_t.value = value
complete_trials.append(copied_t)
return complete_trials
def after_trial(
self,
study: Study,
trial: FrozenTrial,
state: TrialState,
values: Optional[Sequence[float]],
) -> None:
self._independent_sampler.after_trial(study, trial, state, values)
class _Optimizer(object):
def __init__(
self, search_space: Dict[str, distributions.BaseDistribution], skopt_kwargs: Dict[str, Any]
) -> None:
self._search_space = search_space
dimensions = []
for name, distribution in sorted(self._search_space.items()):
if isinstance(distribution, distributions.UniformDistribution):
# Convert the upper bound from exclusive (optuna) to inclusive (skopt).
high = np.nextafter(distribution.high, float("-inf"))
dimension = space.Real(distribution.low, high)
elif isinstance(distribution, distributions.LogUniformDistribution):
# Convert the upper bound from exclusive (optuna) to inclusive (skopt).
high = np.nextafter(distribution.high, float("-inf"))
dimension = space.Real(distribution.low, high, prior="log-uniform")
elif isinstance(distribution, distributions.IntUniformDistribution):
count = (distribution.high - distribution.low) // distribution.step
dimension = space.Integer(0, count)
elif isinstance(distribution, distributions.IntLogUniformDistribution):
low = distribution.low - 0.5
high = distribution.high + 0.5
dimension = space.Real(low, high, prior="log-uniform")
elif isinstance(distribution, distributions.DiscreteUniformDistribution):
count = int((distribution.high - distribution.low) // distribution.q)
dimension = space.Integer(0, count)
elif isinstance(distribution, distributions.CategoricalDistribution):
dimension = space.Categorical(distribution.choices)
else:
raise NotImplementedError(
"The distribution {} is not implemented.".format(distribution)
)
dimensions.append(dimension)
self._optimizer = skopt.Optimizer(dimensions, **skopt_kwargs)
def tell(self, study: Study, complete_trials: List[FrozenTrial]) -> None:
xs = []
ys = []
for trial in complete_trials:
if not self._is_compatible(trial):
continue
x, y = self._complete_trial_to_skopt_observation(study, trial)
xs.append(x)
ys.append(y)
self._optimizer.tell(xs, ys)
def ask(self) -> Dict[str, Any]:
params = {}
param_values = self._optimizer.ask()
for (name, distribution), value in zip(sorted(self._search_space.items()), param_values):
if isinstance(distribution, distributions.DiscreteUniformDistribution):
value = value * distribution.q + distribution.low
if isinstance(distribution, distributions.IntUniformDistribution):
value = value * distribution.step + distribution.low
if isinstance(distribution, distributions.IntLogUniformDistribution):
value = int(np.round(value))
value = min(max(value, distribution.low), distribution.high)
params[name] = value
return params
def _is_compatible(self, trial: FrozenTrial) -> bool:
# Thanks to `intersection_search_space()` function, in sequential optimization,
# the parameters of complete trials are always compatible with the search space.
#
# However, in distributed optimization, incompatible trials may complete on a worker
# just after an intersection search space is calculated on another worker.
for name, distribution in self._search_space.items():
if name not in trial.params:
return False
distributions.check_distribution_compatibility(distribution, trial.distributions[name])
param_value = trial.params[name]
param_internal_value = distribution.to_internal_repr(param_value)
if not distribution._contains(param_internal_value):
return False
return True
def _complete_trial_to_skopt_observation(
self, study: Study, trial: FrozenTrial
) -> Tuple[List[Any], float]:
param_values = []
for name, distribution in sorted(self._search_space.items()):
param_value = trial.params[name]
if isinstance(distribution, distributions.DiscreteUniformDistribution):
param_value = (param_value - distribution.low) // distribution.q
if isinstance(distribution, distributions.IntUniformDistribution):
param_value = (param_value - distribution.low) // distribution.step
param_values.append(param_value)
value = trial.value
assert value is not None
if study.direction == StudyDirection.MAXIMIZE:
value = -value
return param_values, value
|
the-stack_106_18469
|
import sys
from sqlalchemy import MetaData, Column, Table, Integer, String, Text, \
Numeric, CHAR, ForeignKey, INTEGER, Index, UniqueConstraint, \
TypeDecorator, CheckConstraint, text, PrimaryKeyConstraint, \
ForeignKeyConstraint, VARCHAR, DECIMAL, DateTime, BigInteger, BIGINT, \
SmallInteger
from sqlalchemy.types import NULLTYPE
from sqlalchemy.engine.reflection import Inspector
from alembic.operations import ops
from alembic import autogenerate
from alembic.migration import MigrationContext
from alembic.testing import TestBase
from alembic.testing import config
from alembic.testing import assert_raises_message
from alembic.testing.mock import Mock, patch
from alembic.testing import eq_, is_, is_not_
from alembic.util import CommandError
from ._autogen_fixtures import AutogenTest, AutogenFixtureTest
py3k = sys.version_info >= (3, )
class AutogenCrossSchemaTest(AutogenTest, TestBase):
__only_on__ = 'postgresql'
@classmethod
def _get_db_schema(cls):
m = MetaData()
Table('t1', m,
Column('x', Integer)
)
Table('t2', m,
Column('y', Integer),
schema=config.test_schema
)
Table('t6', m,
Column('u', Integer)
)
Table('t7', m,
Column('v', Integer),
schema=config.test_schema
)
return m
@classmethod
def _get_model_schema(cls):
m = MetaData()
Table('t3', m,
Column('q', Integer)
)
Table('t4', m,
Column('z', Integer),
schema=config.test_schema
)
Table('t6', m,
Column('u', Integer)
)
Table('t7', m,
Column('v', Integer),
schema=config.test_schema
)
return m
def test_default_schema_omitted_upgrade(self):
def include_object(obj, name, type_, reflected, compare_to):
if type_ == "table":
return name == "t3"
else:
return True
self._update_context(
object_filters=include_object,
include_schemas=True,
)
uo = ops.UpgradeOps(ops=[])
autogenerate._produce_net_changes(self.autogen_context, uo)
diffs = uo.as_diffs()
eq_(diffs[0][0], "add_table")
eq_(diffs[0][1].schema, None)
def test_alt_schema_included_upgrade(self):
def include_object(obj, name, type_, reflected, compare_to):
if type_ == "table":
return name == "t4"
else:
return True
self._update_context(
object_filters=include_object,
include_schemas=True,
)
uo = ops.UpgradeOps(ops=[])
autogenerate._produce_net_changes(self.autogen_context, uo)
diffs = uo.as_diffs()
eq_(diffs[0][0], "add_table")
eq_(diffs[0][1].schema, config.test_schema)
def test_default_schema_omitted_downgrade(self):
def include_object(obj, name, type_, reflected, compare_to):
if type_ == "table":
return name == "t1"
else:
return True
self._update_context(
object_filters=include_object,
include_schemas=True,
)
uo = ops.UpgradeOps(ops=[])
autogenerate._produce_net_changes(self.autogen_context, uo)
diffs = uo.as_diffs()
eq_(diffs[0][0], "remove_table")
eq_(diffs[0][1].schema, None)
def test_alt_schema_included_downgrade(self):
def include_object(obj, name, type_, reflected, compare_to):
if type_ == "table":
return name == "t2"
else:
return True
self._update_context(
object_filters=include_object,
include_schemas=True,
)
uo = ops.UpgradeOps(ops=[])
autogenerate._produce_net_changes(self.autogen_context, uo)
diffs = uo.as_diffs()
eq_(diffs[0][0], "remove_table")
eq_(diffs[0][1].schema, config.test_schema)
class AutogenDefaultSchemaTest(AutogenFixtureTest, TestBase):
__only_on__ = 'postgresql'
def test_uses_explcit_schema_in_default_one(self):
default_schema = self.bind.dialect.default_schema_name
m1 = MetaData()
m2 = MetaData()
Table('a', m1, Column('x', String(50)))
Table('a', m2, Column('x', String(50)), schema=default_schema)
diffs = self._fixture(m1, m2, include_schemas=True)
eq_(diffs, [])
def test_uses_explcit_schema_in_default_two(self):
default_schema = self.bind.dialect.default_schema_name
m1 = MetaData()
m2 = MetaData()
Table('a', m1, Column('x', String(50)))
Table('a', m2, Column('x', String(50)), schema=default_schema)
Table('a', m2, Column('y', String(50)), schema="test_schema")
diffs = self._fixture(m1, m2, include_schemas=True)
eq_(len(diffs), 1)
eq_(diffs[0][0], "add_table")
eq_(diffs[0][1].schema, "test_schema")
eq_(diffs[0][1].c.keys(), ['y'])
def test_uses_explcit_schema_in_default_three(self):
default_schema = self.bind.dialect.default_schema_name
m1 = MetaData()
m2 = MetaData()
Table('a', m1, Column('y', String(50)), schema="test_schema")
Table('a', m2, Column('x', String(50)), schema=default_schema)
Table('a', m2, Column('y', String(50)), schema="test_schema")
diffs = self._fixture(m1, m2, include_schemas=True)
eq_(len(diffs), 1)
eq_(diffs[0][0], "add_table")
eq_(diffs[0][1].schema, default_schema)
eq_(diffs[0][1].c.keys(), ['x'])
class AutogenDefaultSchemaIsNoneTest(AutogenFixtureTest, TestBase):
__only_on__ = 'sqlite'
def setUp(self):
super(AutogenDefaultSchemaIsNoneTest, self).setUp()
# prerequisite
eq_(self.bind.dialect.default_schema_name, None)
def test_no_default_schema(self):
m1 = MetaData()
m2 = MetaData()
Table('a', m1, Column('x', String(50)))
Table('a', m2, Column('x', String(50)))
def _include_object(obj, name, type_, reflected, compare_to):
if type_ == "table":
return name in 'a' and obj.schema != 'main'
else:
return True
diffs = self._fixture(
m1, m2, include_schemas=True,
object_filters=_include_object)
eq_(len(diffs), 0)
class ModelOne(object):
__requires__ = ('unique_constraint_reflection', )
schema = None
@classmethod
def _get_db_schema(cls):
schema = cls.schema
m = MetaData(schema=schema)
Table('user', m,
Column('id', Integer, primary_key=True),
Column('name', String(50)),
Column('a1', Text),
Column("pw", String(50)),
Index('pw_idx', 'pw')
)
Table('address', m,
Column('id', Integer, primary_key=True),
Column('email_address', String(100), nullable=False),
)
Table('order', m,
Column('order_id', Integer, primary_key=True),
Column("amount", Numeric(8, 2), nullable=False,
server_default=text("0")),
CheckConstraint('amount >= 0', name='ck_order_amount')
)
Table('extra', m,
Column("x", CHAR),
Column('uid', Integer, ForeignKey('user.id'))
)
return m
@classmethod
def _get_model_schema(cls):
schema = cls.schema
m = MetaData(schema=schema)
Table('user', m,
Column('id', Integer, primary_key=True),
Column('name', String(50), nullable=False),
Column('a1', Text, server_default="x")
)
Table('address', m,
Column('id', Integer, primary_key=True),
Column('email_address', String(100), nullable=False),
Column('street', String(50)),
UniqueConstraint('email_address', name="uq_email")
)
Table('order', m,
Column('order_id', Integer, primary_key=True),
Column('amount', Numeric(10, 2), nullable=True,
server_default=text("0")),
Column('user_id', Integer, ForeignKey('user.id')),
CheckConstraint('amount > -1', name='ck_order_amount'),
)
Table('item', m,
Column('id', Integer, primary_key=True),
Column('description', String(100)),
Column('order_id', Integer, ForeignKey('order.order_id')),
CheckConstraint('len(description) > 5')
)
return m
class AutogenerateDiffTest(ModelOne, AutogenTest, TestBase):
__only_on__ = 'sqlite'
def test_diffs(self):
"""test generation of diff rules"""
metadata = self.m2
uo = ops.UpgradeOps(ops=[])
ctx = self.autogen_context
autogenerate._produce_net_changes(
ctx, uo
)
diffs = uo.as_diffs()
eq_(
diffs[0],
('add_table', metadata.tables['item'])
)
eq_(diffs[1][0], 'remove_table')
eq_(diffs[1][1].name, "extra")
eq_(diffs[2][0], "add_column")
eq_(diffs[2][1], None)
eq_(diffs[2][2], "address")
eq_(diffs[2][3], metadata.tables['address'].c.street)
eq_(diffs[3][0], "add_constraint")
eq_(diffs[3][1].name, "uq_email")
eq_(diffs[4][0], "add_column")
eq_(diffs[4][1], None)
eq_(diffs[4][2], "order")
eq_(diffs[4][3], metadata.tables['order'].c.user_id)
eq_(diffs[5][0][0], "modify_type")
eq_(diffs[5][0][1], None)
eq_(diffs[5][0][2], "order")
eq_(diffs[5][0][3], "amount")
eq_(repr(diffs[5][0][5]), "NUMERIC(precision=8, scale=2)")
eq_(repr(diffs[5][0][6]), "Numeric(precision=10, scale=2)")
self._assert_fk_diff(
diffs[6], "add_fk",
"order", ["user_id"],
"user", ["id"]
)
eq_(diffs[7][0][0], "modify_default")
eq_(diffs[7][0][1], None)
eq_(diffs[7][0][2], "user")
eq_(diffs[7][0][3], "a1")
eq_(diffs[7][0][6].arg, "x")
eq_(diffs[8][0][0], 'modify_nullable')
eq_(diffs[8][0][5], True)
eq_(diffs[8][0][6], False)
eq_(diffs[9][0], 'remove_index')
eq_(diffs[9][1].name, 'pw_idx')
eq_(diffs[10][0], 'remove_column')
eq_(diffs[10][3].name, 'pw')
eq_(diffs[10][3].table.name, 'user')
assert isinstance(
diffs[10][3].type, String
)
def test_include_symbol(self):
diffs = []
def include_symbol(name, schema=None):
return name in ('address', 'order')
context = MigrationContext.configure(
connection=self.bind.connect(),
opts={
'compare_type': True,
'compare_server_default': True,
'target_metadata': self.m2,
'include_symbol': include_symbol,
}
)
diffs = autogenerate.compare_metadata(
context, context.opts['target_metadata'])
alter_cols = set([
d[2] for d in self._flatten_diffs(diffs)
if d[0].startswith('modify')
])
eq_(alter_cols, set(['order']))
def test_include_object(self):
def include_object(obj, name, type_, reflected, compare_to):
assert obj.name == name
if type_ == "table":
if reflected:
assert obj.metadata is not self.m2
else:
assert obj.metadata is self.m2
return name in ("address", "order", "user")
elif type_ == "column":
if reflected:
assert obj.table.metadata is not self.m2
else:
assert obj.table.metadata is self.m2
return name != "street"
else:
return True
context = MigrationContext.configure(
connection=self.bind.connect(),
opts={
'compare_type': True,
'compare_server_default': True,
'target_metadata': self.m2,
'include_object': include_object,
}
)
diffs = autogenerate.compare_metadata(
context, context.opts['target_metadata'])
alter_cols = set([
d[2] for d in self._flatten_diffs(diffs)
if d[0].startswith('modify')
]).union(
d[3].name for d in self._flatten_diffs(diffs)
if d[0] == 'add_column'
).union(
d[1].name for d in self._flatten_diffs(diffs)
if d[0] == 'add_table'
)
eq_(alter_cols, set(['user_id', 'order', 'user']))
def test_skip_null_type_comparison_reflected(self):
ac = ops.AlterColumnOp("sometable", "somecol")
autogenerate.compare._compare_type(
self.autogen_context, ac,
None, "sometable", "somecol",
Column("somecol", NULLTYPE),
Column("somecol", Integer()),
)
diff = ac.to_diff_tuple()
assert not diff
def test_skip_null_type_comparison_local(self):
ac = ops.AlterColumnOp("sometable", "somecol")
autogenerate.compare._compare_type(
self.autogen_context, ac,
None, "sometable", "somecol",
Column("somecol", Integer()),
Column("somecol", NULLTYPE),
)
diff = ac.to_diff_tuple()
assert not diff
def test_custom_type_compare(self):
class MyType(TypeDecorator):
impl = Integer
def compare_against_backend(self, dialect, conn_type):
return isinstance(conn_type, Integer)
ac = ops.AlterColumnOp("sometable", "somecol")
autogenerate.compare._compare_type(
self.autogen_context, ac,
None, "sometable", "somecol",
Column("somecol", INTEGER()),
Column("somecol", MyType()),
)
assert not ac.has_changes()
ac = ops.AlterColumnOp("sometable", "somecol")
autogenerate.compare._compare_type(
self.autogen_context, ac,
None, "sometable", "somecol",
Column("somecol", String()),
Column("somecol", MyType()),
)
diff = ac.to_diff_tuple()
eq_(
diff[0][0:4],
('modify_type', None, 'sometable', 'somecol')
)
def test_affinity_typedec(self):
class MyType(TypeDecorator):
impl = CHAR
def load_dialect_impl(self, dialect):
if dialect.name == 'sqlite':
return dialect.type_descriptor(Integer())
else:
return dialect.type_descriptor(CHAR(32))
uo = ops.AlterColumnOp('sometable', 'somecol')
autogenerate.compare._compare_type(
self.autogen_context, uo,
None, "sometable", "somecol",
Column("somecol", Integer, nullable=True),
Column("somecol", MyType())
)
assert not uo.has_changes()
def test_dont_barf_on_already_reflected(self):
from sqlalchemy.util import OrderedSet
inspector = Inspector.from_engine(self.bind)
uo = ops.UpgradeOps(ops=[])
autogenerate.compare._compare_tables(
OrderedSet([(None, 'extra'), (None, 'user')]),
OrderedSet(), inspector,
MetaData(), uo, self.autogen_context
)
eq_(
[(rec[0], rec[1].name) for rec in uo.as_diffs()],
[('remove_table', 'extra'), ('remove_table', 'user')]
)
class AutogenerateDiffTestWSchema(ModelOne, AutogenTest, TestBase):
__only_on__ = 'postgresql'
schema = "test_schema"
def test_diffs(self):
"""test generation of diff rules"""
metadata = self.m2
self._update_context(
include_schemas=True,
)
uo = ops.UpgradeOps(ops=[])
autogenerate._produce_net_changes(self.autogen_context, uo)
diffs = uo.as_diffs()
eq_(
diffs[0],
('add_table', metadata.tables['%s.item' % self.schema])
)
eq_(diffs[1][0], 'remove_table')
eq_(diffs[1][1].name, "extra")
eq_(diffs[2][0], "add_column")
eq_(diffs[2][1], self.schema)
eq_(diffs[2][2], "address")
eq_(diffs[2][3], metadata.tables['%s.address' % self.schema].c.street)
eq_(diffs[3][0], "add_constraint")
eq_(diffs[3][1].name, "uq_email")
eq_(diffs[4][0], "add_column")
eq_(diffs[4][1], self.schema)
eq_(diffs[4][2], "order")
eq_(diffs[4][3], metadata.tables['%s.order' % self.schema].c.user_id)
eq_(diffs[5][0][0], "modify_type")
eq_(diffs[5][0][1], self.schema)
eq_(diffs[5][0][2], "order")
eq_(diffs[5][0][3], "amount")
eq_(repr(diffs[5][0][5]), "NUMERIC(precision=8, scale=2)")
eq_(repr(diffs[5][0][6]), "Numeric(precision=10, scale=2)")
self._assert_fk_diff(
diffs[6], "add_fk",
"order", ["user_id"],
"user", ["id"],
source_schema=config.test_schema
)
eq_(diffs[7][0][0], "modify_default")
eq_(diffs[7][0][1], self.schema)
eq_(diffs[7][0][2], "user")
eq_(diffs[7][0][3], "a1")
eq_(diffs[7][0][6].arg, "x")
eq_(diffs[8][0][0], 'modify_nullable')
eq_(diffs[8][0][5], True)
eq_(diffs[8][0][6], False)
eq_(diffs[9][0], 'remove_index')
eq_(diffs[9][1].name, 'pw_idx')
eq_(diffs[10][0], 'remove_column')
eq_(diffs[10][3].name, 'pw')
class CompareTypeSpecificityTest(TestBase):
def _fixture(self):
from alembic.ddl import impl
from sqlalchemy.engine import default
return impl.DefaultImpl(
default.DefaultDialect(), None, False, True, None, {})
def test_string(self):
t1 = String(30)
t2 = String(40)
t3 = VARCHAR(30)
t4 = Integer
impl = self._fixture()
is_(impl.compare_type(Column('x', t3), Column('x', t1)), False)
is_(impl.compare_type(Column('x', t3), Column('x', t2)), True)
is_(impl.compare_type(Column('x', t3), Column('x', t4)), True)
def test_numeric(self):
t1 = Numeric(10, 5)
t2 = Numeric(12, 5)
t3 = DECIMAL(10, 5)
t4 = DateTime
impl = self._fixture()
is_(impl.compare_type(Column('x', t3), Column('x', t1)), False)
is_(impl.compare_type(Column('x', t3), Column('x', t2)), True)
is_(impl.compare_type(Column('x', t3), Column('x', t4)), True)
def test_integer(self):
t1 = Integer()
t2 = SmallInteger()
t3 = BIGINT()
t4 = String()
t5 = INTEGER()
t6 = BigInteger()
impl = self._fixture()
is_(impl.compare_type(Column('x', t5), Column('x', t1)), False)
is_(impl.compare_type(Column('x', t3), Column('x', t1)), True)
is_(impl.compare_type(Column('x', t3), Column('x', t6)), False)
is_(impl.compare_type(Column('x', t3), Column('x', t2)), True)
is_(impl.compare_type(Column('x', t5), Column('x', t2)), True)
is_(impl.compare_type(Column('x', t1), Column('x', t4)), True)
def test_datetime(self):
t1 = DateTime()
t2 = DateTime(timezone=False)
t3 = DateTime(timezone=True)
impl = self._fixture()
is_(impl.compare_type(Column('x', t1), Column('x', t2)), False)
is_(impl.compare_type(Column('x', t1), Column('x', t3)), True)
is_(impl.compare_type(Column('x', t2), Column('x', t3)), True)
class AutogenerateCustomCompareTypeTest(AutogenTest, TestBase):
__only_on__ = 'sqlite'
@classmethod
def _get_db_schema(cls):
m = MetaData()
Table('sometable', m,
Column('id', Integer, primary_key=True),
Column('value', Integer))
return m
@classmethod
def _get_model_schema(cls):
m = MetaData()
Table('sometable', m,
Column('id', Integer, primary_key=True),
Column('value', String))
return m
def test_uses_custom_compare_type_function(self):
my_compare_type = Mock()
self.context._user_compare_type = my_compare_type
uo = ops.UpgradeOps(ops=[])
ctx = self.autogen_context
autogenerate._produce_net_changes(ctx, uo)
first_table = self.m2.tables['sometable']
first_column = first_table.columns['id']
eq_(len(my_compare_type.mock_calls), 2)
# We'll just test the first call
_, args, _ = my_compare_type.mock_calls[0]
(context, inspected_column, metadata_column,
inspected_type, metadata_type) = args
eq_(context, self.context)
eq_(metadata_column, first_column)
eq_(metadata_type, first_column.type)
eq_(inspected_column.name, first_column.name)
eq_(type(inspected_type), INTEGER)
def test_column_type_not_modified_custom_compare_type_returns_False(self):
my_compare_type = Mock()
my_compare_type.return_value = False
self.context._user_compare_type = my_compare_type
diffs = []
ctx = self.autogen_context
diffs = []
autogenerate._produce_net_changes(ctx, diffs)
eq_(diffs, [])
def test_column_type_modified_custom_compare_type_returns_True(self):
my_compare_type = Mock()
my_compare_type.return_value = True
self.context._user_compare_type = my_compare_type
ctx = self.autogen_context
uo = ops.UpgradeOps(ops=[])
autogenerate._produce_net_changes(ctx, uo)
diffs = uo.as_diffs()
eq_(diffs[0][0][0], 'modify_type')
eq_(diffs[1][0][0], 'modify_type')
class PKConstraintUpgradesIgnoresNullableTest(AutogenTest, TestBase):
__backend__ = True
# test workaround for SQLAlchemy issue #3023, alembic issue #199
@classmethod
def _get_db_schema(cls):
m = MetaData()
Table(
'person_to_role', m,
Column('person_id', Integer, autoincrement=False),
Column('role_id', Integer, autoincrement=False),
PrimaryKeyConstraint('person_id', 'role_id')
)
return m
@classmethod
def _get_model_schema(cls):
return cls._get_db_schema()
def test_no_change(self):
diffs = []
ctx = self.autogen_context
autogenerate._produce_net_changes(ctx, diffs)
eq_(diffs, [])
class AutogenKeyTest(AutogenTest, TestBase):
__only_on__ = 'sqlite'
@classmethod
def _get_db_schema(cls):
m = MetaData()
Table('someothertable', m,
Column('id', Integer, primary_key=True),
Column('value', Integer, key="somekey"),
)
return m
@classmethod
def _get_model_schema(cls):
m = MetaData()
Table('sometable', m,
Column('id', Integer, primary_key=True),
Column('value', Integer, key="someotherkey"),
)
Table('someothertable', m,
Column('id', Integer, primary_key=True),
Column('value', Integer, key="somekey"),
Column("othervalue", Integer, key="otherkey")
)
return m
symbols = ['someothertable', 'sometable']
def test_autogen(self):
uo = ops.UpgradeOps(ops=[])
ctx = self.autogen_context
autogenerate._produce_net_changes(ctx, uo)
diffs = uo.as_diffs()
eq_(diffs[0][0], "add_table")
eq_(diffs[0][1].name, "sometable")
eq_(diffs[1][0], "add_column")
eq_(diffs[1][3].key, "otherkey")
class AutogenVersionTableTest(AutogenTest, TestBase):
__only_on__ = 'sqlite'
version_table_name = 'alembic_version'
version_table_schema = None
@classmethod
def _get_db_schema(cls):
m = MetaData()
Table(
cls.version_table_name, m,
Column('x', Integer), schema=cls.version_table_schema)
return m
@classmethod
def _get_model_schema(cls):
m = MetaData()
return m
def test_no_version_table(self):
diffs = []
ctx = self.autogen_context
autogenerate._produce_net_changes(ctx, diffs)
eq_(diffs, [])
def test_version_table_in_target(self):
diffs = []
Table(
self.version_table_name,
self.m2, Column('x', Integer), schema=self.version_table_schema)
ctx = self.autogen_context
autogenerate._produce_net_changes(ctx, diffs)
eq_(diffs, [])
class AutogenCustomVersionTableSchemaTest(AutogenVersionTableTest):
__only_on__ = 'postgresql'
version_table_schema = 'test_schema'
configure_opts = {'version_table_schema': 'test_schema'}
class AutogenCustomVersionTableTest(AutogenVersionTableTest):
version_table_name = 'my_version_table'
configure_opts = {'version_table': 'my_version_table'}
class AutogenCustomVersionTableAndSchemaTest(AutogenVersionTableTest):
__only_on__ = 'postgresql'
version_table_name = 'my_version_table'
version_table_schema = 'test_schema'
configure_opts = {
'version_table': 'my_version_table',
'version_table_schema': 'test_schema'}
class AutogenerateDiffOrderTest(AutogenTest, TestBase):
__only_on__ = 'sqlite'
@classmethod
def _get_db_schema(cls):
return MetaData()
@classmethod
def _get_model_schema(cls):
m = MetaData()
Table('parent', m,
Column('id', Integer, primary_key=True)
)
Table('child', m,
Column('parent_id', Integer, ForeignKey('parent.id')),
)
return m
def test_diffs_order(self):
"""
        Added in order to test that parent tables are generated before their
        child tables (tables with FKs)
"""
ctx = self.autogen_context
uo = ops.UpgradeOps(ops=[])
autogenerate._produce_net_changes(ctx, uo)
diffs = uo.as_diffs()
eq_(diffs[0][0], 'add_table')
eq_(diffs[0][1].name, "parent")
eq_(diffs[1][0], 'add_table')
eq_(diffs[1][1].name, "child")
class CompareMetadataTest(ModelOne, AutogenTest, TestBase):
__only_on__ = 'sqlite'
def test_compare_metadata(self):
metadata = self.m2
diffs = autogenerate.compare_metadata(self.context, metadata)
eq_(
diffs[0],
('add_table', metadata.tables['item'])
)
eq_(diffs[1][0], 'remove_table')
eq_(diffs[1][1].name, "extra")
eq_(diffs[2][0], "add_column")
eq_(diffs[2][1], None)
eq_(diffs[2][2], "address")
eq_(diffs[2][3], metadata.tables['address'].c.street)
eq_(diffs[3][0], "add_constraint")
eq_(diffs[3][1].name, "uq_email")
eq_(diffs[4][0], "add_column")
eq_(diffs[4][1], None)
eq_(diffs[4][2], "order")
eq_(diffs[4][3], metadata.tables['order'].c.user_id)
eq_(diffs[5][0][0], "modify_type")
eq_(diffs[5][0][1], None)
eq_(diffs[5][0][2], "order")
eq_(diffs[5][0][3], "amount")
eq_(repr(diffs[5][0][5]), "NUMERIC(precision=8, scale=2)")
eq_(repr(diffs[5][0][6]), "Numeric(precision=10, scale=2)")
self._assert_fk_diff(
diffs[6], "add_fk",
"order", ["user_id"],
"user", ["id"]
)
eq_(diffs[7][0][0], "modify_default")
eq_(diffs[7][0][1], None)
eq_(diffs[7][0][2], "user")
eq_(diffs[7][0][3], "a1")
eq_(diffs[7][0][6].arg, "x")
eq_(diffs[8][0][0], 'modify_nullable')
eq_(diffs[8][0][5], True)
eq_(diffs[8][0][6], False)
eq_(diffs[9][0], 'remove_index')
eq_(diffs[9][1].name, 'pw_idx')
eq_(diffs[10][0], 'remove_column')
eq_(diffs[10][3].name, 'pw')
def test_compare_metadata_include_object(self):
metadata = self.m2
def include_object(obj, name, type_, reflected, compare_to):
if type_ == "table":
return name in ("extra", "order")
elif type_ == "column":
return name != "amount"
else:
return True
context = MigrationContext.configure(
connection=self.bind.connect(),
opts={
'compare_type': True,
'compare_server_default': True,
'include_object': include_object,
}
)
diffs = autogenerate.compare_metadata(context, metadata)
eq_(diffs[0][0], 'remove_table')
eq_(diffs[0][1].name, "extra")
eq_(diffs[1][0], "add_column")
eq_(diffs[1][1], None)
eq_(diffs[1][2], "order")
eq_(diffs[1][3], metadata.tables['order'].c.user_id)
def test_compare_metadata_include_symbol(self):
metadata = self.m2
def include_symbol(table_name, schema_name):
return table_name in ('extra', 'order')
context = MigrationContext.configure(
connection=self.bind.connect(),
opts={
'compare_type': True,
'compare_server_default': True,
'include_symbol': include_symbol,
}
)
diffs = autogenerate.compare_metadata(context, metadata)
eq_(diffs[0][0], 'remove_table')
eq_(diffs[0][1].name, "extra")
eq_(diffs[1][0], "add_column")
eq_(diffs[1][1], None)
eq_(diffs[1][2], "order")
eq_(diffs[1][3], metadata.tables['order'].c.user_id)
eq_(diffs[2][0][0], "modify_type")
eq_(diffs[2][0][1], None)
eq_(diffs[2][0][2], "order")
eq_(diffs[2][0][3], "amount")
eq_(repr(diffs[2][0][5]), "NUMERIC(precision=8, scale=2)")
eq_(repr(diffs[2][0][6]), "Numeric(precision=10, scale=2)")
eq_(diffs[2][1][0], 'modify_nullable')
eq_(diffs[2][1][2], 'order')
eq_(diffs[2][1][5], False)
eq_(diffs[2][1][6], True)
def test_compare_metadata_as_sql(self):
context = MigrationContext.configure(
connection=self.bind.connect(),
opts={'as_sql': True}
)
metadata = self.m2
assert_raises_message(
CommandError,
"autogenerate can't use as_sql=True as it prevents "
"querying the database for schema information",
autogenerate.compare_metadata, context, metadata
)
class PGCompareMetaData(ModelOne, AutogenTest, TestBase):
__only_on__ = 'postgresql'
schema = "test_schema"
def test_compare_metadata_schema(self):
metadata = self.m2
context = MigrationContext.configure(
connection=self.bind.connect(),
opts={
"include_schemas": True
}
)
diffs = autogenerate.compare_metadata(context, metadata)
eq_(
diffs[0],
('add_table', metadata.tables['test_schema.item'])
)
eq_(diffs[1][0], 'remove_table')
eq_(diffs[1][1].name, "extra")
eq_(diffs[2][0], "add_column")
eq_(diffs[2][1], "test_schema")
eq_(diffs[2][2], "address")
eq_(diffs[2][3], metadata.tables['test_schema.address'].c.street)
eq_(diffs[3][0], "add_constraint")
eq_(diffs[3][1].name, "uq_email")
eq_(diffs[4][0], "add_column")
eq_(diffs[4][1], "test_schema")
eq_(diffs[4][2], "order")
eq_(diffs[4][3], metadata.tables['test_schema.order'].c.user_id)
eq_(diffs[5][0][0], 'modify_nullable')
eq_(diffs[5][0][5], False)
eq_(diffs[5][0][6], True)
class OrigObjectTest(TestBase):
def setUp(self):
self.metadata = m = MetaData()
t = Table(
't', m,
Column('id', Integer(), primary_key=True),
Column('x', Integer())
)
self.ix = Index('ix1', t.c.id)
fk = ForeignKeyConstraint(['t_id'], ['t.id'])
q = Table(
'q', m,
Column('t_id', Integer()),
fk
)
self.table = t
self.fk = fk
self.ck = CheckConstraint(t.c.x > 5)
t.append_constraint(self.ck)
self.uq = UniqueConstraint(q.c.t_id)
self.pk = t.primary_key
def test_drop_fk(self):
fk = self.fk
op = ops.DropConstraintOp.from_constraint(fk)
is_(op.to_constraint(), fk)
is_(op.reverse().to_constraint(), fk)
def test_add_fk(self):
fk = self.fk
op = ops.AddConstraintOp.from_constraint(fk)
is_(op.to_constraint(), fk)
is_(op.reverse().to_constraint(), fk)
is_not_(None, op.to_constraint().table)
def test_add_check(self):
ck = self.ck
op = ops.AddConstraintOp.from_constraint(ck)
is_(op.to_constraint(), ck)
is_(op.reverse().to_constraint(), ck)
is_not_(None, op.to_constraint().table)
def test_drop_check(self):
ck = self.ck
op = ops.DropConstraintOp.from_constraint(ck)
is_(op.to_constraint(), ck)
is_(op.reverse().to_constraint(), ck)
is_not_(None, op.to_constraint().table)
def test_add_unique(self):
uq = self.uq
op = ops.AddConstraintOp.from_constraint(uq)
is_(op.to_constraint(), uq)
is_(op.reverse().to_constraint(), uq)
is_not_(None, op.to_constraint().table)
def test_drop_unique(self):
uq = self.uq
op = ops.DropConstraintOp.from_constraint(uq)
is_(op.to_constraint(), uq)
is_(op.reverse().to_constraint(), uq)
is_not_(None, op.to_constraint().table)
def test_add_pk_no_orig(self):
op = ops.CreatePrimaryKeyOp('pk1', 't', ['x', 'y'])
pk = op.to_constraint()
eq_(pk.name, 'pk1')
eq_(pk.table.name, 't')
def test_add_pk(self):
pk = self.pk
op = ops.AddConstraintOp.from_constraint(pk)
is_(op.to_constraint(), pk)
is_(op.reverse().to_constraint(), pk)
is_not_(None, op.to_constraint().table)
def test_drop_pk(self):
pk = self.pk
op = ops.DropConstraintOp.from_constraint(pk)
is_(op.to_constraint(), pk)
is_(op.reverse().to_constraint(), pk)
is_not_(None, op.to_constraint().table)
def test_drop_column(self):
t = self.table
op = ops.DropColumnOp.from_column_and_tablename(None, 't', t.c.x)
is_(op.to_column(), t.c.x)
is_(op.reverse().to_column(), t.c.x)
is_not_(None, op.to_column().table)
def test_add_column(self):
t = self.table
op = ops.AddColumnOp.from_column_and_tablename(None, 't', t.c.x)
is_(op.to_column(), t.c.x)
is_(op.reverse().to_column(), t.c.x)
is_not_(None, op.to_column().table)
def test_drop_table(self):
t = self.table
op = ops.DropTableOp.from_table(t)
is_(op.to_table(), t)
is_(op.reverse().to_table(), t)
is_(self.metadata, op.to_table().metadata)
def test_add_table(self):
t = self.table
op = ops.CreateTableOp.from_table(t)
is_(op.to_table(), t)
is_(op.reverse().to_table(), t)
is_(self.metadata, op.to_table().metadata)
def test_drop_index(self):
op = ops.DropIndexOp.from_index(self.ix)
is_(op.to_index(), self.ix)
is_(op.reverse().to_index(), self.ix)
def test_create_index(self):
op = ops.CreateIndexOp.from_index(self.ix)
is_(op.to_index(), self.ix)
is_(op.reverse().to_index(), self.ix)
|
the-stack_106_18470
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ar'
import time
import shutil
import os
import math
import matplotlib.pyplot as plt
import skimage.io as skio
import skimage.transform as sktf
import skimage.exposure as skexp
import numpy as np
import keras
from keras.layers import Conv2D, UpSampling2D, \
Flatten, Activation, Reshape, MaxPooling2D, Input, merge
from keras.models import Model
import keras.losses
import keras.callbacks as kall
import pandas as pd
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.vis_utils import plot_model as kplot
from keras.utils import np_utils
from keras.utils.vis_utils import plot_model
from run01_fcncls_channel_train_v2 import readDataImagesCls, prepareCervixAndChannelInfo, buildModelFCNNCLS_UpSampling2D_V3
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
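# Illustrative check (not part of the original script): softmax of [1, 2, 3]
# is roughly [0.09003, 0.24473, 0.66524] and always sums to 1, e.g.
# softmax(np.array([1.0, 2.0, 3.0])).sum() ~= 1.0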
#####################################################
if __name__ == '__main__':
# (1) Setup Tran/Validation data
fidx = '/home/ar/@Kaggle/01_Intel_&_MobileODT_Cervical_Cancer_Screening/data/test/00_test_original-1024x1024-bordered/idx.txt'
# fidx = '/home/ar/@Kaggle/01_Intel_&_MobileODT_Cervical_Cancer_Screening/data/test/00_test_original-512x512-bordered/idx.txt'
fidxOut = '{0}-fcncls-v5.csv'.format(fidx)
wdirVal = os.path.dirname(fidx)
pathImgs = pd.read_csv(fidx)['path'].as_matrix()
pathImgs = np.array([os.path.join(wdirVal, '{0}'.format(xx)) for xx in pathImgs])
# (2) Input/Output models
sizeImg = 1024
# sizeImg = 512
pathModelRestart = '/home/ar/@Kaggle/01_Intel_&_MobileODT_Cervical_Cancer_Screening/data/train-original-1024x1024-bordered/models_test/model_fcncls_channel_V2_valAcc_v1.h5'
# pathModelRestart = '/home/ar/@Kaggle/01_Intel_&_MobileODT_Cervical_Cancer_Screening/data/train-original-512x512-bordered/model_fcncls_channel_V2_valLoss_v1.h5'
dataShape = (sizeImg, sizeImg, 3)
mskShape = (sizeImg, sizeImg)
numCls = 4
model = buildModelFCNNCLS_UpSampling2D_V3(inpShape=dataShape, numCls=numCls)
model.load_weights(pathModelRestart)
model.summary()
# (5) Preload data
numImg = len(pathImgs)
#
arrResults = np.zeros((numImg, numCls-1))
arrImgIdx = []
for ipath, path in enumerate(pathImgs):
arrImgIdx.append(os.path.basename(path))
fimgMasked = '{0}-msk.png-channel.png'.format(path)
timg4 = skio.imread(fimgMasked)
# tinf = prepareCervixAndChannelInfo(timg)
# dataBatch = preprocImgForInference(timg, tinf,batchSize=64, isRandomize=True)
timg3 = timg4[:,:,:3].astype(np.float32)/127.5 - 1.0
tmsk = timg4[:,:, 3]
ret = model.predict_on_batch(timg3.reshape([1] + list(timg3.shape)))[0]
retProb = ret.reshape((sizeImg, sizeImg, numCls))
retProbTypes = ret.copy()
# retProbTypes[:,0] = 0
retSortIdx = np.argsort(-retProbTypes, axis=1)[:,0].reshape(mskShape)
retSortIdx[tmsk<100]=0
# (1)
if np.sum(retSortIdx>0)>7:
retProbL = retProb.reshape([-1, numCls])
retSortIdxL = retSortIdx.reshape(-1)
tmpProbs = []
for xx in range(3):
tprobCls = np.sum(retProb[:,:,xx+1] * (retSortIdx==(xx+1)) )
tmpProbs.append(tprobCls)
tmpProbs = np.array(tmpProbs)
tmpProbs /= tmpProbs.sum()
tmpProbs[tmpProbs<0.1] = 0.1
tmpProbs /= tmpProbs.sum()
probCls = tmpProbs
else:
probCls = np.array([0.1688, 0.5273, 0.3038])
# (2)
# if np.sum(retSortIdx>0)>7:
# tmpProbs = np.array([float(np.sum(retSortIdx==(xx+1))) for xx in range(3)])
# tmpProbs /= tmpProbs.sum()
# tmpProbs[tmpProbs>0.9] = 0.9
# tmpProbs /= tmpProbs.sum()
# probCls = tmpProbs
# else:
# probCls = np.array([0.1688, 0.5273, 0.3038])
# plt.imshow(retSortIdx)
# plt.show()
#
# (3)
# tmskChn = (tmsk.reshape(-1)==128)
# probOnChn = ret.copy()
# probOnChn[~tmskChn] = 0
#
# if np.sum(probOnChn)>0.001:
# probCls = np.sum(probOnChn, axis=0)[1:]
# else:
# probCls = np.array([0.1688,0.5273,0.3038])
# probClsMean = probCls / np.sum(probCls)
# probClsSMax = softmax(probClsMean)
# arrResults[ipath] = probClsMean
arrResults[ipath] = probCls
print ('\t[{0}/{1}]'.format(ipath, numImg))
print ('---')
with open(fidxOut, 'w') as f:
f.write('image_name,Type_1,Type_2,Type_3\n')
for ii in range(len(arrImgIdx)):
fimgIdx = arrImgIdx[ii]
probs = arrResults[ii]
f.write('{0},{1:0.5f},{2:0.5f},{3:0.5f}\n'.format(fimgIdx, probs[0], probs[1], probs[2]))
print ('-')
|
the-stack_106_18471
|
"""
References:
* https://arxiv.org/abs/2104.13963
* https://github.com/facebookresearch/suncet/blob/master/src/losses.py
Majority of the code comes from here:
https://github.com/facebookresearch/suncet/blob/master/src/losses.py
"""
from . import config
from copy import deepcopy
import tensorflow as tf
def get_paws_loss(multicrop=6, tau=0.1, T=0.25, me_max=True):
"""
Computes PAWS loss
:param multicrop: number of small views
:param tau: cosine temperature
:param T: sharpening temperature
:param me_max: mean entropy maximization flag
    :return: a loss function returning (cross-entropy loss, me-max regularizer)
"""
def sharpen(proba):
""" Target sharpening function """
sharp_p = proba ** (1.0 / T)
sharp_p /= tf.reduce_sum(sharp_p, axis=1, keepdims=True)
return sharp_p
def snn(query, supports, labels):
""" Soft Nearest Neighbours similarity classifier """
# Step 1: Normalize embeddings
query = tf.math.l2_normalize(query, axis=1)
supports = tf.math.l2_normalize(supports, axis=1)
# Step 2: Compute similarity
return tf.nn.softmax(query @ tf.transpose(supports) / tau, axis=1) @ labels
def loss(
anchor_views,
anchor_supports,
anchor_support_labels,
target_views,
target_supports,
target_support_labels,
sharpen=sharpen,
snn=snn,
):
# -- NOTE: num views of each unlabeled instance = 2+multicrop
        # (2 global views and `multicrop` local views, 6 by default)
batch_size = len(anchor_views) // (2 + multicrop)
# Step 1: Compute anchor predictions
probs = snn(anchor_views, anchor_supports, anchor_support_labels)
# Step 2: Compute targets for anchor predictions
targets = tf.stop_gradient(
snn(target_views, target_supports, target_support_labels)
)
targets = tf.stop_gradient(sharpen(targets))
if multicrop > 0:
mc_target = tf.stop_gradient(
0.5 * (targets[:batch_size] + targets[batch_size:])
)
targets = tf.stop_gradient(
tf.concat([targets, *[mc_target for _ in range(multicrop)]], axis=0)
)
# For numerical stability
mask = tf.stop_gradient(tf.math.greater(targets, 1e-4))
mask = tf.stop_gradient(tf.cast(mask, dtype=targets.dtype))
targets *= tf.stop_gradient(mask)
# Step 3: compute cross-entropy loss H(targets, queries)
loss = tf.reduce_mean(tf.reduce_sum(tf.math.log(probs ** (-targets)), axis=1))
# Step 4: compute me-max regularizer
rloss = 0.0
if me_max:
avg_probs = tf.reduce_mean(sharpen(probs), axis=0)
rloss -= tf.reduce_sum(tf.math.log(avg_probs ** (-avg_probs)))
return loss, rloss
return loss
def get_suncet_loss(
num_classes=10, batch_size=64 * config.SUP_VIEWS, temperature=0.1, rank=0
):
"""
Computes supervised noise contrastive estimation loss (refer
https://arxiv.org/abs/2006.10803)
:param num_classes: number of image classes
:param batch_size: number of images per class per batch
:param temperature: cosine temperature
    :param rank: denotes the rank of the current device (0 in this implementation)
    :return: a loss function computing the SUNCET contrastive loss
"""
local_images = batch_size * num_classes
total_images = deepcopy((local_images))
diag_mask = tf.Variable(tf.ones((local_images, total_images)))
offset = rank * local_images
for i in range(local_images):
diag_mask[i, offset + i].assign(0.0)
diag_mask = tf.convert_to_tensor(diag_mask)
def contrastive_loss(z, labels):
# Step 1: Normalize embeddings
z = tf.math.l2_normalize(z, axis=1)
# Step 2: Compute class predictions
exp_cs = tf.math.exp(z @ tf.transpose(z) / temperature) * diag_mask
exp_cs_sum = tf.reduce_sum(exp_cs, axis=1, keepdims=True)
probs = tf.math.divide(exp_cs, exp_cs_sum) @ labels
# Step 3: compute loss for predictions
targets = labels[offset : offset + local_images]
overlap = probs ** (-targets)
loss = tf.reduce_mean(tf.reduce_sum(tf.math.log(overlap), axis=1))
return loss
return contrastive_loss
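# --- Illustrative usage (a minimal sketch, not part of the original module). ---
# Shapes and values below are assumptions for demonstration only: 4 labeled
# support embeddings over 2 classes and 4 unlabeled anchor/target views with
# 8-dimensional embeddings; multicrop=0 keeps the example small.
def _demo_paws_loss():
    paws_loss = get_paws_loss(multicrop=0, tau=0.1, T=0.25, me_max=True)
    anchor_views = tf.random.normal((4, 8))
    target_views = tf.random.normal((4, 8))
    supports = tf.random.normal((4, 8))
    support_labels = tf.one_hot([0, 0, 1, 1], depth=2)
    ce_loss, me_max_reg = paws_loss(
        anchor_views, supports, support_labels,
        target_views, supports, support_labels,
    )
    return float(ce_loss), float(me_max_reg)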
|
the-stack_106_18472
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from xnas.search_algorithm.utils import Categorical
from xnas.core.utils import index_to_one_hot, one_hot_to_index
import copy
# SNG + DDPNAS
class MIGO:
"""
Stochastic Natural Gradient for Categorical Distribution
"""
def __init__(self, categories,
delta_init=1., step=3, pruning=True,
init_theta=None, max_mize=True, sample_with_prob=False,
utility_function='picewise', utility_function_hyper=0.5,
momentum=True, gamma=0.9, sampling_number_per_edge=1):
# Categorical distribution
self.p_model = Categorical(categories)
# valid dimension size
self.p_model.C = np.array(self.p_model.C)
self.valid_d = len(self.p_model.C[self.p_model.C > 1])
if init_theta is not None:
self.p_model.theta = init_theta
self.delta = delta_init
self.eps = self.delta
self.sample = []
self.objective = []
self.max_mize = -1 if max_mize else 1
# this is for dynamic distribution
self.sample_with_prob = sample_with_prob
self.ignore_index = []
self.sample_index = []
self.pruned_index = []
self.pruning = pruning
self.steps = step
self.current_step = 1
self.training_finish = False
self.utility_function = utility_function
self.utility_function_hyper = utility_function_hyper
self.Momentum = momentum
self.gamma = gamma
self.velocity = np.zeros(self.p_model.theta.shape)
self.init_record()
self.sampling_number_per_edge = sampling_number_per_edge
def init_record(self):
for i in range(self.p_model.d):
self.ignore_index.append([])
self.sample_index.append(list(range(self.p_model.Cmax)))
self.pruned_index.append([])
def get_delta(self):
return self.delta
def record_information(self, sample, objective):
self.sample.append(sample)
self.objective.append(objective*self.max_mize)
def sampling(self):
if self.sampling_number_per_edge == 1:
return index_to_one_hot(self.sampling_index(), self.p_model.Cmax)
else:
sample = []
sample_one_hot_like = np.zeros([self.p_model.d, self.p_model.Cmax])
for i in range(self.p_model.d):
# get the prob
if self.sample_with_prob:
prob = copy.deepcopy(self.p_model.theta[i, self.sample_index[i]])
prob = prob / prob.sum()
sample.append(np.random.choice(
self.sample_index[i], size=self.sampling_number_per_edge, p=prob, replace=False))
else:
sample.append(np.random.choice(self.sample_index[i],
size=self.sampling_number_per_edge, replace=False))
if len(self.sample_index[i]) > 0:
for j in sample[i]:
self.sample_index[i].remove(int(j))
for j in range(self.sampling_number_per_edge):
sample_one_hot_like[i, int(sample[i][j])] = 1
return sample_one_hot_like
def sampling_best(self):
sample = []
for i in range(self.p_model.d):
sample.append(np.argmax(self.p_model.theta[i]))
sample = np.array(sample)
return index_to_one_hot(sample, self.p_model.Cmax)
def sampling_index(self):
# fairness sampling
sample = []
for i in range(self.p_model.d):
# get the prob
if self.sample_with_prob:
prob = copy.deepcopy(self.p_model.theta[i, self.sample_index[i]])
prob = prob / prob.sum()
sample.append(int(np.random.choice(self.sample_index[i], p=prob)))
else:
sample.append(int(np.random.choice(self.sample_index[i])))
if len(self.sample_index[i]) > 0:
self.sample_index[i].remove(sample[i])
return np.array(sample)
def mle(self):
"""
Get most likely categorical variables (one-hot)
"""
m = self.p_model.theta.argmax(axis=1)
x = np.zeros((self.p_model.d, self.p_model.Cmax))
for i, c in enumerate(m):
x[i, c] = 1
return x
def update(self):
if len(self.sample_index[0]) < self.sampling_number_per_edge:
objective = np.array(self.objective)
sample_array = np.array(self.sample)
self.update_function(sample_array, objective)
self.sample = []
self.objective = []
self.current_step += 1
if self.pruning and self.current_step > self.steps:
# pruning the index
pruned_weight = copy.deepcopy(self.p_model.theta)
for index in range(self.p_model.d):
if not len(self.pruned_index[index]) == 0:
pruned_weight[index, self.pruned_index[index]] = np.nan
self.pruned_index[index].append(np.nanargmin(pruned_weight[index, :]))
if len(self.pruned_index[0]) >= (self.p_model.Cmax - self.sampling_number_per_edge):
self.training_finish = True
self.current_step = 1
self.update_sample_index()
def update_sample_index(self):
for i in range(self.p_model.d):
self.sample_index[i] = list(set(range(self.p_model.Cmax)) - set(self.pruned_index[i]))
def update_function(self, c_one, fxc, range_restriction=True):
aru, idx = self.utility(fxc)
if np.all(aru == 0):
            # If all the points have the same f-value, theta is left
            # unchanged and we return early, skipping the rest of the update.
return
ng = np.mean(aru[:, np.newaxis, np.newaxis] * (c_one[idx] - self.p_model.theta), axis=0)
        # accumulate the per-category natural-gradient scale (written out for readability)
sl = []
for i, K in enumerate(self.p_model.C):
theta_i = self.p_model.theta[i, :]
s_i = 1. / np.sqrt(theta_i) * ng[i, :]
sl += list(s_i)
sl = np.array(sl)
pnorm = np.sqrt(np.dot(sl, sl)) + 1e-8
self.eps = self.delta / pnorm
if self.Momentum:
self.velocity = self.gamma * self.velocity + (1 - self.gamma) * self.eps * ng
self.p_model.theta += self.velocity
else:
self.p_model.theta += self.eps * ng
for i in range(self.p_model.d):
ci = self.p_model.C[i]
# Constraint for theta (minimum value of theta and sum of theta = 1.0)
theta_min = 1. / (self.valid_d * (ci - 1)) if range_restriction and ci > 1 else 0.
self.p_model.theta[i, :ci] = np.maximum(self.p_model.theta[i, :ci], theta_min)
theta_sum = self.p_model.theta[i, :ci].sum()
tmp = theta_sum - theta_min * ci
self.p_model.theta[i, :ci] -= (theta_sum - 1.) * (self.p_model.theta[i, :ci] - theta_min) / tmp
# Ensure the summation to 1
self.p_model.theta[i, :ci] /= self.p_model.theta[i, :ci].sum()
def utility(self, f, rho=0.25, negative=True):
"""
Ranking Based Utility Transformation
w(f(x)) / lambda =
1/mu if rank(x) <= mu
0 if mu < rank(x) < lambda - mu
-1/mu if lambda - mu <= rank(x)
where rank(x) is the number of at least equally good
        points, including itself.
The number of good and bad points, mu, is ceil(lambda/4).
That is,
mu = 1 if lambda = 2
mu = 1 if lambda = 4
mu = 2 if lambda = 6, etc.
        If there are tied points, the utility values are
        equally distributed among these points.
"""
eps = 1e-14
idx = np.argsort(f)
lam = len(f)
mu = int(np.ceil(lam * rho))
if self.utility_function == 'picewise':
_w = np.zeros(lam)
_w[:mu] = 1 / mu
_w[lam - mu:] = -1 / mu if negative else 0
elif self.utility_function == 'log':
_w = np.zeros(lam)
_x = np.array([i+1 for i in range(lam)])
_x = (_x - np.mean(_x)) / np.max(_x) * 1.5
_w = np.flip(np.clip(self.utility_function_hyper * np.log((1+_x)/(1-_x)), a_min=-1, a_max=1))
elif self.utility_function == 'distance_log':
_w = np.zeros(lam)
_f = np.sort(f) - np.min(f)
_x = (_f / np.max(_f) - 0.5)
_w = np.flip(np.clip(self.utility_function_hyper * np.log((1 + _x) / (1 - _x)), a_min=-1, a_max=1))
else:
raise NotImplementedError
w = np.zeros(lam)
istart = 0
for i in range(f.shape[0] - 1):
if f[idx[i + 1]] - f[idx[i]] < eps * f[idx[i]]:
pass
elif istart < i:
w[istart:i + 1] = np.mean(_w[istart:i + 1])
istart = i + 1
else:
w[i] = _w[i]
istart = i + 1
w[istart:] = np.mean(_w[istart:])
return w, idx
def log_header(self, theta_log=False):
header_list = ['delta', 'eps', 'theta_converge']
if theta_log:
for i in range(self.p_model.d):
                header_list += ['theta%d_%d' % (i, j) for j in range(self.p_model.C[i])]
return header_list
def log(self, theta_log=False):
log_list = [self.delta, self.eps, self.p_model.theta.max(axis=1).mean()]
if theta_log:
for i in range(self.p_model.d):
                log_list += ['%f' % self.p_model.theta[i, j] for j in range(self.p_model.C[i])]
return log_list
def load_theta_from_log(self, theta_log):
self.p_model.theta = np.zeros((self.p_model.d, self.p_model.Cmax))
k = 0
for i in range(self.p_model.d):
for j in range(self.p_model.C[i]):
self.p_model.theta[i, j] = theta_log[k]
k += 1
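# --- Illustrative usage (a minimal sketch, not part of the original module). ---
# A toy search space with 3 decision variables and 4 candidate operations each;
# the random "reward" stands in for a real objective such as validation accuracy,
# and the xnas package must be importable for the module-level imports to work.
if __name__ == "__main__":
    optimizer = MIGO(categories=[4, 4, 4], step=1, pruning=True)
    while not optimizer.training_finish:
        arch = optimizer.sampling()            # one-hot (d, Cmax) architecture sample
        reward = float(np.random.rand())       # placeholder for a real evaluation
        optimizer.record_information(arch, reward)
        optimizer.update()
    print(optimizer.sampling_best())           # most likely architecture after pruning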
|
the-stack_106_18473
|
# -*- coding: utf-8 -*-
import scrapy
class scrapykmbab08Spider(scrapy.Spider):
name = "scrapykmbab08"
allowed_domains = ["ganjoor.net"]
if 2 == 1:
start_urls = ["https://ganjoor.net/hojviri/kashfol-mahjoob/kmbab08/sh"]
else:
start_urls = ["https://ganjoor.net/hojviri/kashfol-mahjoob/kmbab08/sh" + "1"]
order = 1
def parse(self, response):
index = 0
sh = dict()
sh["type"] = "fasl"
sh["text"] = dict()
for i, poem in enumerate(response.css("div.poem>article>*")):
if index == 0:
if 0 == 1:
sh["title"] = "فصل" + " شماره " + str(self.order) + " - " + ''.join(poem.css("div.m1>p::text").extract()).strip()
elif 0 == 2:
sh["title"] = "فصل" + " شماره " + str(self.order) + " - " + ''.join(poem.css("div.m2>p::text").extract()).strip()
elif 0 == 3:
sh["title"] = "فصل" + " شماره " + str(self.order) + " - " + ''.join(response.css("div.poem>article>h2>a::text").extract()).strip() + ': ' + ''.join(poem.css("div.m1>p::text").extract()).strip()
elif 0 == 4:
sh["title"] = "فصل" + " شماره " + str(self.order) + " - " + ''.join(response.css("div.poem>article>h2>a::text").extract()).strip() + ': ' + ''.join(poem.css("div.m2>p::text").extract()).strip()
else:
sh["title"] = ''.join(response.css("div.poem>article>h2>a::text").extract_first()).strip()
if poem.css("p::text").extract_first() is None or 'rel="bookmark"' in poem.css('*').extract_first() or 'class="spacer"' in poem.css('*').extract_first() or '<div style=' in poem.css('*').extract_first():
continue
if len(poem.css("div.m1>p")) == 1:
if poem.css("div.b"):
if '٭٭٭' not in poem.css("div.m1>p::text").extract_first() and ''.join(poem.css("div.m1>p::text").extract()).strip() != '':
sh["text"][index] = dict([
("m1", ''.join(poem.css("div.m1>p::text").extract()).strip()),
("m2", ''.join(poem.css("div.m2>p::text").extract()).strip()),
])
else:
if '٭٭٭' not in poem.css("p:first-child::text").extract_first() and ''.join(poem.css("p:first-child::text").extract()).strip() != '':
sh["text"][index] = dict([
("t1", ''.join(poem.css("p:first-child::text").extract()).strip()),
("t2", ''.join(poem.css("p:last-child::text").extract()).strip()),
])
else:
if poem.css("div.b2"):
if '٭٭٭' not in poem.css("p:first-child::text").extract_first() and ''.join(poem.css("p:first-child::text").extract()).strip() != '':
sh["text"][index] = dict([
("t1", ''.join(poem.css("p:first-child::text").extract()).strip()),
("t2", ''.join(poem.css("p:last-child::text").extract()).strip()),
])
else:
if '٭٭٭' not in poem.css('p::text').extract_first() and ''.join(poem.css('p::text').extract()).strip() != '':
sh['text'][index] = dict([
('p', ''.join(poem.css('p::text').extract()).strip())
])
index = index + 1
sh["order"] = self.order
self.order = self.order + 1
yield sh
# next_page = response.css("div.navigation>div.navleft>a::attr(href)").extract_first()
if self.order < (2 + 1):
next_page = response.urljoin("https://ganjoor.net/hojviri/kashfol-mahjoob/kmbab08/sh" + str(self.order))
yield scrapy.Request(next_page, callback=self.parse)
|
the-stack_106_18475
|
import json
import sys
import asyncio
from typing import Any, Dict
class DataController(object):
def __init__(self):
self.dataFile: str = "data.json"
self.settingsFile: str = "settings.json"
    async def get_data(self) -> Dict[str, Any]:
try:
with open(self.dataFile, 'r') as f:
return json.load(f)
except (FileNotFoundError, IOError):
print("\x1b[1;31m[ERROR]:\x1b[0m data.json file not found, create a new.")
            jsonData: Dict[str, Any] = {
"warns": {},
"ticket-channel-ids": [],
"ticket-id-counter": 0,
"users-with-active-tickets": []
}
with open(self.dataFile, 'w') as f:
json.dump(jsonData, f, ensure_ascii=True, indent=4)
return jsonData
    async def get_settings(self) -> Dict[str, Any]:
try:
with open(self.settingsFile, 'r') as f:
return json.load(f)
except (FileNotFoundError, IOError):
print("\x1b[1;31m[ERROR]:\x1b[0m settings.json not found, create a new with default data, please fill data with yours")
            jsonSettings: Dict[str, Any] = {
"warns":{
"ban-warns-reach": True,
"max-warns": 3,
"ban-time": 10800
},
"channels": {
"support-channel-id": "000000000000000000",
"support-category-id": "000000000000000000",
"support-log-channeld-id": "000000000000000000"
},
"support-role": "ADMIN",
"enable-channel-logger": True
}
with open(self.settingsFile, 'w+') as f:
json.dump(jsonSettings, f, ensure_ascii=True, indent=4)
return jsonSettings
async def save_data(self, data: dict) -> None:
with open(self.dataFile, 'w') as f:
json.dump(data, f, ensure_ascii=True, indent=4)
|
the-stack_106_18476
|
# from layout2 import Layout
# import layout2
from layout2 import Layout, UPPER_LEFT_CORNER, UPPER_RIGHT_CORNER,\
HORIZONTAL_BAR, VERTICAL_BAR, LOWER_LEFT_CORNER, LOWER_RIGHT_CORNER,\
RTL, RTLPOP
import curses
def test_layout_creation():
board = Layout('src/English.csv')
assert board
def test_layout_creation_missing_file():
try:
board = Layout('src/Klingon.csv')
assert not board
except FileNotFoundError:
assert True
else:
assert False
def test_layout_dump():
board = Layout('src/English.csv')
assert 'a' in board.dump_keyboard()
def test_position_calc_known():
board = Layout('src/English.csv')
row, col = board.calculate_position('1')
assert row == 1 and col == 4, f'row {row} != {1} and col {col} != {4}'
def test_position_calc_unknown():
board = Layout('src/English.csv')
row, col = board.calculate_position('ب')
assert row == -1 and col == -1, f'row {row} != {-1} and col {col} != {-1}'
def test_boxit():
board = Layout('src/English.csv')
result = board.boxit('a')
constructed = UPPER_LEFT_CORNER + HORIZONTAL_BAR +\
UPPER_RIGHT_CORNER + '\n' +\
VERTICAL_BAR + 'a' + VERTICAL_BAR + '\n' +\
LOWER_LEFT_CORNER + HORIZONTAL_BAR + LOWER_RIGHT_CORNER + '\n'
assert result == constructed
def test_boxit_RTL():
board = Layout('src/Farsi_RTL.csv')
result = board.boxit('ا')
constructed = UPPER_LEFT_CORNER + HORIZONTAL_BAR +\
UPPER_RIGHT_CORNER + '\n' +\
VERTICAL_BAR + f'{RTL}ا{RTLPOP}' + VERTICAL_BAR + '\n' +\
LOWER_LEFT_CORNER + HORIZONTAL_BAR + LOWER_RIGHT_CORNER + '\n'
assert result == constructed
def test_key_position_default():
board = Layout('src/English.csv')
board.screen_init()
board.show_keyboard(0, 0)
screen = board.screen_dump()
value = screen['1,4']
assert value == 49, f'Expected 49, got {value} instead.'
board.screen_deinit()
def test_key_hint_on():
board = Layout('src/English.csv')
board.screen_init()
board.show_keyboard(0, 0)
board.key_visibility('1')
screen = board.screen_dump()
value = screen['1,4']
expected = ord('1') | curses.A_STANDOUT
assert value == expected, f'Expected {expected}, got {value} instead.'
board.screen_deinit()
def test_key_hint_off():
board = Layout('src/English.csv')
board.screen_init()
board.show_keyboard(0, 0)
board.key_visibility('1')
board.key_visibility('1', 'OFF')
screen = board.screen_dump()
value = screen['1,4']
expected = ord('1')
assert value == expected, f'Expected {expected}, got {value} instead.'
board.screen_deinit()
|
the-stack_106_18477
|
__author__ = "saeedamen" # Saeed Amen
#
# Copyright 2016 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on a "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
if __name__ == "__main__":
###### below line CRUCIAL when running Windows, otherwise multiprocessing
# doesn"t work! (not necessary on Linux)
from findatapy.util import SwimPool;
SwimPool()
from findatapy.timeseries import Filter, Calendar
import pandas as pd
calendar = Calendar()
filter = Filter()
# choose run_example = 0 for everything
# run_example = 1 - get holidays for FX, EUR and EURUSD, as well as
# listing weekends
# run_example = 2 - get FX delivery dates and FX option expiries for
# various tenors
# run_example = 3 - get number of days between pandas DatetimeIndex
# run_example = 4 - filter time series by EURUSD holidays
# run_example = 4 - option expiries for USDJPY
run_example = 0
if run_example == 1 or run_example == 0:
# Get the holidays (which aren"t weekends)
print(calendar.get_holidays(start_date="01 Jan 1999 00:50",
end_date="31 Dec 1999", cal="FX"))
print(calendar.get_holidays(start_date="01 Jan 2000 00:10",
end_date="31 Dec 2000", cal="EUR"))
print(calendar.get_holidays(start_date="01 Jan 2000 00:10",
end_date="31 Dec 2000", cal="EURUSD"))
# Get the holidays (which are weekends)
print(calendar.get_holidays(start_date="01 Jan 1999 00:50",
end_date="31 Dec 1999", cal="WKD"))
if run_example == 2 or run_example == 0:
# Get delivery dates for these horizon dates - typically would use
# to get forward maturities
print(calendar.get_delivery_date_from_horizon_date(
pd.to_datetime([pd.Timestamp("02 Nov 2020")]), "ON", cal="EURUSD"))
print(calendar.get_delivery_date_from_horizon_date(
pd.to_datetime([pd.Timestamp("02 Nov 2020")]), "1W", cal="EURUSD"))
print(calendar.get_delivery_date_from_horizon_date(
pd.to_datetime([pd.Timestamp("27 Nov 2020")]), "1W", cal="EURUSD"))
# Get 1M expires for these horizon dates - typically would use to get
# option expiries
print(calendar.get_expiry_date_from_horizon_date(
pd.to_datetime([pd.Timestamp("26 Oct 2020")]), "1M", cal="EURUSD"))
if run_example == 3 or run_example == 0:
# Create a list of business days and one which is + 1 day
bus_days = pd.bdate_range("1 Jan 2020", "30 Jan 2020")
bus_days_plus = bus_days + pd.Timedelta(days=7)
print(calendar.get_delta_between_dates(bus_days, bus_days_plus))
if run_example == 4 or run_example == 0:
# Filter a time series by EURUSD holidays
df = pd.DataFrame(index=pd.bdate_range("1 Jan 2020", "31 Dec 2020"))
df["Prices"] = 1
df_filtered = filter.filter_time_series_by_holidays(df, "EURUSD")
print(len(df.index))
print(len(df_filtered.index))
if run_example == 5 or run_example == 0:
# Get expiry for USDJPY
# Get 1M expires for these horizon dates - typically would use to get
# option expiries
print(calendar.get_expiry_date_from_horizon_date(
pd.to_datetime([pd.Timestamp("26 Nov 2008")]), "1M", cal="USDJPY"))
|
the-stack_106_18478
|
from __future__ import absolute_import
import logging
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django import forms
from .jira import JIRAClient, JIRAError
log = logging.getLogger(__name__)
class JIRAFormUtils(object):
@staticmethod
def make_choices(x):
return [(y["id"], y["name"] if "name" in y else y["value"]) for y in x] if x else []
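# Illustrative example (not part of the original module): make_choices turns a
# JIRA REST payload into Django choice tuples; entries may carry either a
# "name" or a "value" key.
# >>> JIRAFormUtils.make_choices([{"id": "1", "name": "Bug"},
# ...                             {"id": "2", "value": "Task"}])
# [('1', 'Bug'), ('2', 'Task')]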
class JIRAOptionsForm(forms.Form):
instance_url = forms.CharField(
label=_("JIRA Instance URL"),
widget=forms.TextInput(attrs={'class': 'span6', 'placeholder': 'e.g. "https://jira.atlassian.com"'}),
help_text=_("It must be visible to the Sentry server"),
required=True
)
username = forms.CharField(
label=_("Username"),
widget=forms.TextInput(attrs={'class': 'span6'}),
help_text=_("Ensure the JIRA user has admin perm. on the project"),
required=True
)
password = forms.CharField(
label=_("Password"),
widget=forms.PasswordInput(attrs={'class': 'span6'}),
required=False
)
default_project = forms.ChoiceField(
label=_("Linked Project"),
)
ignored_fields = forms.CharField(
label=_("Ignored Fields"),
widget=forms.Textarea(attrs={'class': 'span11', 'placeholder': 'e.g. "components, security, customfield_10006"'}),
help_text=_("Comma-separated list of properties that you don't want to show in the form"),
required=False
)
default_priority = forms.ChoiceField(
label=_("Default Priority"),
required=False
)
default_issue_type = forms.ChoiceField(
label=_("Default Issue Type"),
required=False,
)
auto_create = forms.BooleanField(
label=_("Auto create JIRA tickets"),
help_text=_("Automatically create a JIRA ticket for EVERY new issue"),
required=False
)
def __init__(self, data=None, *args, **kwargs):
super(JIRAOptionsForm, self).__init__(data=data, *args, **kwargs)
initial = kwargs.get("initial") or {}
for key, value in self.data.items():
initial[key.lstrip(self.prefix or '')] = value
has_credentials = all(initial.get(k) for k in ('instance_url', 'username', 'password'))
project_safe = False
can_auto_create = False
# auto_create is not available on new configurations
has_auto_create = 'auto_create' in initial
if has_credentials:
jira = JIRAClient(initial['instance_url'], initial['username'], initial['password'])
try:
projects_response = jira.get_projects_list()
except JIRAError as e:
if e.status_code == 401:
has_credentials = False
else:
projects = projects_response.json
if projects:
project_choices = [(p.get('key'), "%s (%s)" % (p.get('name'), p.get('key'))) for p in projects]
project_safe = True
can_auto_create = True
self.fields["default_project"].choices = project_choices
if project_safe and has_auto_create:
try:
priorities_response = jira.get_priorities()
except JIRAError as e:
if e.status_code == 401:
has_credentials = False
else:
priorities = priorities_response.json
if priorities:
priority_choices = [(p.get('id'), "%s" % (p.get('name'))) for p in priorities]
self.fields["default_priority"].choices = priority_choices
default_project = initial.get('default_project')
if default_project:
try:
meta = jira.get_create_meta_for_project(default_project)
except JIRAError as e:
if e.status_code == 401:
has_credentials = False
can_auto_create = False
else:
if meta:
self.fields["default_issue_type"].choices = JIRAFormUtils.make_choices(meta["issuetypes"])
else:
can_auto_create = False
if not has_credentials:
self.fields['password'].required = True
else:
self.fields['password'].help_text = _("Only enter a new password if you wish to update the stored value")
if not project_safe:
del self.fields["default_project"]
del self.fields["default_issue_type"]
del self.fields["default_priority"]
del self.fields["ignored_fields"]
if not can_auto_create:
del self.fields["auto_create"]
def clean_password(self):
"""
        Don't complain if the field is empty and a password is already stored;
        no one wants to re-type the password every time they update the settings.
"""
pw = self.cleaned_data.get("password")
if pw:
return pw
else:
old_pw = self.initial.get("password")
if not old_pw:
raise ValidationError("A Password is Required")
return old_pw
def clean_instance_url(self):
"""
Strip forward slashes off any url passed through the form.
"""
url = self.cleaned_data.get("instance_url")
if url and url[-1:] == "/":
return url[:-1]
else:
return url
def clean_auto_create(self):
cd = self.cleaned_data
if not cd.get('auto_create'):
return False
if not (cd.get('default_priority') and cd.get('default_issue_type')):
raise ValidationError("Default priority and issue type must be configured.")
return cd['auto_create']
def clean(self):
"""
        Try to build a JIRAClient and make a simple API call to verify that
        the configuration is correct.
"""
cd = self.cleaned_data
missing_fields = False
if not cd.get("instance_url"):
self.errors["instance_url"] = ["Instance URL is required"]
missing_fields = True
if not cd.get("username"):
self.errors["username"] = ["Username is required"]
missing_fields = True
if missing_fields:
raise ValidationError("Missing Fields")
if cd.get("password"):
jira = JIRAClient(cd["instance_url"], cd["username"], cd["password"])
try:
sut_response = jira.get_priorities()
except JIRAError as e:
if e.status_code == 403 or e.status_code == 401:
self.errors["username"] = ["Username might be incorrect"]
self.errors["password"] = ["Password might be incorrect"]
raise ValidationError("Unable to connect to JIRA: %s, if you have "
"tried and failed multiple times you may have"
" to enter a CAPTCHA in JIRA to re-enable API"
" logins." % e.status_code)
else:
logging.exception(e)
raise ValidationError("Unable to connect to JIRA: the remote "
"server returned an unhandled %s status "
" code" % e.status_code)
if not sut_response.json:
raise ValidationError("Unable to connect to JIRA: "
"the response did not contain valid JSON, did "
"you enter the correct instance URL?")
return cd
# A list of common builtin custom field types for JIRA for easy reference.
CUSTOM_FIELD_TYPES = {
"select": "com.atlassian.jira.plugin.system.customfieldtypes:select",
"textarea": "com.atlassian.jira.plugin.system.customfieldtypes:textarea",
"multiuserpicker": "com.atlassian.jira.plugin.system.customfieldtypes:multiuserpicker"
}
class JIRAIssueForm(forms.Form):
project = forms.CharField(widget=forms.HiddenInput())
issuetype = forms.ChoiceField(
label="Issue Type",
help_text="Changing the issue type will refresh the page with the required form fields.",
required=True
)
summary = forms.CharField(
label=_("Issue Summary"),
widget=forms.TextInput(attrs={'class': 'span6'})
)
description = forms.CharField(
widget=forms.Textarea(attrs={"class": 'span6'})
)
def __init__(self, *args, **kwargs):
self.ignored_fields = set((kwargs.pop("ignored_fields") or '').split(","))
initial = kwargs.get("initial")
jira_client = kwargs.pop("jira_client")
project_key = kwargs.pop("project_key")
priorities = jira_client.get_priorities().json
versions = jira_client.get_versions(project_key).json
# Returns the metadata the configured JIRA instance requires for
# creating issues for a given project.
# https://developer.atlassian.com/static/rest/jira/5.0.html#id200251
meta = jira_client.get_create_meta(project_key).json
# Early exit, somehow made it here without properly configuring the
# plugin.
if not meta or not priorities:
super(JIRAIssueForm, self).__init__(*args, **kwargs)
self.errors["__all__"] = [
"Error communicating with JIRA, Please check your configuration."]
return
# Early exit #2, no projects available.
if len(meta["projects"]) == 0:
super(JIRAIssueForm, self).__init__(*args, **kwargs)
self.errors["__all__"] = [
"Error in JIRA configuration, no projects found for user {}.".format(jira_client.username)
]
return
# Looking up the project meta by exact key, so it's always the first
# one.
project = meta["projects"][0]
issue_types = project["issuetypes"]
# check if the issuetype was passed as a GET parameter
self.issue_type = initial.get("issuetype")
if self.issue_type:
matching_type = [t for t in issue_types if t["id"] == self.issue_type]
self.issue_type = matching_type[0] if len(matching_type) > 0 else None
# still no issue type? just use the first one.
if not self.issue_type:
self.issue_type = issue_types[0]
# set back after we've played with the inital data
kwargs["initial"] = initial
# call the super to bind self.fields from the defaults.
super(JIRAIssueForm, self).__init__(*args, **kwargs)
self.fields["project"].initial = project["id"]
self.fields["issuetype"].choices = JIRAFormUtils.make_choices(issue_types)
# apply ordering to fields based on some known built-in JIRA fields.
# otherwise weird ordering occurs.
anti_gravity = {"priority": -150,
"fixVersions": -125,
"components": -100,
"security": -50}
dynamic_fields = self.issue_type.get("fields").keys()
dynamic_fields.sort(key=lambda f: anti_gravity.get(f) or 0)
        # build up some dynamic fields based on the issue type's field metadata.
for field in dynamic_fields:
if field in self.fields.keys() or field in [x.strip() for x in self.ignored_fields]:
# don't overwrite the fixed fields for the form.
continue
mb_field = self.build_dynamic_field(self.issue_type["fields"][field])
if mb_field:
# apply field to form
self.fields[field] = mb_field
if "priority" in self.fields.keys():
# whenever priorities are available, put the available ones in the list.
# allowedValues for some reason doesn't pass enough info.
self.fields["priority"].choices = JIRAFormUtils.make_choices(priorities)
if "fixVersions" in self.fields.keys():
self.fields["fixVersions"].choices = JIRAFormUtils.make_choices(versions)
def clean_description(self):
"""
Turn code blocks that are in the stack trace into JIRA code blocks.
"""
desc = self.cleaned_data["description"]
return desc.replace("```", "{code}")
def clean(self):
"""
The form clean method needs to take advantage of the loaded issue type
fields and meta info so it can determine the format that the datatypes
should render as.
"""
very_clean = self.cleaned_data
# protect against mis-configured plugin submitting a form without an
# issuetype assigned.
if not very_clean.get("issuetype"):
raise ValidationError("Issue Type is required. Check your plugin configuration.")
fs = self.issue_type["fields"]
for field in fs.keys():
f = fs[field]
if field in ["description", "summary"]:
continue
if field in very_clean.keys():
v = very_clean.get(field)
if v:
schema = f["schema"]
if schema.get("type") == "string" and not schema.get("custom") == CUSTOM_FIELD_TYPES["select"]:
continue # noop
if schema["type"] == "user" or schema.get('items') == "user":
v = {"name": v}
elif schema.get("custom") == CUSTOM_FIELD_TYPES.get("multiuserpicker"):
# custom multi-picker
v = [{"name": v}]
elif schema["type"] == "array" and schema.get('items') != "string":
v = [{"id": vx} for vx in v]
elif schema["type"] == "array" and schema.get('items') == "string":
v = [v]
elif schema.get("custom") == CUSTOM_FIELD_TYPES.get("textarea"):
v = v
elif (schema.get("type") != "string"
or schema.get('items') != "string"
or schema.get("custom") == CUSTOM_FIELD_TYPES.get("select")):
v = {"id": v}
very_clean[field] = v
else:
# We don't want to pass blank data back to the API, so kill
# None values
very_clean.pop(field, None)
if not (isinstance(very_clean["issuetype"], dict)
and "id" in very_clean["issuetype"]):
# something fishy is going on with this field, working on some JIRA
# instances, and some not.
# testing against 5.1.5 and 5.1.4 does not convert (perhaps is no longer included
# in the projectmeta API call, and would normally be converted in the
# above clean method.)
very_clean["issuetype"] = {"id": very_clean["issuetype"]}
return very_clean
def build_dynamic_field(self, field_meta):
"""
Builds a field based on JIRA's meta field information
"""
schema = field_meta["schema"]
# set up some defaults for form fields
fieldtype = forms.CharField
fkwargs = {
'label': field_meta["name"],
'required': field_meta["required"],
'widget': forms.TextInput(attrs={'class': 'span6'})
}
# override defaults based on field configuration
if (schema["type"] in ["securitylevel", "priority"]
or schema.get("custom") == CUSTOM_FIELD_TYPES.get("select")):
fieldtype = forms.ChoiceField
fkwargs["choices"] = JIRAFormUtils.make_choices(field_meta.get('allowedValues'))
fkwargs["widget"] = forms.Select()
elif schema.get("items") == "user" or schema["type"] == "user":
fkwargs["widget"] = forms.TextInput(attrs={
'class': 'user-selector',
'data-autocomplete': field_meta.get("autoCompleteUrl")
})
elif schema["type"] in ["timetracking"]:
            # TODO: Implement timetracking (currently unsupported altogether)
return None
elif schema.get("items") in ["worklog", "attachment"]:
# TODO: Implement worklogs and attachments someday
return None
elif schema["type"] == "array" and schema["items"] != "string":
fieldtype = forms.MultipleChoiceField
fkwargs["choices"] = JIRAFormUtils.make_choices(field_meta.get("allowedValues"))
fkwargs["widget"] = forms.SelectMultiple()
# break this out, since multiple field types could additionally
# be configured to use a custom property instead of a default.
if schema.get("custom"):
if schema["custom"] == CUSTOM_FIELD_TYPES.get("textarea"):
fkwargs["widget"] = forms.Textarea(attrs={'class': 'span6'})
return fieldtype(**fkwargs)
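# --- Hedged illustration (not part of the original plugin) ---------------------
# A sketch of the kind of JIRA field metadata build_dynamic_field() consumes,
# using an invented "Severity" select field; the allowedValues entries and the
# form instance are assumptions for illustration only.
#
# example_meta = {
#     "name": "Severity",
#     "required": False,
#     "schema": {"type": "string", "custom": CUSTOM_FIELD_TYPES["select"]},
#     "allowedValues": [{"id": "1", "value": "Critical"}, {"id": "2", "value": "Minor"}],
# }
# form.build_dynamic_field(example_meta)  # -> forms.ChoiceField with two choices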
|
the-stack_106_18479
|
# -*- coding: utf-8 -*-
"""
Functions for audio featurization.
"""
import os
import math
import logging
import numpy as np
import soundfile as sf
import librosa
SIGMA_EPS = 1e-12
def stft(frame, _sr, wind, _hop, nfft, synth=False, zphase=False):
if not zphase:
return np.fft.rfft(frame, n=nfft)
fsize = len(wind)
woff = (fsize - (fsize % 2)) // 2
zp = np.zeros(nfft - fsize)
return np.fft.rfft(np.concatenate((frame[woff:], zp, frame[:woff])))
def istft(frame, _sr, wind, nfft, zphase=False):
frame = np.fft.irfft(frame, nfft)
if zphase:
fsize = len(wind)
frame = np.roll(frame, (fsize - (fsize % 2)) // 2)[:fsize]
return frame
def onlineMVN_perframe(
frame_feature, frame_counter, mu, sigmasquare,
frameshift=0.01, tauFeat=3., tauFeatInit=0.1, t_init=0.1):
"""Online mean and variance normalization (per frequency)"""
n_init_frames = math.ceil(t_init / frameshift)
alpha_feat_init = math.exp(-frameshift / tauFeatInit)
alpha_feat = math.exp(-frameshift / tauFeat)
if frame_counter < n_init_frames:
alpha = alpha_feat_init
else:
alpha = alpha_feat
mu = alpha * mu + (1 - alpha) * frame_feature
sigmasquare = alpha * sigmasquare + (1 - alpha) * frame_feature**2
sigma = np.sqrt(np.maximum(sigmasquare - mu**2, SIGMA_EPS)) # limit for sqrt
norm_feature = (frame_feature - mu) / sigma
frame_counter += 1
return norm_feature, mu, sigmasquare, frame_counter
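# Hedged usage sketch (not part of the original module): apply the online MVN
# frame by frame over a (n_frames, n_freq) feature matrix. The shapes and the
# 0.01 s frame shift are illustrative assumptions.
def online_mvn_demo(feat_frames, frameshift=0.01):
    mu = np.zeros(feat_frames.shape[1])
    sigmasquare = np.ones(feat_frames.shape[1])
    counter = 0
    out = np.empty_like(feat_frames)
    for i, frame in enumerate(feat_frames):
        out[i], mu, sigmasquare, counter = onlineMVN_perframe(
            frame, counter, mu, sigmasquare, frameshift=frameshift)
    return out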
def magphasor(complexspec):
"""Decompose a complex spectrogram into magnitude and unit phasor.
m, p = magphasor(c) such that c == m * p.
"""
mspec = np.abs(complexspec)
pspec = np.empty_like(complexspec)
zero_mag = mspec == 0. # fix zero-magnitude
pspec[zero_mag] = 1.
pspec[~zero_mag] = complexspec[~zero_mag] / mspec[~zero_mag]
return mspec, pspec
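# Hedged sanity check (not from the original source): the decomposition satisfies
# c == m * p with |p| == 1; the spectrum size 257 is an arbitrary assumption.
# c = np.random.randn(257) + 1j * np.random.randn(257)
# m, p = magphasor(c)
# assert np.allclose(m * p, c) and np.allclose(np.abs(p), 1.0)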
def logpow(sig, floor=-30.):
"""Compute log power of complex spectrum.
Floor any -`np.inf` value to (nonzero minimum + `floor`) dB.
If all values are 0s, floor all values to -80 dB.
"""
log10e = np.log10(np.e)
pspec = sig.real**2 + sig.imag**2
zeros = pspec == 0
logp = np.empty_like(pspec)
if np.any(~zeros):
logp[~zeros] = np.log(pspec[~zeros])
logp[zeros] = np.log(pspec[~zeros].min()) + floor / 10 / log10e
else:
logp.fill(-80 / 10 / log10e)
return logp
def hamming(wsize, hop=None):
"Compute the Hamming window"
if hop is None:
return np.hamming(wsize)
# For perfect OLA reconstruction in time
if wsize % 2: # Fix endpoint problem for odd-size window
wind = np.hamming(wsize)
wind[0] /= 2.
wind[-1] /= 2.
else: # even-size window
wind = np.hamming(wsize + 1)
wind = wind[:-1]
assert tnorm(wind, hop), \
"[wsize:{}, hop:{}] violates COLA in time.".format(wsize, hop)
return wind
def tnorm(wind, hop):
amp = tcola(wind, hop)
if amp is None:
return False
wind /= amp
return True
def tcola(wind, _hop):
wsize = len(wind)
    hsize = 160  # NOTE: the hop size is hardcoded here; the _hop argument is ignored
buff = wind.copy() # holds OLA buffer and account for time=0
for wi in range(hsize, wsize, hsize): # window moving forward
wj = wi + wsize
buff[wi:] += wind[:wsize - wi]
for wj in range(wsize - hsize, 0, -hsize): # window moving backward
wi = wj - wsize
buff[:wj] += wind[wsize - wj:]
if np.allclose(buff, buff[0]):
return buff[0]
return None
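# Hedged round-trip sketch (not part of the original module): analyze one frame with a
# COLA-normalized Hamming window and resynthesize the windowed frame. The 320-sample
# window, 160-sample hop and 512-point FFT are illustrative assumptions; `frame` is
# expected to hold exactly `wsize` samples.
def stft_roundtrip_demo(frame, sr=16000, wsize=320, hop=160, nfft=512):
    wind = hamming(wsize, hop)
    spec = stft(frame * wind, sr, wind, hop, nfft, zphase=True)
    return istft(spec, sr, wind, nfft, zphase=True)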
def audioread(path, sr=None, start=0, stop=None, mono=True, norm=False):
path = os.path.abspath(path)
if not os.path.exists(path):
logging.error('File does not exist: %s', path)
raise ValueError("[{}] does not exist!".format(path))
try:
x, xsr = sf.read(path, start=start, stop=stop)
    except RuntimeError:  # soundfile cannot parse some sph files (pcm-embedded / shortened v2)
        # A sph2pipe-based fallback would be needed here; re-raise instead of
        # continuing with an undefined buffer.
        logging.warning('Audio type not supported for file %s.', path)
        raise
if len(x.shape) == 1: # mono
if sr and xsr != sr:
print("Resampling to sampling rate:", sr)
x = librosa.resample(x, xsr, sr)
xsr = sr
if norm:
print("Normalization input data")
x /= np.max(np.abs(x))
return x, xsr
# multi-channel
x = x.T
if sr and xsr != sr:
x = librosa.resample(x, xsr, sr, axis=1)
xsr = sr
if mono:
x = x.sum(axis=0) / x.shape[0]
if norm:
for chan in range(x.shape[0]):
x[chan, :] /= np.max(np.abs(x[chan, :]))
return x, xsr
def audiowrite(data, sr, outpath, norm=False):
logging.debug("Writing to: %s", outpath)
if np.max(np.abs(data)) == 0: # in case all entries are 0s
logging.warning("All-zero output! Something is not quite right,"
" check your input audio clip and model.")
outpath = os.path.abspath(outpath)
outdir = os.path.dirname(outpath)
if not os.path.exists(outdir):
os.makedirs(outdir)
sf.write(outpath, data, sr)
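# Hedged end-to-end sketch (not part of the original module): read a clip at 16 kHz,
# peak-normalize it and write it back. The file names and target rate are
# illustrative assumptions.
# x, sr = audioread('input.wav', sr=16000, norm=True)
# audiowrite(x, sr, 'out/input_normalized.wav')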
|
the-stack_106_18480
|
#!/usr/bin/env python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp.dsl as dsl
import kfp.compiler as compiler
import databricks
def create_cluster(cluster_name):
return databricks.CreateClusterOp(
name="createcluster",
cluster_name=cluster_name,
spark_version="5.3.x-scala2.11",
node_type_id="Standard_D3_v2",
spark_conf={
"spark.speculation": "true"
},
num_workers=2
)
def submit_run(run_name, cluster_id, parameter):
return databricks.SubmitRunOp(
name="submitrun",
run_name=run_name,
existing_cluster_id=cluster_id,
libraries=[{"jar": "dbfs:/docs/sparkpi.jar"}],
spark_jar_task={
"main_class_name": "org.apache.spark.examples.SparkPi",
"parameters": [parameter]
}
)
def delete_run(run_name):
return databricks.DeleteRunOp(
name="deleterun",
run_name=run_name
)
def delete_cluster(cluster_name):
return databricks.DeleteClusterOp(
name="deletecluster",
cluster_name=cluster_name
)
@dsl.pipeline(
name="DatabricksCluster",
description="A toy pipeline that computes an approximation to pi with Azure Databricks."
)
def pipeline_calc(cluster_name="test-cluster", run_name="test-run", parameter="10"):
create_cluster_task = create_cluster(cluster_name)
submit_run_task = submit_run(run_name, create_cluster_task.outputs["cluster_id"], parameter)
delete_run_task = delete_run(run_name)
delete_run_task.after(submit_run_task)
delete_cluster_task = delete_cluster(cluster_name)
delete_cluster_task.after(delete_run_task)
if __name__ == "__main__":
compiler.Compiler()._create_and_write_workflow(
pipeline_func=pipeline_calc,
package_path=__file__ + ".tar.gz")
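# Hedged follow-up sketch (not in the original file): submit the compiled package to a
# Kubeflow Pipelines endpoint. The host URL and argument values are assumptions.
# import kfp
# client = kfp.Client(host='http://my-kfp-endpoint')
# client.create_run_from_pipeline_package(
#     __file__ + '.tar.gz',
#     arguments={'cluster_name': 'test-cluster', 'run_name': 'test-run', 'parameter': '10'})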
|
the-stack_106_18481
|
#!/usr/bin/env python
from __future__ import print_function
import logging
import os
import signal
import sys
import uuid
import zipfile
import errno
from buck_logging import setup_logging
from buck_tool import ExecuteTarget, install_signal_handlers, \
BuckStatusReporter
from buck_project import BuckProject, NoBuckConfigFoundException
from tracing import Tracing
from subprocutils import propagate_failure
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
# Kill all buck processes
def killall_buck(reporter):
    # 'killall' is only implemented for POSIX systems (Linux or macOS).
if os.name != 'posix':
message = 'killall is not implemented on: ' + os.name
logging.error(message)
reporter.status_message = message
return 10 # FATAL_GENERIC
for line in os.popen('jps -l'):
split = line.split()
if len(split) != 2:
            raise Exception('cannot parse a line in jps -l output: ' + repr(line))
pid = int(split[0])
name = split[1]
if name != 'com.facebook.buck.cli.bootstrapper.ClassLoaderBootstrapper':
continue
os.kill(pid, signal.SIGTERM)
return 0
def main(argv, reporter):
def get_repo(p):
# Try to detect if we're running a PEX by checking if we were invoked
# via a zip file.
if zipfile.is_zipfile(argv[0]):
from buck_package import BuckPackage
return BuckPackage(p, reporter)
else:
from buck_repo import BuckRepo
return BuckRepo(THIS_DIR, p, reporter)
# If 'killall' is the second argument, shut down all the buckd processes
if sys.argv[1:] == ['killall']:
return killall_buck(reporter)
install_signal_handlers()
try:
tracing_dir = None
build_id = str(uuid.uuid4())
reporter.build_id = build_id
with Tracing("main"):
with BuckProject.from_current_dir() as project:
tracing_dir = os.path.join(project.get_buck_out_log_dir(),
'traces')
with get_repo(project) as buck_repo:
# If 'kill' is the second argument, shut down the buckd
# process
if sys.argv[1:] == ['kill']:
buck_repo.kill_buckd()
return 0
return buck_repo.launch_buck(build_id)
finally:
if tracing_dir:
Tracing.write_to_dir(tracing_dir, build_id)
if __name__ == "__main__":
exit_code = 0
reporter = BuckStatusReporter(sys.argv)
fn_exec = None
exception = None
try:
setup_logging()
exit_code = main(sys.argv, reporter)
except ExecuteTarget as e:
# this is raised once 'buck run' has the binary
# it can get here only if exit_code of corresponding buck build is 0
fn_exec = e.execve
except NoBuckConfigFoundException:
exc_type, exception, exc_traceback = sys.exc_info()
# buck is started outside project root
exit_code = 3 # COMMANDLINE_ERROR
except IOError as e:
exc_type, exception, exc_traceback = sys.exc_info()
if e.errno == errno.ENOSPC:
exit_code = 14 # FATAL_DISK_FULL
elif e.errno == errno.EPIPE:
exit_code = 141 # SIGNAL_PIPE
else:
exit_code = 13 # FATAL_IO
except KeyboardInterrupt:
reporter.status_message = 'Python wrapper keyboard interrupt'
exit_code = 130 # SIGNAL_INTERRUPT
except Exception:
exc_type, exception, exc_traceback = sys.exc_info()
# 11 is fatal bootstrapper error
exit_code = 11
if exception is not None:
logging.error(exception, exc_info=(exc_type, exception, exc_traceback))
if reporter.status_message is None:
reporter.status_message = str(exception)
# report result of Buck call
try:
reporter.report(exit_code)
except Exception as e:
logging.debug(str(e))
# execute 'buck run' target
if fn_exec is not None:
fn_exec()
propagate_failure(exit_code)
|
the-stack_106_18484
|
from chainer_chemistry.dataset.preprocessors.common \
import construct_atomic_number_array
from chainer_chemistry.dataset.preprocessors.common import type_check_num_atoms
from chainer_chemistry.dataset.preprocessors.mol_preprocessor import MolPreprocessor
class AtomicNumberPreprocessor(MolPreprocessor):
"""Atomic number Preprocessor
Args:
        max_atoms (int): Max number of atoms for each molecule; if the
            number of atoms exceeds this value, the data point is simply
            ignored.
            A negative value means there is no limit on the number of atoms.
        out_size (int): Specifies the size of the array returned by
            `get_input_features`.
            If the number of atoms in the molecule is less than this value,
            the returned array is padded to a fixed size.
            A negative value means the returned array is not padded.
"""
def __init__(self, max_atoms=-1, out_size=-1):
super(AtomicNumberPreprocessor, self).__init__()
if max_atoms >= 0 and out_size >= 0 and max_atoms > out_size:
            raise ValueError('max_atoms {} must be less than or equal to '
                             'out_size {}'.format(max_atoms, out_size))
self.max_atoms = max_atoms
self.out_size = out_size
def get_input_features(self, mol):
"""get input features
Args:
mol (Mol):
Returns:
"""
type_check_num_atoms(mol, self.max_atoms)
atom_array = construct_atomic_number_array(mol, out_size=self.out_size)
return atom_array
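# Hedged usage sketch (not part of the original file): featurize an ethanol molecule
# built with RDKit; the SMILES string and out_size value are illustrative assumptions.
if __name__ == '__main__':
    from rdkit import Chem
    preprocessor = AtomicNumberPreprocessor(out_size=10)
    mol = Chem.MolFromSmiles('CCO')
    print(preprocessor.get_input_features(mol))  # atomic numbers padded with zeros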
|
the-stack_106_18485
|
######################################################################
# #
# Copyright 2009-2018 Lucas Heitzmann Gabrielli. #
# This file is part of gdspy, distributed under the terms of the #
# Boost Software License - Version 1.0. See the accompanying #
# LICENSE file or <http://www.boost.org/LICENSE_1_0.txt> #
# #
######################################################################
import numpy
import gdspy
print('Using gdspy module version ' + gdspy.__version__)
# ------------------------------------------------------------------ #
# POLYGONS
# ------------------------------------------------------------------ #
# First we need a cell to add the polygons to.
poly_cell = gdspy.Cell('POLYGONS')
# We define the polygon through its vertices.
points = [(0, 0), (2, 2), (2, 6), (-6, 6), (-6, -6), (-4, -4), (-4, 4), (0, 4)]
# Create the polygon on layer 1.
poly1 = gdspy.Polygon(points, 1)
# Add the new polygon to the cell.
poly_cell.add(poly1)
# Create another polygon from the same set of points, but rotate it
# 180 degrees and add it to the cell.
poly2 = gdspy.Polygon(points, 1).rotate(numpy.pi)
poly_cell.add(poly2)
# To create rectangles we don't need to give the 4 corners, only 2.
# Note that we don't need to create a variable if we are not going to
# use it, just add the rectangle directly to the cell. Create a
# rectangle in layer 2.
poly_cell.add(gdspy.Rectangle((18, 1), (22, 2), 2))
# There are no circles in the GDSII specification, so rounded shapes
# are actually many-sided polygons. Create a circle in layer 2,
# centered at (27, 2), and with radius 2.
poly_cell.add(gdspy.Round((27, 2), 2, layer=2))
# The Round class is quite versatile: it provides circles, pie slices,
# rings and ring sections, like this one in layer 2.
poly_cell.add(
gdspy.Round(
(23.5, 7),
15,
inner_radius=14,
initial_angle=-2.0 * numpy.pi / 3.0,
final_angle=-numpy.pi / 3.0,
layer=2))
# ------------------------------------------------------------------ #
# PATHS
# ------------------------------------------------------------------ #
path_cell = gdspy.Cell('PATHS')
# Start a path from the origin with width 1.
path1 = gdspy.Path(1, (0, 0))
# Add a straight segment to the path in layer 1, datatype 1, with length
# 3, going in the '+x' direction. Since we'll use this layer/datatype
# configuration again, we can setup a dict containing this info.
spec = {'layer': 1, 'datatype': 1}
path1.segment(3, '+x', **spec)
# Add a curve to the path by specifying its radius as 2 and its initial
# and final angles.
path1.arc(2, -numpy.pi / 2.0, numpy.pi / 6.0, **spec)
# Add another segment to the path in layer 1, with length 4 and
# pointing in the direction defined by the last piece we added above.
path1.segment(4, **spec)
# Add a curve using the turn command. We specify the radius 2 and
# turning angle. The angle can also be specified with 'l' and 'r' for
# left and right turns of 90 degrees, or 'll' and 'rr' for 180 degrees.
path1.turn(2, -2.0 * numpy.pi / 3.0, **spec)
# Final piece of the path. Add a straight segment and taper the path
# width from the original 1 to 0.5.
path1.segment(3, final_width=0.5, **spec)
path_cell.add(path1)
# We can also create parallel paths simultaneously. Start 2 paths with
# width 0.5 each, and pitch 1, originating where our last path ended.
path2 = gdspy.Path(0.5, (path1.x, path1.y), number_of_paths=2, distance=1)
# Add a straight segment to the paths gradually increasing their
# distance to 1.5, in the direction in which the last path ended.
spec['layer'] = 2
path2.segment(3, path1.direction, final_distance=1.5, **spec)
# Path commands can be concatenated. Add a turn and a taper segment
# in one expression, followed by a final turn.
path2.turn(2, -2.0 * numpy.pi / 3.0, **spec).segment(
4, final_distance=1, **spec)
path2.turn(4, numpy.pi / 6.0, **spec)
path_cell.add(path2)
# Create another single path 0.5 wide, starting where the path above
# ended, and add to it a line segment in the 3rd layer in the '-y'
# direction.
path3 = gdspy.Path(0.5, (path2.x, path2.y))
path3.segment(1, '-y', layer=3)
# We can create paths based on parametric curves. First we need to
# define the curve function, with 1 argument. This argument will vary
# from 0 to 1 and the return value should be the (x, y) coordinates of
# the path. This could be a lambda-expression if the function is
# simple enough. We will create a spiral path. Note that the function
# returns (0, 0) when t=0, so that our path is connected.
def spiral(t):
r = 4 - 3 * t
theta = 5 * t * numpy.pi
x = 4 - r * numpy.cos(theta)
y = -r * numpy.sin(theta)
return (x, y)
# We can also create the derivative of the curve to pass to out path
# path member, otherwise it will be numerically calculated. In the
# spiral case we don't want the exact derivative, but the derivative of
# the spiral as if its radius was constant. This will ensure that our
# path is connected at the start (geometric problem of this kind of
# spiral).
def dspiral_dt(t):
theta = 5 * t * numpy.pi
dx_dt = numpy.sin(theta)
dy_dt = -numpy.cos(theta)
return (dx_dt, dy_dt)
# Add the parametric spiral to the path in layer 3. Note that we can
# still taper the width (linearly or with a taper function). To make
# the curve smoother, we increase the number of evaluations of the
# function (fracture will be performed automatically to ensure polygons
# with less than 200 points).
path3.parametric(
spiral,
dspiral_dt,
final_width=lambda t: 0.1 + abs(0.4 * (1 - 2 * t)**3),
number_of_evaluations=600,
layer=3)
path_cell.add(path3)
# Polygonal paths are defined by the points they pass through. The
# width of the path can be given as a number, representing the path
# width along is whole extension, or as a list, where each element is
# the width of the path at one point. Our path will have width 0.5 in
# all points, except the last, where it will tapper up to 1.5. More
# than 1 path can be defined in parallel as well (useful for buses).
# The distance between the paths work the same way as the width: it's
# either a constant number, or a list. We create 5 parallel paths that
# are larger and further apart on the last point. The paths are put in
# layers 4 and 5. Since we have 5 paths, the list of layers will be
# run more than once, so the 5 paths will actually be in layers 4, 5, 4,
# 5, and 4.
points = [(20, 12), (24, 8), (24, 4), (24, -2)]
widths = [0.5] * (len(points) - 1) + [1.5]
distances = [0.8] * (len(points) - 1) + [2.4]
polypath = gdspy.PolyPath(
points, widths, number_of_paths=5, distance=distances, layer=[4, 5])
# We can round the corners of any Polygon or PolygonSet with the fillet
# method. Here we use a radius of 0.2.
# polypath.fillet(0.2)
path_cell.add(polypath)
# L1Paths use only segments in 'x' and 'y' directions, useful for some
# lithography mask writers. We specify a path composed of 16 segments
# of length 4. The turns after each segment can be either 90 degrees
# CCW (positive) or CW (negative). The absolute value of the turns
# produces a scaling of the path width and distance between paths in
# segments immediately after the turn.
lengths = [4] * 8
turns = [-1, -1, 1, 1, -1, -2, 1, 0.5]
l1path = gdspy.L1Path(
(-1, -11),
'+y',
0.5,
lengths,
turns,
number_of_paths=3,
distance=0.7,
layer=6)
path_cell.add(l1path)
# ------------------------------------------------------------------ #
# POLYGON OPERATIONS
# ------------------------------------------------------------------ #
# Boolean operations can be executed with either gdspy polygons or
# point lists). The operations are union, intersection, subtraction,
# symmetric subtracion (respectively 'or', 'and', 'not', 'xor').
oper_cell = gdspy.Cell('OPERATIONS')
# Here we subtract the previously created spiral from a rectangle with
# the 'not' operation.
oper_cell.add(
gdspy.fast_boolean(
gdspy.Rectangle((10, -4), (17, 4)), path3, 'not', layer=1))
# Polygon offset (inset and outset) can be used, for instance, to
# define safety margins around shapes.
spec = {'layer': 7}
path4 = gdspy.Path(0.5, (21, -5)).segment(3, '+x', **spec)\
.turn(4, 'r', **spec).turn(4, 'rr', **spec)\
.segment(3, **spec)
oper_cell.add(path4)
# Merge all parts into a single polygon.
merged = gdspy.fast_boolean(path4, None, 'or', max_points=0)
# Offset the path shape by 0.5 and add it to the cell.
oper_cell.add(gdspy.offset(merged, 1, layer=8))
# ------------------------------------------------------------------ #
# SLICING POLYGONS
# ------------------------------------------------------------------ #
# If there is the need to cut a polygon or set of polygons, it's better
# to use the slice function than set up a boolean operation, since it
# runs much faster. Slices are multiple cuts perpendicular to an axis.
slice_cell = gdspy.Cell('SLICE')
original = gdspy.Round((0, 0), 10, inner_radius=5)
# Slice the original ring along x = -7 and x = 7.
result = gdspy.slice(original, [-7, 7], 0, layer=1)
# The result is a tuple of polygon sets, one for each slice. To keep
# the region between our 2 cuts, we choose result[1].
slice_cell.add(result[1])
# If the cut needs to be at an angle we can rotate the geometry, slice
# it, and rotate back.
original = gdspy.PolyPath([(12, 0), (12, 8), (28, 8), (28, -8), (12, -8),
(12, 0)], 1, 3, 2)
original.rotate(numpy.pi / 3, center=(20, 0))
result = gdspy.slice(original, 7, 1, layer=2)
result[0].rotate(-numpy.pi / 3, center=(20, 0))
slice_cell.add(result[0])
# ------------------------------------------------------------------ #
# REFERENCES AND TEXT
# ------------------------------------------------------------------ #
# Cells can contain references to other cells.
ref_cell = gdspy.Cell('REFS')
ref_cell.add(gdspy.CellReference(poly_cell, (0, 30), x_reflection=True))
ref_cell.add(gdspy.CellReference(poly_cell, (25, 0), rotation=180))
# References can be whole arrays. Add an array of the operations cell
# with 2 lines and 3 columns and 1st element at (25, 10).
ref_cell.add(
gdspy.CellArray('OPERATIONS', 3, 2, (35, 30), (25, 10), magnification=1.5))
# Text are also sets of polygons. They have edges parallel to 'x' and
# 'y' only.
ref_cell.add(
gdspy.Text(
        'Created with gdspy ' + gdspy.__version__, 7, (-7, -35), layer=6))
# Labels are special text objects which don't define any actual
# geometry, but can be used to annotate the drawing. Rotation,
# magnification and reflection of the text are not supported by the
# included GUI, but they are included in the resulting GDSII file.
ref_cell.add(
gdspy.Label(
'Created with gdspy ' + gdspy.__version__, (-7, -36), 'nw', layer=6))
# ------------------------------------------------------------------ #
# TRANSLATION AND REFLECTION
# ------------------------------------------------------------------ #
trans_cell = gdspy.Cell('TRANS')
# Any geometric object can be translated by providing the distance to
# translate in the x-direction and y-direction: translate(dx, dy)
rect1 = gdspy.Rectangle((80, 0), (81, 1), 1)
rect1.translate(2, 0)
trans_cell.add(rect1)
# Translatable objects can also be copied & translated in the same way.
rect2 = gdspy.Rectangle((80, 0), (81, 1), 2)
rect3 = gdspy.copy(rect2, 0, 3)
trans_cell.add(rect2)
trans_cell.add(rect3)
# Reference Cells are also translatable, and thus copyable.
ref1 = gdspy.CellReference(poly_cell, (25, 0), rotation=180)
ref2 = gdspy.copy(ref1, 30, 30)
trans_cell.add(ref1)
trans_cell.add(ref2)
# Same goes for Labels & Text
text1 = gdspy.Text(
'Created with gdspy ' + gdspy.__version__, 7, (-7, -35), layer=6)
text2 = gdspy.copy(text1, 0, -20)
label1 = gdspy.Label(
'Created with gdspy ' + gdspy.__version__, (-7, -36), 'nw', layer=6)
label2 = gdspy.copy(label1, 0, -20)
trans_cell.add(text1)
trans_cell.add(text2)
trans_cell.add(label1)
trans_cell.add(label2)
# Reflection across a line defined by the intersection of two points
# allows the mirroring of a polygon over an arbitrary axis: mirror(p1, p2)
rect4 = gdspy.Rectangle((80, 0), (81, 1), 3)
rect4.mirror((80, 2), (79, 0))
trans_cell.add(rect4)
# ------------------------------------------------------------------ #
# OUTPUT
# ------------------------------------------------------------------ #
# Output the layout to a GDSII file (default to all created cells).
# Set the units we used to micrometers and the precision to nanometers.
gdspy.write_gds('tutorial.gds', unit=1.0e-6, precision=1.0e-9)
# ------------------------------------------------------------------ #
# IMPORT
# ------------------------------------------------------------------ #
# Import the file we just created, and extract the cell 'POLYGONS'. To
# avoid naming conflict, we will rename all cells.
gdsii = gdspy.GdsLibrary()
gdsii.read_gds(
'tutorial.gds',
rename={
'POLYGONS': 'IMPORT_POLY',
'PATHS': 'IMPORT_PATHS',
'OPERATIONS': 'IMPORT_OPER',
'SLICE': 'IMPORT_SLICE',
'REFS': 'IMPORT_REFS',
'TRANS': 'IMPORT_TRANS'
},
layers={
1: 7,
2: 8,
3: 9
})
# Now we extract the cells we want to actually include in our current
# structure. Note that the referenced cells will be automatically
# extracted as well.
gdsii.extract('IMPORT_REFS')
# ------------------------------------------------------------------ #
# VIEWER
# ------------------------------------------------------------------ #
# View the layout using a GUI. Full description of the controls can
# be found in the online help at http://gdspy.sourceforge.net/
gdspy.LayoutViewer()
|
the-stack_106_18486
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import os
import sys
import unittest
# noinspection PyProtectedMember
from numpy.testing import (assert_allclose, assert_array_less, assert_equal,
assert_raises)
from pyod.models.kde import KDE
from pyod.utils.data import generate_data
from scipy.stats import rankdata
from sklearn.base import clone
from sklearn.metrics import roc_auc_score
# temporary solution for relative imports in case pyod is not installed
# if pyod is installed, no need to use the following line
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
class TestKDE(unittest.TestCase):
def setUp(self):
self.n_train = 200
self.n_test = 100
self.contamination = 0.1
self.roc_floor = 0.8
self.X_train, self.y_train, self.X_test, self.y_test = generate_data(
n_train=self.n_train,
n_test=self.n_test,
contamination=self.contamination,
random_state=42,
)
self.clf = KDE(contamination=self.contamination)
self.clf.fit(self.X_train)
def test_parameters(self):
assert (
hasattr(self.clf, "decision_scores_")
and self.clf.decision_scores_ is not None
)
assert hasattr(self.clf, "labels_") and self.clf.labels_ is not None
assert hasattr(self.clf, "threshold_") and self.clf.threshold_ is not None
assert hasattr(self.clf, "_mu") and self.clf._mu is not None
assert hasattr(self.clf, "_sigma") and self.clf._sigma is not None
def test_train_scores(self):
assert_equal(len(self.clf.decision_scores_), self.X_train.shape[0])
def test_prediction_scores(self):
pred_scores = self.clf.decision_function(self.X_test)
# check score shapes
assert_equal(pred_scores.shape[0], self.X_test.shape[0])
# check performance
assert roc_auc_score(self.y_test, pred_scores) >= self.roc_floor
def test_prediction_labels(self):
pred_labels = self.clf.predict(self.X_test)
assert_equal(pred_labels.shape, self.y_test.shape)
def test_prediction_proba(self):
pred_proba = self.clf.predict_proba(self.X_test)
assert pred_proba.min() >= 0
assert pred_proba.max() <= 1
def test_prediction_proba_linear(self):
pred_proba = self.clf.predict_proba(self.X_test, method="linear")
assert pred_proba.min() >= 0
assert pred_proba.max() <= 1
def test_prediction_proba_unify(self):
pred_proba = self.clf.predict_proba(self.X_test, method="unify")
assert pred_proba.min() >= 0
assert pred_proba.max() <= 1
def test_prediction_proba_parameter(self):
with assert_raises(ValueError):
self.clf.predict_proba(self.X_test, method="something")
def test_prediction_labels_confidence(self):
pred_labels, confidence = self.clf.predict(self.X_test, return_confidence=True)
assert_equal(pred_labels.shape, self.y_test.shape)
assert_equal(confidence.shape, self.y_test.shape)
assert confidence.min() >= 0
assert confidence.max() <= 1
def test_prediction_proba_linear_confidence(self):
pred_proba, confidence = self.clf.predict_proba(
self.X_test, method="linear", return_confidence=True
)
assert pred_proba.min() >= 0
assert pred_proba.max() <= 1
assert_equal(confidence.shape, self.y_test.shape)
assert confidence.min() >= 0
assert confidence.max() <= 1
def test_fit_predict(self):
pred_labels = self.clf.fit_predict(self.X_train)
assert_equal(pred_labels.shape, self.y_train.shape)
def test_fit_predict_score(self):
self.clf.fit_predict_score(self.X_test, self.y_test)
self.clf.fit_predict_score(self.X_test, self.y_test, scoring="roc_auc_score")
self.clf.fit_predict_score(self.X_test, self.y_test, scoring="prc_n_score")
with assert_raises(NotImplementedError):
self.clf.fit_predict_score(self.X_test, self.y_test, scoring="something")
def test_predict_rank(self):
pred_scores = self.clf.decision_function(self.X_test)
pred_ranks = self.clf._predict_rank(self.X_test)
        # assert the order is preserved
assert_allclose(rankdata(pred_ranks), rankdata(pred_scores), atol=4)
assert_array_less(pred_ranks, self.X_train.shape[0] + 1)
assert_array_less(-0.1, pred_ranks)
def test_predict_rank_normalized(self):
pred_scores = self.clf.decision_function(self.X_test)
pred_ranks = self.clf._predict_rank(self.X_test, normalized=True)
        # assert the order is preserved
assert_allclose(rankdata(pred_ranks), rankdata(pred_scores), atol=4)
assert_array_less(pred_ranks, 1.01)
assert_array_less(-0.1, pred_ranks)
def test_model_clone(self):
clone_clf = clone(self.clf)
def tearDown(self):
pass
if __name__ == "__main__":
unittest.main()
|
the-stack_106_18487
|
import torch
import torchvision
def yoloParseOutput(model_output, nr_bbox=2):
"""Parses the dense ouput to the predicted values"""
# Get outputs
x_norm_rel = model_output[..., 0:nr_bbox] # Center x
y_norm_rel = model_output[..., nr_bbox:nr_bbox*2] # Center y
h_norm_sqrt = model_output[..., nr_bbox*2:nr_bbox*3] # Height
w_norm_sqrt = model_output[..., nr_bbox*3:nr_bbox*4] # Width
pred_conf = torch.sigmoid(model_output[..., nr_bbox * 4:nr_bbox * 5]) # Object Confidence
pred_cls = model_output[..., nr_bbox * 5:] # Class Score
return x_norm_rel, y_norm_rel, h_norm_sqrt, w_norm_sqrt, pred_conf, pred_cls
def yoloDetect(model_output, input_shape, threshold=None):
"""Computes the detections used in YOLO: https://arxiv.org/pdf/1506.02640.pdf"""
cell_map_shape = torch.tensor(model_output.shape[1:3], device=model_output.device)
cell_shape = input_shape / cell_map_shape
x_norm_rel, y_norm_rel, h_norm_sqrt, w_norm_sqrt, pred_conf, pred_cls_conf = yoloParseOutput(model_output)
h = h_norm_sqrt**2 * input_shape[0]
w = w_norm_sqrt**2 * input_shape[1]
x_rel = x_norm_rel * cell_shape[0]
y_rel = y_norm_rel * cell_shape[1]
cell_top_left = getGrid(input_shape, cell_map_shape)
bbox_center = cell_top_left[None, :, :, None, :] + torch.stack([x_rel, y_rel], dim=-1)
bbox_top_left_corner = bbox_center - torch.stack([h, w], dim=-1) // 2
if threshold is None:
return torch.cat([bbox_top_left_corner, h.unsqueeze(-1), w.unsqueeze(-1), pred_conf.unsqueeze(-1)], dim=-1)
detected_bbox_idx = torch.nonzero((pred_conf > threshold)).split(1, dim=-1)
# detected_bbox_idx = (pred_conf > threshold).nonzero().split(1, dim=-1)
batch_idx = detected_bbox_idx[0]
if batch_idx.shape[0] == 0:
return torch.zeros([0, 7])
detected_top_left_corner = bbox_top_left_corner[detected_bbox_idx].squeeze(1)
detected_h = h[detected_bbox_idx]
detected_w = w[detected_bbox_idx]
pred_conf = pred_conf[detected_bbox_idx]
pred_cls = torch.argmax(pred_cls_conf[detected_bbox_idx[:-1]], dim=-1)
pred_cls_conf = pred_cls_conf[detected_bbox_idx[:-1]].squeeze(1)
pred_cls_conf = pred_cls_conf[torch.arange(pred_cls.shape[0]), pred_cls.squeeze(-1)]
# Convert from x, y to u, v
det_bbox = torch.cat([batch_idx.float(), detected_top_left_corner[:, 1, None].float(),
detected_top_left_corner[:, 0, None].float(), detected_w.float(), detected_h.float(),
pred_cls.float(), pred_cls_conf[:, None].float(), pred_conf], dim=-1)
return cropToFrame(det_bbox, input_shape)
def getGrid(input_shape, cell_map_shape):
"""Constructs a 2D grid with the cell center coordinates."""
cell_shape = input_shape / cell_map_shape
cell_top_left = torch.meshgrid([torch.arange(start=0, end=cell_map_shape[0]*cell_shape[0], step=cell_shape[0],
device=cell_shape.device),
torch.arange(start=0, end=cell_map_shape[1]*cell_shape[1], step=cell_shape[1],
device=cell_shape.device)])
return torch.stack(cell_top_left, dim=-1)
def cropToFrame(bbox, image_shape):
"""Checks if bounding boxes are inside frame. If not crop to border"""
array_width = torch.ones_like(bbox[:, 1]) * image_shape[1] - 1
array_height = torch.ones_like(bbox[:, 2]) * image_shape[0] - 1
bbox[:, 1:3] = torch.max(bbox[:, 1:3], torch.zeros_like(bbox[:, 1:3]))
bbox[:, 1] = torch.min(bbox[:, 1], array_width)
bbox[:, 2] = torch.min(bbox[:, 2], array_height)
bbox[:, 3] = torch.min(bbox[:, 3], array_width - bbox[:, 1])
bbox[:, 4] = torch.min(bbox[:, 4], array_height - bbox[:, 2])
return bbox
def nonMaxSuppression(detected_bbox, iou=0.6):
"""
    Iterates over the bboxes to perform non-maximum suppression within each batch.
:param detected_bbox[0, :]: [batch_idx, top_left_corner_u, top_left_corner_v, width, height, predicted_class,
predicted class confidence, object_score])
:param iou: intersection over union, threshold for which the bbox are considered overlapping
"""
i_sample = 0
keep_bbox = []
while i_sample < detected_bbox.shape[0]:
same_batch_mask = detected_bbox[:, 0] == detected_bbox[i_sample, 0]
nms_input = detected_bbox[same_batch_mask][:, [1, 2, 3, 4, 7]].clone()
nms_input[:, [2, 3]] += nms_input[:, [0, 1]]
# (u, v) or (x, y) should not matter
keep_idx = torchvision.ops.nms(nms_input[:, :4], nms_input[:, 4], iou)
keep_bbox.append(detected_bbox[same_batch_mask][keep_idx])
i_sample += same_batch_mask.sum()
if len(keep_bbox) != 0:
filtered_bbox = torch.cat(keep_bbox, dim=0)
else:
filtered_bbox = torch.zeros([0, 8])
return filtered_bbox
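# Hedged usage sketch (not part of the original module): run detection and NMS on a
# dummy dense output. The 6x8 cell map, 240x304 input shape, 3 classes and the
# threshold of 0.0 (keep every cell) are illustrative assumptions.
if __name__ == '__main__':
    dummy_output = torch.randn(1, 6, 8, 2 * 5 + 3)  # nr_bbox=2, 3 hypothetical classes
    input_shape = torch.tensor([240., 304.])
    detections = yoloDetect(dummy_output, input_shape, threshold=0.0)
    print(nonMaxSuppression(detections, iou=0.6).shape)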
|
the-stack_106_18488
|
from __future__ import unicode_literals
import pytest
from prompt_toolkit_dev.application import Application
from prompt_toolkit_dev.application.current import set_app
from prompt_toolkit_dev.input.defaults import create_pipe_input
from prompt_toolkit_dev.key_binding.key_bindings import KeyBindings
from prompt_toolkit_dev.key_binding.key_processor import KeyPress, KeyProcessor
from prompt_toolkit_dev.keys import Keys
from prompt_toolkit_dev.layout import Layout, Window
from prompt_toolkit_dev.output import DummyOutput
class Handlers(object):
def __init__(self):
self.called = []
def __getattr__(self, name):
def func(event):
self.called.append(name)
return func
def set_dummy_app():
"""
Return a context manager that makes sure that this dummy application is
active. This is important, because we need an `Application` with
`is_done=False` flag, otherwise no keys will be processed.
"""
app = Application(
layout=Layout(Window()),
output=DummyOutput(),
input=create_pipe_input())
return set_app(app)
@pytest.fixture
def handlers():
return Handlers()
@pytest.fixture
def bindings(handlers):
bindings = KeyBindings()
bindings.add(
Keys.ControlX, Keys.ControlC)(handlers.controlx_controlc)
bindings.add(Keys.ControlX)(handlers.control_x)
bindings.add(Keys.ControlD)(handlers.control_d)
bindings.add(
Keys.ControlSquareClose, Keys.Any)(handlers.control_square_close_any)
return bindings
@pytest.fixture
def processor(bindings):
return KeyProcessor(bindings)
def test_remove_bindings(handlers):
with set_dummy_app():
h = handlers.controlx_controlc
h2 = handlers.controld
# Test passing a handler to the remove() function.
bindings = KeyBindings()
bindings.add(Keys.ControlX, Keys.ControlC)(h)
bindings.add(Keys.ControlD)(h2)
assert len(bindings.bindings) == 2
bindings.remove(h)
assert len(bindings.bindings) == 1
# Test passing a key sequence to the remove() function.
bindings = KeyBindings()
bindings.add(Keys.ControlX, Keys.ControlC)(h)
bindings.add(Keys.ControlD)(h2)
assert len(bindings.bindings) == 2
bindings.remove(Keys.ControlX, Keys.ControlC)
assert len(bindings.bindings) == 1
def test_feed_simple(processor, handlers):
with set_dummy_app():
processor.feed(KeyPress(Keys.ControlX, '\x18'))
processor.feed(KeyPress(Keys.ControlC, '\x03'))
processor.process_keys()
assert handlers.called == ['controlx_controlc']
def test_feed_several(processor, handlers):
with set_dummy_app():
        # Feed an unknown key first.
processor.feed(KeyPress(Keys.ControlQ, ''))
processor.process_keys()
assert handlers.called == []
        # Followed by a known key sequence.
processor.feed(KeyPress(Keys.ControlX, ''))
processor.feed(KeyPress(Keys.ControlC, ''))
processor.process_keys()
assert handlers.called == ['controlx_controlc']
# Followed by another unknown sequence.
processor.feed(KeyPress(Keys.ControlR, ''))
processor.feed(KeyPress(Keys.ControlS, ''))
        # Followed again by a known key sequence.
processor.feed(KeyPress(Keys.ControlD, ''))
processor.process_keys()
assert handlers.called == ['controlx_controlc', 'control_d']
def test_control_square_closed_any(processor, handlers):
with set_dummy_app():
processor.feed(KeyPress(Keys.ControlSquareClose, ''))
processor.feed(KeyPress('C', 'C'))
processor.process_keys()
assert handlers.called == ['control_square_close_any']
def test_common_prefix(processor, handlers):
with set_dummy_app():
# Sending Control_X should not yet do anything, because there is
# another sequence starting with that as well.
processor.feed(KeyPress(Keys.ControlX, ''))
processor.process_keys()
assert handlers.called == []
        # When another key is pressed, we know that we did not mean the longer
        # "ControlX ControlC" sequence, and the callbacks are called.
processor.feed(KeyPress(Keys.ControlD, ''))
processor.process_keys()
assert handlers.called == ['control_x', 'control_d']
def test_previous_key_sequence(processor):
"""
test whether we receive the correct previous_key_sequence.
"""
with set_dummy_app():
events = []
def handler(event):
events.append(event)
# Build registry.
registry = KeyBindings()
registry.add('a', 'a')(handler)
registry.add('b', 'b')(handler)
processor = KeyProcessor(registry)
# Create processor and feed keys.
processor.feed(KeyPress('a', 'a'))
processor.feed(KeyPress('a', 'a'))
processor.feed(KeyPress('b', 'b'))
processor.feed(KeyPress('b', 'b'))
processor.process_keys()
# Test.
assert len(events) == 2
assert len(events[0].key_sequence) == 2
assert events[0].key_sequence[0].key == 'a'
assert events[0].key_sequence[0].data == 'a'
assert events[0].key_sequence[1].key == 'a'
assert events[0].key_sequence[1].data == 'a'
assert events[0].previous_key_sequence == []
assert len(events[1].key_sequence) == 2
assert events[1].key_sequence[0].key == 'b'
assert events[1].key_sequence[0].data == 'b'
assert events[1].key_sequence[1].key == 'b'
assert events[1].key_sequence[1].data == 'b'
assert len(events[1].previous_key_sequence) == 2
assert events[1].previous_key_sequence[0].key == 'a'
assert events[1].previous_key_sequence[0].data == 'a'
assert events[1].previous_key_sequence[1].key == 'a'
assert events[1].previous_key_sequence[1].data == 'a'
|
the-stack_106_18489
|
import datetime
import logging
import pytz
import os
import random
import re
import time
from pathlib import Path
import logzero
import pandas as pd
import torch
import torch.nn as nn
import numpy as np
from sklearn.metrics import roc_auc_score
from torch.utils.data import DataLoader
from logzero import logger
from torch.utils.tensorboard import SummaryWriter
from torch.optim.lr_scheduler import ReduceLROnPlateau
from model import get_model
from dataset import SetiSimpleDataset, get_transforms
from config import load_config
from util import parse_args, get_folds
from optimizer import get_optimizer
from scheduler import get_scheduler
from module import train_fn, valid_fn, mixup_train_fn
from loss import FocalLoss
import torch.backends.cudnn as cudnn
import horovod.torch as hvd
import torch.utils.data.distributed
import cv2
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
from util import bn_to_syncbn, DummySummaryWriter
# Some of the code was adapted from the following URL
# https://www.kaggle.com/yasufuminakama/cassava-resnext50-32x4d-starter-training
ROOT = Path.cwd().parent
INPUT = ROOT / "input"
DATA = INPUT / "sbl"
TRAIN = DATA / "train"
TEST = DATA / "test"
def get_path_label(df: pd.DataFrame, img_dir: str):
"""Get file path and target info."""
path_label = {
"paths": [img_dir / f"{img_id[0]}/{img_id}.npy" for img_id in df["id"].values],
"labels": df[CLASSES].values.astype("f"),
"id": df["id"].values
}
return path_label
def seed_everything(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
class ReduceLROnPlateauPatch(ReduceLROnPlateau):
def get_lr(self):
return [ group['lr'] for group in self.optimizer.param_groups ]
def train_loop(conf, hvd, folds, fold, logger, log_basename, total_epochs,
new_train, new_test):
logger.info(f"=============== fold: {fold} training ===============")
if conf.ckpt_path:
conf.ckpt_path = re.sub('fold._best', f"fold{fold}_best", conf.ckpt_path)
logger.info(f"replace ckpt_path: {conf.ckpt_path}")
    # sort folds by id beforehand (needed for the DDP validation)
folds = folds.sort_values(by=['id']).reset_index(drop=True)
# loader
trn_idx = folds[folds['fold'] != fold].index
val_idx = folds[folds['fold'] == fold].index
train_folds = folds.loc[trn_idx].reset_index(drop=True)
valid_folds = folds.loc[val_idx].reset_index(drop=True)
if new_train:
valid_folds = pd.read_csv(DATA / 'train_labels.csv')
if new_test:
valid_folds = pd.read_csv(DATA / 'sample_submission.csv')
tb_logname = os.path.join(conf.log_dir, f"{log_basename}_fold{fold}")
if hvd.rank() == 0:
tb_writer = SummaryWriter(log_dir=tb_logname)
else:
tb_writer = DummySummaryWriter()
train_path_label = get_path_label(train_folds, TRAIN)
valid_path_label = get_path_label(valid_folds, TRAIN)
if new_train:
valid_path_label = get_path_label(valid_folds, TRAIN)
if new_test:
valid_path_label = get_path_label(valid_folds, TEST)
# pseudo label
if conf.pseudo_label:
pseudo_folds = pd.read_csv('pseudo_test_labels.csv')
pseudo_path_label = get_path_label(pseudo_folds, TEST)
train_path_label['paths'] = np.concatenate([train_path_label['paths'], pseudo_path_label['paths']])
train_path_label['labels'] = np.concatenate([train_path_label['labels'], pseudo_path_label['labels']])
train_path_label['id'] = np.concatenate([train_path_label['id'], pseudo_path_label['id']])
logger.info("use pseudo labeled data")
train_dataset = SetiSimpleDataset(paths=train_path_label['paths'],
labels=train_path_label['labels'],
ids=train_path_label['id'],
transform=get_transforms(conf, conf.train_trans_mode),
target_only=conf.target_only,
seed=conf.seed)
valid_dataset = SetiSimpleDataset(paths=valid_path_label['paths'],
labels=valid_path_label['labels'],
ids=valid_path_label['id'],
transform=get_transforms(conf, conf.valid_trans_mode),
target_only=conf.target_only,
with_id=True)
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset, num_replicas=hvd.size(), rank=hvd.rank())
valid_sampler = torch.utils.data.distributed.DistributedSampler(
valid_dataset, num_replicas=hvd.size(), rank=hvd.rank())
train_loader = DataLoader(train_dataset,
batch_size=conf.train_bs,
sampler=train_sampler,
num_workers=conf.num_workers,
pin_memory=conf.pin_memory,
drop_last=True)
valid_loader = DataLoader(valid_dataset,
batch_size=conf.valid_bs,
sampler=valid_sampler,
num_workers=conf.num_workers,
pin_memory=conf.pin_memory,
drop_last=False)
if conf.mixup:
        # generate a second train_dataset/loader pair for mixup
train_dataset2 = SetiSimpleDataset(paths=train_path_label['paths'],
labels=train_path_label['labels'],
ids=train_path_label['id'],
transform=get_transforms(conf, conf.train_trans_mode),
target_only=conf.target_only,
seed=conf.seed+1000)
train_sampler2 = torch.utils.data.distributed.DistributedSampler(
train_dataset2, num_replicas=hvd.size(), rank=hvd.rank())
train_loader2 = DataLoader(train_dataset2,
batch_size=conf.train_bs,
sampler=train_sampler2,
num_workers=conf.num_workers,
pin_memory=conf.pin_memory,
drop_last=True)
# update print_freq
if conf.print_freq == 0:
conf.print_freq = max(2, len(train_loader) // 10)
# model
device = torch.device(conf.device)
if conf.loss_type == 'bce':
criterion = nn.BCEWithLogitsLoss()
elif conf.loss_type == 'focal':
criterion = FocalLoss(gamma=conf.focal_loss_gamma)
else:
raise NotImplementedError(conf.loss_type)
model = get_model(conf, conf.backbone_model_name, logger)
if conf.sync_bn:
model.apply(bn_to_syncbn)
logger.info('convert bn to sync_bn')
if conf.overwrite_gem_p:
model.global_pool.p.data.fill_(conf.overwrite_gem_p)
logger.info(f"overwrite_gem_p: {conf.overwrite_gem_p}")
model = model.to(device)
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
parameters = [
{'params': model.parameters()}
]
named_parameters = list(model.named_parameters())
optimizer = get_optimizer(conf, parameters)
optimizer = hvd.DistributedOptimizer(
optimizer, named_parameters=named_parameters,
compression=hvd.Compression.none)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
scheduler = get_scheduler(conf, optimizer, train_loader, logger)
global_step = 0
best_score = 0
best_loss = np.inf
current_lr = scheduler.get_last_lr()
logger.info(f"lr: {current_lr}")
tb_writer.add_scalar('Other/LearningRate', current_lr[0], global_step)
best_y_true = None
best_y_preds = None
for epoch in range(conf.epochs):
start_time = time.time()
if conf.train:
if conf.mixup:
avg_loss, global_step = mixup_train_fn(conf, global_step,
train_loader, train_loader2,
model, criterion,
optimizer, epoch, scheduler, device, train_sampler, train_sampler2, logger, tb_writer)
else:
avg_loss, global_step = train_fn(conf, global_step, train_loader, model, criterion,
optimizer, epoch, scheduler, device, train_sampler, logger, tb_writer)
# val
avg_loss, score, y_true, y_preds = valid_fn(conf, global_step, valid_loader, model, criterion,
device, True, hvd, logger, tb_writer, new_test)
if isinstance(scheduler, ReduceLROnPlateau):
if conf.plateau_mode == 'min':
scheduler.step(avg_loss)
elif conf.plateau_mode == 'max':
scheduler.step(score)
current_lr = scheduler.get_last_lr()
logger.info(f"lr: {current_lr}")
tb_writer.add_scalar('Other/LearningRate', current_lr[0], global_step)
if conf.train:
if score > best_score:
best_score = score
logger.info(f'Fold {fold} Epoch {epoch+1} - Save Best Score: {best_score:.4f} Model')
if hvd.rank() == 0:
torch.save({'model': model.state_dict()},
f'{OUTPUT_DIR}/fold{fold}_best_score.pth')
best_y_true = y_true
best_y_preds = y_preds
if avg_loss < best_loss:
best_loss = avg_loss
logger.info(f'Fold {fold} Epoch {epoch+1} - Save Best Loss: {best_loss:.4f} Model')
if hvd.rank() == 0:
torch.save({'model': model.state_dict()},
f'{OUTPUT_DIR}/fold{fold}_best_loss.pth')
if hvd.rank() == 0:
torch.save({'model': model.state_dict()},
f'{OUTPUT_DIR}/fold{fold}_epoch{epoch}.pth')
else:
if score > best_score:
best_score = score
best_y_true = y_true
best_y_preds = y_preds
elapsed = time.time() - start_time
if conf.train:
logger.info(f'Fold {fold} Epoch {epoch+1} - AUROC score: {score:.4f} Best: {best_score:.4f} time: {elapsed:.0f}s')
logger.info(f'output_dir: {OUTPUT_DIR}')
else:
logger.info(f'AUROC score: {score:.4f}')
total_epochs -= 1
full_elapsed = total_epochs * int(elapsed)
time_delta = datetime.timedelta(seconds=full_elapsed)
now = datetime.datetime.now(pytz.timezone('Asia/Tokyo'))
end_time = now + time_delta
logger.info(f"Expected remaining seconds: {full_elapsed} sec")
logger.info(f"Expected end time: {end_time}")
valid_folds['preds'] = best_y_preds
return best_score, best_y_true, best_y_preds, valid_folds
if __name__ == '__main__':
args = parse_args()
conf = load_config(args.conf)
for (key, value) in args._get_kwargs():
if key in ['input_width', 'input_height',
'scale_width', 'scale_height',
'valid_bs', 'valid_trans_mode',
'ckpt_path', 'train_fold',
'overwrite_gem_p',
'tta_hflip', 'tta_vflip', 'tta_sigmoid',
'seed']:
if value:
setattr(conf, key, value)
if args.test or args.new_train or args.new_test:
conf.train = False
conf.epochs = 1
seed_everything(conf.seed)
hvd.init()
torch.manual_seed(conf.seed)
torch.cuda.set_device(hvd.local_rank())
torch.cuda.manual_seed(conf.seed)
cudnn.benchmark = True
FOLD_SEED = conf.fold_seed
CLASSES = ["target",]
N_FOLDS = conf.n_fold
formatter = logging.Formatter('%(message)s')
logzero.formatter(formatter)
if not os.path.exists(conf.log_dir):
os.makedirs(conf.log_dir, exist_ok=True)
if args.new_train:
log_basename = f"new_train_{conf.prefix}-{conf.backbone_model_name}-{datetime.datetime.now().strftime('%Y%m%d%H%M%S')}"
elif args.new_test:
log_basename = f"new_test_{conf.prefix}-{conf.backbone_model_name}-{datetime.datetime.now().strftime('%Y%m%d%H%M%S')}"
else:
log_basename = f"{conf.prefix}-{conf.backbone_model_name}-{datetime.datetime.now().strftime('%Y%m%d%H%M%S')}"
log_filename = f"{log_basename}.log"
logzero.logfile(os.path.join(conf.log_dir, log_filename))
    # stop file logging on every node except the main one
if hvd.rank() == 0:
logzero.logfile(os.path.join(conf.log_dir, log_filename))
else:
logzero.logfile('', disableStderrLogger=True)
logger.info(conf)
OUTPUT_DIR = f"{conf.model_dir}/{conf.prefix}_{conf.backbone_model_name}"
if hvd.rank() == 0:
os.makedirs(f"{OUTPUT_DIR}", exist_ok=True)
logger.info(f"output_dir: {OUTPUT_DIR}")
train = get_folds(conf, N_FOLDS, FOLD_SEED, DATA, logger)
trn_fold = [int(elem) for elem in conf.train_fold.split(',')]
total_epochs = conf.epochs * len(trn_fold)
oof_y_true = []
oof_y_preds = []
oof_df = pd.DataFrame()
submit_df_list = []
for fold in range(conf.n_fold):
if fold in trn_fold:
best_score, best_y_true, best_y_preds, _oof_df =\
train_loop(conf, hvd, train, fold, logger, log_basename, total_epochs,
args.new_train, args.new_test)
if args.new_train or args.new_test:
if args.ensemble_sigmoid:
_oof_df['preds'] = torch.tensor(_oof_df['preds'].values).sigmoid().numpy()
submit_df_list.append(_oof_df)
if args.new_test:
prefix = 'new_test'
elif args.new_train:
prefix = 'new_train'
elif args.test:
prefix = 'oof'
else:
prefix = 'oof'
_oof_df.to_csv(f"{OUTPUT_DIR}/{prefix}_fold{fold}.csv", index=False)
else:
oof_df = pd.concat([oof_df, _oof_df])
oof_y_true.append(best_y_true)
oof_y_preds.append(best_y_preds)
logger.info(f"fold{fold} Best Score: {best_score:.4f}")
total_epochs -= conf.epochs
if args.new_train or args.new_test:
sub_df = None
if not args.new_test:
for oof_df in submit_df_list:
if sub_df is not None:
sub_df['preds'] = sub_df['preds'] + oof_df['preds']
else:
sub_df = oof_df
score = roc_auc_score(sub_df.target.values, sub_df.preds.values)
logger.info(f"oof test score: {score}")
else:
for oof_df in submit_df_list:
if sub_df is not None:
sub_df['target'] = sub_df['target'] + oof_df['preds']
else:
oof_df = oof_df.drop('target', axis=1)
oof_df.columns = ['id', 'target']
sub_df = oof_df
if hvd.rank() == 0:
sub_df = sub_df.sort_values(by=['id']).reset_index(drop=True)
if args.new_train:
sub_df.to_csv(f"{OUTPUT_DIR}/new_train.csv", index=False)
if args.new_test:
sub_df.to_csv(f"{OUTPUT_DIR}/new_test.csv", index=False)
else:
if len(trn_fold) == N_FOLDS:
oof_y_true = np.concatenate(oof_y_true)
oof_y_preds = np.concatenate(oof_y_preds)
score = roc_auc_score(oof_y_true, oof_y_preds)
logger.info(f"oof score: {score}")
if hvd.rank() == 0:
oof_df = oof_df.sort_values(by=['id']).reset_index(drop=True)
oof_df.to_csv(f"{OUTPUT_DIR}/oof_df.csv", index=False)
logger.info(f"log saved: {os.path.join(conf.log_dir, log_filename)}")
|
the-stack_106_18490
|
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'hackernews.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'', include('stories.urls')),
)
if settings.DEBUG:
urlpatterns += patterns(
'django.views.static',
(r'media/(?P<path>.*)',
'serve',
{'document_root': settings.MEDIA_ROOT}),
)
|
the-stack_106_18491
|
# -*- coding: utf-8 -*-
from mamonsu.plugins.pgsql.plugin import PgsqlPlugin as Plugin
from .pool import Pooler
class Oldest(Plugin):
OldestXidSql = """
select
greatest(max(age(backend_xmin)), max(age(backend_xid)))
from pg_catalog.pg_stat_activity;
"""
OldestXidSql_bootstrap = """
select public.mamonsu_get_oldest_xid();
"""
OldestQuerySql = """
select
extract(epoch from max(now() - xact_start))
from pg_catalog.pg_stat_activity;
"""
OldestQuerySql_bootstrap = """
select public.mamonsu_get_oldest_query();
"""
DEFAULT_CONFIG = {
'max_xid_age': str(5000 * 60 * 60),
'max_query_time': str(5 * 60 * 60)
}
def run(self, zbx):
if Pooler.is_bootstraped() and Pooler.bootstrap_version_greater('2.3.2'):
xid = Pooler.query(self.OldestXidSql_bootstrap)[0][0]
query = Pooler.query(self.OldestQuerySql_bootstrap)[0][0]
else:
xid = Pooler.query(self.OldestXidSql)[0][0]
query = Pooler.query(self.OldestQuerySql)[0][0]
zbx.send('pgsql.oldest[xid_age]', xid)
zbx.send('pgsql.oldest[query_time]', query)
def graphs(self, template):
result = template.graph({
'name': 'PostgreSQL oldest query running time',
'items': [{
'key': 'pgsql.oldest[query_time]',
'color': '00CC00'
}]
})
result += template.graph({
'name': 'PostgreSQL age of oldest xid',
'items': [{
'key': 'pgsql.oldest[xid_age]',
'color': '00CC00'
}]
})
return result
def items(self, template):
return template.item({
'key': 'pgsql.oldest[xid_age]',
'name': 'PostgreSQL: age of oldest xid',
'value_type': Plugin.VALUE_TYPE.numeric_unsigned
}) + template.item({
'key': 'pgsql.oldest[query_time]',
'name': 'PostgreSQL: oldest query running time in sec',
'units': Plugin.UNITS.s
})
def triggers(self, template):
return template.trigger({
'name': 'PostgreSQL oldest xid is too big on {HOSTNAME}',
'expression': '{#TEMPLATE:pgsql.oldest[xid_age]'
'.last()}>' + self.plugin_config('max_xid_age')
}) + template.trigger({
'name': 'PostgreSQL query running is too old on {HOSTNAME}',
'expression': '{#TEMPLATE:pgsql.oldest[query_time]'
'.last()}>' + self.plugin_config('max_query_time')
})
|
the-stack_106_18493
|
import numpy as np
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
# NOTE: Make sure that the class is labeled 'class' in the data file
tpot_data = np.recfromcsv('PATH/TO/DATA/FILE', delimiter='COLUMN_SEPARATOR', dtype=np.float64)
features = np.delete(tpot_data.view(np.float64).reshape(tpot_data.size, -1), tpot_data.dtype.names.index('class'), axis=1)
training_features, testing_features, training_classes, testing_classes = \
train_test_split(features, tpot_data['class'], random_state=42)
exported_pipeline = XGBClassifier(learning_rate=0.01, max_depth=6, min_child_weight=2, n_estimators=100, nthread=1, subsample=0.55)
exported_pipeline.fit(training_features, training_classes)
results = exported_pipeline.predict(testing_features)
|
the-stack_106_18494
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import random
import numpy as np
from functools import reduce
from activators import SigmoidActivator, IdentityActivator
# Fully connected layer implementation
class FullConnectedLayer(object):
def __init__(self, input_size, output_size,
activator):
'''
        Constructor
        input_size: dimension of this layer's input vector
        output_size: dimension of this layer's output vector
        activator: activation function
'''
self.input_size = input_size
self.output_size = output_size
self.activator = activator
        # weight matrix W
self.W = np.random.uniform(-0.1, 0.1,
(output_size, input_size))
        # bias term b
self.b = np.zeros((output_size, 1))
        # output vector
self.output = np.zeros((output_size, 1))
def forward(self, input_array):
'''
        Forward pass
        input_array: input vector whose dimension must equal input_size
'''
        # Equation 2
self.input = input_array
self.output = self.activator.forward(
np.dot(self.W, input_array) + self.b)
def backward(self, delta_array):
'''
        Backward pass: compute the gradients of W and b
        delta_array: error term propagated back from the layer above
'''
        # Equation 8
self.delta = self.activator.backward(self.input) * np.dot(
self.W.T, delta_array)
self.W_grad = np.dot(delta_array, self.input.T)
self.b_grad = delta_array
def update(self, learning_rate):
'''
        Update the weights using gradient descent
'''
self.W += learning_rate * self.W_grad
self.b += learning_rate * self.b_grad
def dump(self):
print('W: %s\nb:%s' % (self.W, self.b))
# Neural network class
class Network(object):
def __init__(self, layers):
'''
        Constructor. layers: list of layer sizes, e.g. [8, 3, 8]
'''
self.layers = []
for i in range(len(layers) - 1):
self.layers.append(
FullConnectedLayer(
layers[i], layers[i+1],
SigmoidActivator()
)
)
def predict(self, sample):
'''
        Run the network forward to produce a prediction.
        sample: input sample
'''
output = sample
for layer in self.layers:
layer.forward(output)
output = layer.output
return output
def train(self, labels, data_set, rate, epoch):
'''
        Training loop.
        labels: sample labels
        data_set: input samples
        rate: learning rate
        epoch: number of training epochs
'''
for i in range(epoch):
for d in range(len(list(data_set))):
self.train_one_sample(labels[d],
data_set[d], rate)
def train_one_sample(self, label, sample, rate):
self.predict(sample)
self.calc_gradient(label)
self.update_weight(rate)
def calc_gradient(self, label):
delta = self.layers[-1].activator.backward(
self.layers[-1].output
) * (label - self.layers[-1].output)
for layer in self.layers[::-1]:
layer.backward(delta)
delta = layer.delta
return delta
def update_weight(self, rate):
for layer in self.layers:
layer.update(rate)
def dump(self):
for layer in self.layers:
layer.dump()
def loss(self, output, label):
return 0.5 * ((label - output) * (label - output)).sum()
def gradient_check(self, sample_feature, sample_label):
'''
        Numerical gradient check.
        network: the neural network object
        sample_feature: features of the sample
        sample_label: label of the sample
'''
        # Compute the gradient of every connection for the current sample
self.predict(sample_feature)
self.calc_gradient(sample_label)
        # Check each analytical gradient against a numerical estimate
epsilon = 10e-4
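        # Perturb each weight by ±epsilon and compare the resulting numerical
        # gradient with the gradient computed by backpropagation above.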
for fc in self.layers:
for i in range(fc.W.shape[0]):
for j in range(fc.W.shape[1]):
fc.W[i,j] += epsilon
output = self.predict(sample_feature)
err1 = self.loss(sample_label, output)
fc.W[i,j] -= 2*epsilon
output = self.predict(sample_feature)
err2 = self.loss(sample_label, output)
expect_grad = (err1 - err2) / (2 * epsilon)
fc.W[i,j] += epsilon
                    print('weights(%d,%d): expected - actual %.4e - %.4e' % (
i, j, expect_grad, fc.W_grad[i,j]))
from bp import train_data_set
def transpose(args):
return map(
lambda arg: map(
lambda line: np.array(line).reshape(len(line), 1)
, arg)
, args
)
class Normalizer(object):
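    # Encodes an integer in 0..255 as an 8-component vector (0.9 for a set bit,
    # 0.1 for a clear bit) and decodes such a vector back to the integer.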
def __init__(self):
self.mask = [
0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80
]
def norm(self, number):
data = list(map(lambda m: 0.9 if number & m else 0.1, self.mask))
return np.array(data).reshape(8, 1)
def denorm(self, vec):
binary = list(map(lambda i: 1 if i > 0.5 else 0, vec[:,0]))
for i in range(len(self.mask)):
binary[i] = binary[i] * self.mask[i]
return reduce(lambda x,y: x + y, binary)
def train_data_set():
normalizer = Normalizer()
data_set = []
labels = []
for i in range(0, 256):
n = normalizer.norm(i)
data_set.append(n)
labels.append(n)
return labels, data_set
def correct_ratio(network):
normalizer = Normalizer()
    correct = 0.0
for i in range(256):
if normalizer.denorm(network.predict(normalizer.norm(i))) == i:
correct += 1.0
print('correct_ratio: %.2f%%' % (correct / 256 * 100))
def test():
labels, data_set = list(transpose(train_data_set()))
labels=list(labels)
data_set=list(data_set)
net = Network([8, 3, 8])
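    # 8-3-8 auto-encoder task: the labels are the inputs themselves, so the
    # network must reproduce the 8-bit pattern through a 3-unit hidden layer.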
rate = 0.5
mini_batch = 20
epoch = 10
for i in range(epoch):
net.train(labels, list(data_set), rate, mini_batch)
print('after epoch %d loss: %f' % (
(i + 1),
net.loss(labels[-1], net.predict(data_set[-1]))
))
rate /= 2
correct_ratio(net)
def gradient_check():
'''
    Gradient check
'''
    labels, data_set = transpose(train_data_set())
    labels = list(labels)
    data_set = list(data_set)
net = Network([8, 3, 8])
net.gradient_check(data_set[0], labels[0])
return net
|
the-stack_106_18495
|
from scipy.optimize import fsolve
from matplotlib import cm, rcParams
from shapely import geometry
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import math, csv, os
""" ToDo : check if this is equivalent to the G-function for weak coupling """
c = ['#aa3863', '#d97020', '#ef9f07', '#449775', '#3b7d86']
rcParams.update({'figure.autolayout': True})
# equation of u1 that must be verified in the system with phi != 0, 1
def u1(phi, T, gamma, beta, I) :
if isinstance(phi, np.ndarray) :
res = (np.exp((1-phi)*T)*(2-2*I) + 2*I)/(1 + np.exp(-2*gamma*(1-phi)*T)) - gamma*beta
else :
res = (math.exp((1-phi)*T)*(2-2*I) + 2*I)/(1 + math.exp(-2*gamma*(1-phi)*T)) - gamma*beta
return res
def u(I, phi, T, gamma, beta) :
res = (np.exp((1-phi)*T)*(2-2*I) + 2*I)/(1 + np.exp(-2*gamma*(1-phi)*T)) - 1
return res
# equation of u1 that must be verified in the system with phi != 0, 1
def u2(phi, T, gamma, beta, I) :
if isinstance(phi, np.ndarray) :
res = (np.exp(phi*T)*(2-2*I) + 2*I)/(1 + np.exp(-2*gamma*phi*T)) - gamma*beta
else :
res = (math.exp(phi*T)*(2-2*I) + 2*I)/(1 + math.exp(-2*gamma*phi*T)) - gamma*beta
return res
def v(I, phi, T, gamma, beta) :
res = (np.exp(phi*T)*(2-2*I) + 2*I)/(1 + np.exp(-2*gamma*phi*T)) -1
return res
# next theoretical value of u2
def theo_u2(phi, T, gamma, beta, I) :
return math.exp(-T+phi*T)*((u1(phi, T, gamma, beta, I)+gamma*beta)/2 - I - math.exp(-2*gamma*(T-phi*T))*(u1(phi, T, gamma, beta, I)+gamma*beta)/2) + I
def if_eq(t, phi, Tm1, gamma, beta, I) :
return math.exp(-t)*((theo_u2(phi, Tm1, gamma, beta, I)+gamma*beta)/2 - I + math.exp(-2*gamma*t)*(theo_u2(phi, Tm1, gamma, beta, I)+gamma*beta)/2) + I - 1
# next theoretical value of u1
def theo_u1(phi, Tm1, gamma, beta, I) :
T = fsolve(if_eq, 1, args=(phi, Tm1, gamma, beta, I))
return math.exp(-T)*((theo_u2(phi, Tm1, gamma, beta, I)+gamma*beta)/2 - I - math.exp(-2*gamma*T)*(theo_u2(phi, Tm1, gamma, beta, I)+gamma*beta)/2) + I
def F_function(phi, T, gamma, beta, I) :
return -math.exp(-(1+2*gamma)*phi*T)*u2(phi,T, gamma, beta, I) - u1(phi,T, gamma, beta, I) +1 - math.exp(-(1+2*gamma)*phi*T)*gamma*beta
def tosolve(i, phi, T, gamma, beta, I) :
return [F_function(i[0], i[1], gamma, beta, I), theo_u2(phi, T, gamma, beta, I) - u2(i[0], i[1], gamma, beta, I)]
def F(T, phi, I) :
if isinstance(phi, np.ndarray) :
res = -np.exp(-(1+2*gamma)*phi*T)*u2(phi,T, gamma, beta, I) - u1(phi,T, gamma, beta, I) +1 - np.exp(-(1+2*gamma)*phi*T)*gamma*beta
else :
res = -math.exp(-(1+2*gamma)*phi*T)*u2(phi,T, gamma, beta, I) - u1(phi,T, gamma, beta, I) +1 - math.exp(-(1+2*gamma)*phi*T)*gamma*beta
return res
min_I = 2
with open('gamma_0.1.dat', newline='') as file:
datareader = csv.reader(file, delimiter=' ')
for row in datareader:
if float(row[1]) == 0.5 and float(row[0]) < min_I :
min_I = float(row[0])
# Only makes sense if I > I_low i.e. when forks stop existing (see bifurcation diagram code or figures) !!!
nb_values = 201
currents = np.linspace(min_I, 2, 201) # grid of currents I
phi = np.linspace(0, 1, nb_values) # phis
gamma = 0.1
beta = 0.2
diffs = []
cycles = []
#epsilon = 10**(-2)
for I in currents :
#T = np.zeros(nb_values)
#for i in range(nb_values) :
#for j in np.linspace(0, 10, 1000) :
#if F(j, phi[i], I) > -epsilon and F(j, phi[i], I) < epsilon :
#T[i] = j
#break
T = fsolve(F, np.ones(nb_values), args=(phi, I))
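    # fsolve can return negative (non-physical) periods; replace them with the
    # first positive root found, then clamp anything still negative to zero.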
T[T < 0] = T[np.where(T > 0)[0][0]]
T[T < 0] = 0
#if I < 1.1 :
#T[T == 1] = 0
diff = []
nb_cycles = []
print(I)
print('---')
for k in range(len(phi)):
if u1(phi[k], T[k], gamma, beta, I) + gamma*beta >= 1 or u2(phi[k], T[k], gamma, beta, I) + gamma*beta >= 1 :
nb_cycles.append(np.nan)
elif theo_u1(phi[k], T[k], gamma, beta, I) + gamma*beta >= 1 or theo_u2(phi[k], T[k], gamma, beta, I) + gamma*beta >= 1 or phi[k] == 0 or phi[k] == 1:
if phi[k] <= 0.5 :
nb_cycles.append(T[k])
elif phi[k] > 0.5 :
nb_cycles.append(T[k])
else :
[phi_nxt, T_nxt] = fsolve(tosolve, [phi[k], T[k]], args=(phi[k], T[k], gamma, beta, I))
if (phi[k] < 0.5 and phi_nxt-phi[k] < 0) or (phi[k] > 0.5 and phi_nxt-phi[k] > 0) :
nb_cycles.append(0)
phi_curr, T_curr = phi[k], T[k]
#print('---')
while phi_nxt >= 0 and phi_nxt <= 1 and phi_nxt != phi_curr:
#print("phi curr", phi_curr, T_curr)
#print("phi nxt", phi_nxt, T_nxt)
if phi_nxt >= 0 and phi_nxt <= 1 :
nb_cycles[-1] += T[k]
phi_curr, T_curr = phi_nxt, T_nxt
try:
[phi_nxt, T_nxt] = fsolve(tosolve, [phi_curr, T_curr], args=(phi_curr, T_curr, gamma, beta, I))
except OverflowError:
nb_cycles[-1] += T[k]
break
else :
nb_cycles.append(np.nan)
cycles.append(nb_cycles)
cycles = np.array(cycles)
with np.printoptions(threshold=np.inf):
print('nb_cycles ', nb_cycles)
print('cycles ', cycles)
print('phi ', phi)
print('currents ', currents)
# clev = np.arange(diffs.min(),diffs.max(),.001)
#plt.plot(phi, nb_cycles)
h = plt.contourf(currents, phi, np.transpose(cycles), cmap="viridis")
plt.show()
np.savez('mesh_cycles.npz', I = currents, phi = phi, cycles = np.transpose(cycles))
|