Dataset schema (column: type, observed range):
- hexsha: string, length 40
- size: int64, 1 to 1.03M
- ext: string, 10 classes
- lang: string, 1 class
- max_stars_repo_path: string, length 3 to 239
- max_stars_repo_name: string, length 5 to 130
- max_stars_repo_head_hexsha: string, length 40 to 78
- max_stars_repo_licenses: sequence, length 1 to 10
- max_stars_count: int64, 1 to 191k, nullable
- max_stars_repo_stars_event_min_datetime: string, length 24, nullable
- max_stars_repo_stars_event_max_datetime: string, length 24, nullable
- max_issues_repo_path: string, length 3 to 239
- max_issues_repo_name: string, length 5 to 130
- max_issues_repo_head_hexsha: string, length 40 to 78
- max_issues_repo_licenses: sequence, length 1 to 10
- max_issues_count: int64, 1 to 67k, nullable
- max_issues_repo_issues_event_min_datetime: string, length 24, nullable
- max_issues_repo_issues_event_max_datetime: string, length 24, nullable
- max_forks_repo_path: string, length 3 to 239
- max_forks_repo_name: string, length 5 to 130
- max_forks_repo_head_hexsha: string, length 40 to 78
- max_forks_repo_licenses: sequence, length 1 to 10
- max_forks_count: int64, 1 to 105k, nullable
- max_forks_repo_forks_event_min_datetime: string, length 24, nullable
- max_forks_repo_forks_event_max_datetime: string, length 24, nullable
- content: string, length 1 to 1.03M
- avg_line_length: float64, 1 to 958k
- max_line_length: int64, 1 to 1.03M
- alphanum_fraction: float64, 0 to 1
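The rows below follow this schema: one record of repository metadata per file, followed by the file content and its length statistics. As an illustrative sketch (the dataset identifier and filter values are placeholders, not taken from this document), a dump with these columns can be streamed and filtered with the Hugging Face datasets library:

# Illustrative sketch: stream a code dataset with the columns above and keep small Python files.
# "some-org/some-code-dataset" is a placeholder identifier, not the name of this dataset.
from datasets import load_dataset

rows = load_dataset("some-org/some-code-dataset", split="train", streaming=True)
for row in rows:
    if row["ext"] == "py" and row["size"] < 10_000:
        print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"], row["alphanum_fraction"])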
794737c2f1e4f82e7101097d981eb921626ef01a | 7,886 | py | Python | dev/breeze/src/airflow_breeze/build_image/prod/build_prod_image.py | robertoea/airflow | e0389c0d476ebf24ca795962e087f5c678c6c416 | [
"Apache-2.0"
] | null | null | null | dev/breeze/src/airflow_breeze/build_image/prod/build_prod_image.py | robertoea/airflow | e0389c0d476ebf24ca795962e087f5c678c6c416 | [
"Apache-2.0"
] | null | null | null | dev/breeze/src/airflow_breeze/build_image/prod/build_prod_image.py | robertoea/airflow | e0389c0d476ebf24ca795962e087f5c678c6c416 | [
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Command to build PROD image."""
import contextlib
import sys
from typing import Dict
from airflow_breeze.build_image.prod.build_prod_params import BuildProdParams
from airflow_breeze.utils.cache import synchronize_parameters_with_cache
from airflow_breeze.utils.console import console
from airflow_breeze.utils.docker_command_utils import (
construct_docker_build_command,
construct_empty_docker_build_command,
tag_and_push_image,
)
from airflow_breeze.utils.path_utils import AIRFLOW_SOURCES_ROOT, DOCKER_CONTEXT_DIR
from airflow_breeze.utils.registry import login_to_docker_registry
from airflow_breeze.utils.run_utils import filter_out_none, fix_group_permissions, run_command
REQUIRED_PROD_IMAGE_ARGS = [
"python_base_image",
"install_mysql_client",
"install_mssql_client",
"install_postgres_client",
"airflow_version",
"airflow_branch",
"airflow_extras",
"airflow_pre_cached_pip_packages",
"docker_context_files",
"additional_airflow_extras",
"additional_python_deps",
"additional_dev_apt_command",
"additional_dev_apt_deps",
"additional_dev_apt_env",
"additional_runtime_apt_command",
"additional_runtime_apt_deps",
"additional_runtime_apt_env",
"upgrade_to_newer_dependencies",
"constraints_github_repository",
"airflow_constraints",
"airflow_image_repository",
"airflow_image_date_created",
"build_id",
"airflow_image_readme_url",
"install_providers_from_sources",
"install_from_pypi",
"install_from_docker_context_files",
]
OPTIONAL_PROD_IMAGE_ARGS = [
"dev_apt_command",
"dev_apt_deps",
"runtime_apt_command",
"runtime_apt_deps",
]
def clean_docker_context_files():
"""
Cleans up docker context files folder - leaving only README.md there.
"""
with contextlib.suppress(FileNotFoundError):
context_files_to_delete = DOCKER_CONTEXT_DIR.glob('**/*')
for file_to_delete in context_files_to_delete:
if file_to_delete.name != 'README.md':
file_to_delete.unlink()
def check_docker_context_files(install_from_docker_context_files: bool):
"""
Sanity check - if we want to install from docker-context-files we expect some packages there but if
we don't - we don't expect them, and they might invalidate Docker cache.
This method exits with an error if what we see is unexpected for given operation.
:param install_from_docker_context_files: whether we want to install from docker-context-files
"""
context_file = DOCKER_CONTEXT_DIR.glob('**/*')
number_of_context_files = len(
[context for context in context_file if context.is_file() and context.name != 'README.md']
)
if number_of_context_files == 0:
if install_from_docker_context_files:
console.print('[bright_yellow]\nERROR! You want to install packages from docker-context-files')
console.print('[bright_yellow]\n but there are no packages to install in this folder.')
sys.exit(1)
else:
if not install_from_docker_context_files:
console.print(
'[bright_yellow]\n ERROR! There are some extra files in docker-context-files except README.md'
)
console.print('[bright_yellow]\nAnd you did not choose --install-from-docker-context-files flag')
console.print(
'[bright_yellow]\nThis might result in unnecessary cache invalidation and long build times'
)
console.print(
'[bright_yellow]\nExiting now \
- please restart the command with --cleanup-docker-context-files switch'
)
sys.exit(1)
def get_prod_image_build_params(parameters_passed: Dict[str, str]) -> BuildProdParams:
"""
Converts parameters received as dict into BuildProdParams. In case cacheable
parameters are missing, it reads the last used value for that parameter
from the cache and if it is not found, it uses default value for that parameter.
    This method updates the cache based on the parameters passed via the dict.
    :param parameters_passed: parameters to use when constructing BuildProdParams
"""
prod_image_params = BuildProdParams(**parameters_passed)
synchronize_parameters_with_cache(prod_image_params, parameters_passed)
return prod_image_params
def build_production_image(verbose: bool, dry_run: bool, **kwargs):
"""
Builds PROD image:
* fixes group permissions for files (to improve caching when umask is 002)
* converts all the parameters received via kwargs into BuildProdParams (including cache)
* prints info about the image to build
* removes docker-context-files if requested
* performs sanity check if the files are present in docker-context-files if expected
    * logs in to the docker registry on CI if the build cache is being prepared
    * removes the "tag" of the previously built image so that the inline cache uses only the remote image
    * constructs the docker build command to run based on the parameters passed
* run the build command
* update cached information that the build completed and saves checksums of all files
for quick future check if the build is needed
:param verbose: print commands when running
:param dry_run: do not execute "write" commands - just print what would happen
:param kwargs: arguments passed from the command
"""
fix_group_permissions()
parameters_passed = filter_out_none(**kwargs)
prod_image_params = get_prod_image_build_params(parameters_passed)
prod_image_params.print_info()
if prod_image_params.cleanup_docker_context_files:
clean_docker_context_files()
check_docker_context_files(prod_image_params.install_docker_context_files)
if prod_image_params.prepare_buildx_cache:
login_to_docker_registry(prod_image_params)
run_command(
["docker", "rmi", "--no-prune", "--force", prod_image_params.airflow_image_name],
verbose=verbose,
dry_run=dry_run,
cwd=AIRFLOW_SOURCES_ROOT,
text=True,
check=False,
)
console.print(f"\n[blue]Building PROD Image for Python {prod_image_params.python}\n")
if prod_image_params.empty_image:
console.print(f"\n[blue]Building empty PROD Image for Python {prod_image_params.python}\n")
cmd = construct_empty_docker_build_command(image_params=prod_image_params)
run_command(
cmd, input="FROM scratch\n", verbose=verbose, dry_run=dry_run, cwd=AIRFLOW_SOURCES_ROOT, text=True
)
else:
cmd = construct_docker_build_command(
image_params=prod_image_params,
verbose=verbose,
required_args=REQUIRED_PROD_IMAGE_ARGS,
optional_args=OPTIONAL_PROD_IMAGE_ARGS,
production_image=True,
)
run_command(cmd, verbose=verbose, dry_run=dry_run, cwd=AIRFLOW_SOURCES_ROOT, text=True)
if prod_image_params.push_image:
tag_and_push_image(image_params=prod_image_params, dry_run=dry_run, verbose=verbose)
| 42.171123 | 110 | 0.731423 |
794738ea585b5a400bc7bbf40914ef2fd1949e3c | 3,000 | py | Python | scripts/tool_shed/show_tool_dependency_installation_dir_contents.py | tsungjui/fusionline | 26d5d41e82ac83822ba41df1cd14c54afa112655 | [
"CC-BY-3.0"
] | 1 | 2019-11-03T11:45:43.000Z | 2019-11-03T11:45:43.000Z | scripts/tool_shed/show_tool_dependency_installation_dir_contents.py | tsungjui/fusionline | 26d5d41e82ac83822ba41df1cd14c54afa112655 | [
"CC-BY-3.0"
] | 4 | 2017-05-24T19:36:34.000Z | 2019-08-23T02:49:18.000Z | scripts/tool_shed/show_tool_dependency_installation_dir_contents.py | abretaud/galaxy | 1ad89511540e6800cd2d0da5d878c1c77d8ccfe9 | [
"CC-BY-3.0"
] | null | null | null |
import argparse
import os
import sys
sys.path.insert(1, os.path.join( os.path.dirname( __file__ ), os.pardir, os.pardir, 'lib' ) )
import galaxy.config as galaxy_config
import galaxy.model
import galaxy.model.tool_shed_install.mapping as install_mapper
class CleanUpDependencyApplication( object ):
"""Application that enables querying the database using the tool_shed_install model."""
def __init__( self, config ):
self.config = config
# Setup the database engine and ORM
self.model = install_mapper.init( self.config.database_connection, engine_options={}, create_tables=False )
@property
def sa_session( self ):
"""Returns a SQLAlchemy session."""
return self.model.context.current
def shutdown( self ):
pass
def main( args, app ):
if not os.path.exists( args.basepath ):
print 'Tool dependency base path %s does not exist.' % str( args.basepath )
return
print 'Checking tool dependency path %s' % args.basepath
tool_dependency_dirs = get_tool_dependency_dirs( app )
for tool_dependency_dir in tool_dependency_dirs:
path = os.path.join( args.basepath, tool_dependency_dir )
if os.path.exists( path ):
path_contents = os.listdir( path )
if len( path_contents ) > 0:
print 'Found non-empty tool dependency installation directory %s.' % path
print 'Directory has the following contents: \n %s' % '\n '.join( path_contents )
def get_tool_dependency_dirs( app ):
dependency_paths = []
for tool_dependency in app.sa_session.query( galaxy.model.tool_shed_install.ToolDependency ).all():
dependency_paths.append( tool_dependency.installation_directory( app ) )
return dependency_paths
if __name__ == '__main__':
    description = 'Clean out or list the contents of any tool dependency directory under the provided '
description += 'tool dependency path. Remove any non-empty directories found if the '
description += '--delete command line argument is provided.'
parser = argparse.ArgumentParser( description=description )
parser.add_argument( '--basepath',
dest='basepath',
required=True,
action='store',
metavar='name',
help='The base path where tool dependencies are installed.' )
parser.add_argument( '--dburi',
dest='dburi',
required=True,
action='store',
metavar='dburi',
help='The database URI to connect to.' )
args = parser.parse_args()
database_connection = args.dburi
config_dict = dict( database_connection=database_connection, tool_dependency_dir=args.basepath )
config = galaxy_config.Configuration( **config_dict )
app = CleanUpDependencyApplication( config )
sys.exit( main( args, app ) )
| 40.540541 | 115 | 0.647667 |
7947390bf4953b970f2f05aec02dce3c55df7142 | 5,061 | py | Python | src/podcastClasses.py | jlhourENSAE/podcast-app | 9d92165f79aed2b87702d30a1ae0cc9d94059d37 | [
"MIT"
] | null | null | null | src/podcastClasses.py | jlhourENSAE/podcast-app | 9d92165f79aed2b87702d30a1ae0cc9d94059d37 | [
"MIT"
] | 3 | 2021-04-11T19:23:45.000Z | 2021-04-18T19:04:05.000Z | src/podcastClasses.py | jlhourENSAE/podcast-app | 9d92165f79aed2b87702d30a1ae0cc9d94059d37 | [
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Define the podcast objects
Created on Sat Apr 10 21:10:06 2021
@author: jeremylhour
"""
import yaml
import feedparser
import time
from datetime import datetime
from tinydb import Query
from src.database import getDataBase
# --------------------------
# Object classes definitions
# --------------------------
class Podcast():
"""
Podcast:
Podcast object, created from url of the RSS feed
"""
def __init__(self, url):
"""
init the object and directly parse the url
@param url (str): url for the RSS feed of the podcast
"""
self.url = url
self.feed = feedparser.parse(self.url)
self.title = self.feed.feed.get('title')
def getLastEpisode(self):
"""
getLastEpisode:
        A generator that yields this podcast's episodes from most recent to oldest.
        For each episode, the saved playback timestamp is loaded from the database.
"""
for feedEntry in self.feed.entries:
publishedDate = datetime.fromtimestamp(time.mktime(feedEntry.published_parsed))
podcastEpisode = Episode(
podcastName=self.title,
title=feedEntry['title'],
date=publishedDate,
summary=feedEntry['summary'],
audioUrl=_extractAudioUrl(feedEntry)
)
podcastEpisode.loadFromDataBase()
yield podcastEpisode
class Episode():
"""
Episode:
A class for episodes from a podcast
"""
def __init__(self, podcastName, title, date, summary, audioUrl):
"""
@param podcastName (str): title of the podcast
@param title (str): title of the episode
@param date (datetime.datetime): date of publication
@param summary (str): summary of the podcast
        @param audioUrl (str): url to the audio file
"""
self.podcastName = podcastName
self.title = title
self.date = date
self.summary = summary
self.audioUrl = audioUrl
self.timestamp = 0
def displayInfos(self):
"""
displayInfos:
print episode infos to the screen
"""
print('\n')
print(f'Title : \n {self.title}')
print(f'Date : \n {self.date.strftime("%d %b %Y, %H:%M")}')
print(f'Summary : \n {self.summary} \n')
db = getDataBase()
User = Query()
result = db.get(User.audioUrl == self.audioUrl)
if result is None:
print(">> This is a new episode.")
def toDict(self):
"""
toDict:
passes arg to dict, so it can be saved to database
"""
dico = {
'podcastName': self.podcastName,
'title': self.title,
'date': self.date.strftime("%d/%m/%Y, %H:%M:%S"),
'audioUrl': self.audioUrl,
'timestamp': self.timestamp
}
return dico
def loadFromDataBase(self):
"""
loadFromDataBase:
check if the episode already exists in the database,
and if so, loads the correct timestamp.
"""
db = getDataBase()
User = Query()
result = db.get(User.audioUrl == self.audioUrl)
if result is not None:
self.updateTimestamp(result['timestamp'])
return None
def saveToDataBase(self):
"""
saveToDataBase:
save the current episode to database
"""
db = getDataBase()
dico = self.toDict()
User = Query()
db.upsert(dico, User.audioUrl == self.audioUrl) # audioUrl is the key of the database
def updateTimestamp(self, newTimestamp):
"""
updateTimestamp:
update the Timestamp when the podcast stops playing
@param newTimestamp (int): new starting time
"""
self.timestamp = newTimestamp
def resetTimestamp(self):
"""
resetTimestamp:
reset the timestamp
"""
self.timestamp = 0
# -------
# Utils
# -------
def _extractAudioUrl(feedEntry):
"""
extractAudioUrl:
extract the url of the audio episode from the feed entry
@param feedEntry (dict): a dict that might have an url linking to an audio
"""
for link in feedEntry['links']:
if 'audio' in link['type']:
audioLink = link['href']
return audioLink
if __name__=='__main__':
print("="*80)
print("THIS IS A TEST")
print("="*80)
config_file = 'subscriptions.yml'
with open(config_file, 'r') as stream:
config = yaml.safe_load(stream)
url = config['subscriptions']['Flagrant 2']
podcast = Podcast(url)
print(f'The current podcast is: {podcast.title}')
history = podcast.getLastEpisode()
print('Here is the most recent episode')
newEpisode = next(history)
newEpisode.displayInfos()
print('Here is the second most recent episode')
newEpisode2 = next(history)
    newEpisode2.displayInfos()
| 27.356757 | 93 | 0.571429 |
79473978313615c383a995dadf7b25fe16e7ddac | 5,072 | py | Python | extractor.py | jan-gerling/code2vec | 504ad6772f681c09cfe4864624c3553d0ee26dc4 | [
"MIT"
] | null | null | null | extractor.py | jan-gerling/code2vec | 504ad6772f681c09cfe4864624c3553d0ee26dc4 | [
"MIT"
] | null | null | null | extractor.py | jan-gerling/code2vec | 504ad6772f681c09cfe4864624c3553d0ee26dc4 | [
"MIT"
] | null | null | null |
import os
import time
from py4j.java_gateway import JavaGateway, GatewayParameters
class Extractor:
def __init__(self, config, jar_path, max_path_length, max_path_width):
self.config = config
self.max_path_length = max_path_length
self.max_path_width = max_path_width
self.jar_path = jar_path
def extract_processed(self, out, err, hash_to_string_dict, result):
output = out.splitlines()
if len(output) == 0:
raise ValueError(err)
for i, line in enumerate(output):
parts = line.rstrip().split(' ')
method_name = parts[0]
current_result_line_parts = [method_name]
contexts = parts[1:]
for context in contexts[:self.config.MAX_CONTEXTS]:
context_parts = context.split(',')
context_word1 = context_parts[0]
context_path = context_parts[1]
context_word2 = context_parts[2]
hashed_path = str(self.java_string_hashcode(context_path))
hash_to_string_dict[hashed_path] = context_path
current_result_line_parts += ['%s,%s,%s' % (context_word1, hashed_path, context_word2)]
space_padding = ' ' * (self.config.MAX_CONTEXTS - len(contexts))
result_line = ' '.join(current_result_line_parts) + space_padding
result.append(result_line)
return result, hash_to_string_dict
def extract_java(self, path, hash_to_string_dict, result):
gateway = JavaGateway(gateway_parameters=GatewayParameters(port=25335))
javaextractor = gateway.entry_point
f = open(path, "r", encoding="utf8")
code = f.read()
f.close()
out = javaextractor.extractCode(self.max_path_length, self.max_path_width, code)
return self.extract_processed(str(out), "", hash_to_string_dict, result)
def validateInput(self, path):
failingFiles = []
for (dirpath, dirnames, filenames) in os.walk(path):
print("Validating input at:", dirpath)
for filename in filenames:
filepath = os.path.normpath(dirpath + '/' + filename)
if os.path.isfile(filepath):
currentResult = True
gateway = JavaGateway(gateway_parameters=GatewayParameters(port=25335))
syntaxChecker = gateway.entry_point
f = open(filepath, "r", encoding="utf8")
currentFile = True
while currentFile:
line1 = f.readline()
line2 = f.readline()
currentFile = line2 and line1
if len(line1) > 1 and len(line2) > 1:
if not syntaxChecker.validSyntax(line1 + line2):
currentResult = False
gateway.close()
f.close()
if not currentResult:
failingFiles.append(filename)
if len(failingFiles) > 0:
print("Input validation failed for:", failingFiles)
        return len(failingFiles) == 0
def extract_paths(self, inputType, path):
if inputType == '--dir' and self.validateInput(path):
result = []
hash_to_string_dict = {}
for (dirpath, dirnames, filenames) in os.walk(path):
# print("Processing all java files at", dirpath, '.')
for filename in filenames:
startTime = time.time()
filepath = os.path.normpath(dirpath + '/' + filename)
if os.path.isfile(filepath):
result, hash_to_string_dict = self.extract_java(dirpath + '/' + filename, hash_to_string_dict, result)
endTime = time.time()
executionTime = endTime - startTime
# print("Processing", filename, 'at', dirpath, 'took', round(executionTime, 3), 'seconds.')
# else:
# print("Incorrect filepath:", filepath)
# print("Processed all java files at", dirpath, '.')
return result, hash_to_string_dict
elif inputType == '--file':
return self.extract_java(path, {}, [])
elif inputType == '--processed':
print("Read processed java code from:", path)
f = open(path, "r", encoding="utf8")
out = f.read()
f.close()
return self.extract_processed(out, "", {}, [])
else:
raise ValueError("Invalid input with: ", inputType, "at", path)
@staticmethod
def java_string_hashcode(s):
"""
Imitating Java's String#hashCode, because the model is trained on hashed paths but we wish to
Present the path attention on un-hashed paths.
"""
h = 0
for c in s:
h = (31 * h + ord(c)) & 0xFFFFFFFF
return ((h + 0x80000000) & 0xFFFFFFFF) - 0x80000000
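# Illustrative check (not part of the original file): Java computes "a".hashCode() == 97 and
# "ab".hashCode() == 3105, and the imitation above agrees with those values on plain ASCII input.
if __name__ == '__main__':
    assert Extractor.java_string_hashcode("a") == 97
    assert Extractor.java_string_hashcode("ab") == 3105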
| 44.491228 | 126 | 0.558951 |
794739d7b1a17f885cf767d02cf06645ed163c20 | 7,987 | py | Python | docs/conf.py | mirzazulfan/nsupdate.info | fdd12e8f47d084969e23517fce4b8efa3212dd9e | [
"BSD-3-Clause"
] | 774 | 2015-01-01T23:24:50.000Z | 2022-03-29T01:40:41.000Z | docs/conf.py | mirzazulfan/nsupdate.info | fdd12e8f47d084969e23517fce4b8efa3212dd9e | [
"BSD-3-Clause"
] | 272 | 2015-01-02T12:23:41.000Z | 2022-02-21T14:18:11.000Z | docs/conf.py | mirzazulfan/nsupdate.info | fdd12e8f47d084969e23517fce4b8efa3212dd9e | [
"BSD-3-Clause"
] | 100 | 2015-03-05T15:11:09.000Z | 2022-03-09T18:39:39.000Z |
# -*- coding: utf-8 -*-
#
# nsupdate.info documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 28 02:21:52 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'nsupdate.info'
copyright = u'2013-2018, The nsupdate.info Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from nsupdate import version as nsupdate_version
# The short X.Y version.
version = '%d.%d' % nsupdate_version[:2]
# The full version, including alpha/beta/rc tags.
release = str(nsupdate_version)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%Y-%m-%d'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'nsupdateinfodoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'nsupdateinfo.tex', u'nsupdate.info Documentation',
u'nsupdate.info team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'nsupdateinfo', u'nsupdate.info Documentation',
[u'nsupdate.info team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'nsupdateinfo', u'nsupdate.info Documentation',
u'nsupdate.info team', 'nsupdateinfo', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| 32.6 | 100 | 0.718042 |
79473a043b362f23e6ca4da702b57ea2448341fd | 663 | py | Python | adv/xander.py | Zeiin/dl | bce5e239dc751baa9266aa5adbe7c8d078d8a9ac | [
"Apache-2.0"
] | null | null | null | adv/xander.py | Zeiin/dl | bce5e239dc751baa9266aa5adbe7c8d078d8a9ac | [
"Apache-2.0"
] | null | null | null | adv/xander.py | Zeiin/dl | bce5e239dc751baa9266aa5adbe7c8d078d8a9ac | [
"Apache-2.0"
] | null | null | null |
from core.advbase import *
from slot.a import *
def module():
return Xander
class Xander(Adv):
comment = 'c2+fs'
a3 = ('fs',0.50)
conf = {}
conf['slots.a'] = TSO()+Primal_Crisis()
conf['slots.frostbite.a'] = conf['slots.a']
conf['acl'] = """
`dragon.act('c3 s end')
`s3, not self.s3_buff
`s1, fsc
`s2, fsc
`fs, x=2
"""
coab = ['Blade', 'Yurius', 'Dagger']
def s1_proc(self, e):
self.dmg_make(f'o_{e.name}_boost',self.conf[f'{e.name}.dmg']*0.05*self.buffcount)
if __name__ == '__main__':
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv)
| 21.387097 | 89 | 0.564103 |
79473c134741fae3ac0e792d8acc9fd9c32902d7 | 2,691 | py | Python | PlotlyandPython/Lessons/(06) Barcharts/Notebooks/Python Scripts/Barcharts (01) - What is a barchart.py | peternewman22/Python_Courses | 07a798b6f264fc6069eb1205c9d429f00fb54bc5 | [
"MIT"
] | null | null | null | PlotlyandPython/Lessons/(06) Barcharts/Notebooks/Python Scripts/Barcharts (01) - What is a barchart.py | peternewman22/Python_Courses | 07a798b6f264fc6069eb1205c9d429f00fb54bc5 | [
"MIT"
] | null | null | null | PlotlyandPython/Lessons/(06) Barcharts/Notebooks/Python Scripts/Barcharts (01) - What is a barchart.py | peternewman22/Python_Courses | 07a798b6f264fc6069eb1205c9d429f00fb54bc5 | [
"MIT"
] | null | null | null |
# coding: utf-8
# # Barcharts (1) - What is a barchart?
#
# In this section we're going to learn how to create barcharts with Plotly. A barchart is used to display categorical data, with the size of the bar representing the quantity (or sometimes proportion) in that particular category. Barcharts make it easy to compare different categories because we can easily assess which bars are longer than others.
#
# Barcharts can be horizontal or vertical. The example below from Plotly user <a href="https://plot.ly/~elliotk">elliotk</a> shows the reasons people reject an employment offer:
# In[7]:
import plotly.plotly as py
import plotly.offline as pyo
pyo.init_notebook_mode()
pyo.iplot(py.get_figure("elliotk", 21))
py.image.save_as(py.get_figure("elliotk", 21), r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(06) Barcharts\Notebooks\images\Barcharts (01) - What is a barchart\pyo.iplot-0.png")
#
#
# ### Stacked bar charts
#
# We can also use bar charts to show more complex categorical data, by stacking the bars, as in this example from the Higher Education Funding Council for England which shows the proportion of staff by gender and job type:
# In[6]:
pyo.iplot(py.get_figure("hefceplots", 33))
py.image.save_as(py.get_figure("hefceplots", 33), r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(06) Barcharts\Notebooks\images\Barcharts (01) - What is a barchart\pyo.iplot-1.png")
#
# ### Grouped bar charts
# Bar charts can also be grouped, as in this example from the ONS which shows people's feelings about financial debt:
# <img src="http://visual.ons.gov.uk/wp-content/uploads/2016/01/Debt-Week_FINAL-051.png"/>
#
# It's also conceptually possible to create a stacked, grouped bar chart. However, at the time of writing, Plotly does not have this functionality.
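# As a quick, self-contained preview (this cell is not part of the original lesson and uses made-up
# numbers rather than the NASA data), a grouped bar chart can be built directly from Bar traces;
# changing barmode to 'stack' stacks the same bars instead:
# In[ ]:
import plotly.graph_objs as go
trace_a = go.Bar(x=['Category 1', 'Category 2', 'Category 3'], y=[10, 15, 7], name='Group A')
trace_b = go.Bar(x=['Category 1', 'Category 2', 'Category 3'], y=[12, 9, 11], name='Group B')
fig = go.Figure(data=[trace_a, trace_b], layout=go.Layout(barmode='group'))
pyo.iplot(fig)  # pyo.init_notebook_mode() was already called earlier in this notebook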
#
#
# ### What will I learn in this section?
#
# In this section you'll learn how to create vertical and horizontal bar charts, as well as stacked, grouped, and stacked proportional bar charts. You'll also learn about the different styling options available for bar charts, and how to combine different types of traces on the same plot; we'll combine a Bar Chart with a Scatterplot. Finally, we'll use the Plotly <code>make_subplots()</code> function to create our first dashboard. Throughout this section we'll be using data from NASA on the number of meteorites that have been found across the world.
# If you have any questions, please ask in the comments section or email <a href="mailto:[email protected]">[email protected]</a>
| 58.5 | 606 | 0.765515 |
79473c6409a3ff5078c77ace43099bb2c09fd454 | 23,355 | py | Python | sdk/python/pulumi_azure_native/compute/v20210301/virtual_machine_scale_set_vm_extension.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/compute/v20210301/virtual_machine_scale_set_vm_extension.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/compute/v20210301/virtual_machine_scale_set_vm_extension.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['VirtualMachineScaleSetVMExtensionArgs', 'VirtualMachineScaleSetVMExtension']
@pulumi.input_type
class VirtualMachineScaleSetVMExtensionArgs:
def __init__(__self__, *,
instance_id: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
vm_scale_set_name: pulumi.Input[str],
auto_upgrade_minor_version: Optional[pulumi.Input[bool]] = None,
enable_automatic_upgrade: Optional[pulumi.Input[bool]] = None,
force_update_tag: Optional[pulumi.Input[str]] = None,
instance_view: Optional[pulumi.Input['VirtualMachineExtensionInstanceViewArgs']] = None,
protected_settings: Optional[Any] = None,
publisher: Optional[pulumi.Input[str]] = None,
settings: Optional[Any] = None,
type: Optional[pulumi.Input[str]] = None,
type_handler_version: Optional[pulumi.Input[str]] = None,
vm_extension_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a VirtualMachineScaleSetVMExtension resource.
:param pulumi.Input[str] instance_id: The instance ID of the virtual machine.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] vm_scale_set_name: The name of the VM scale set.
:param pulumi.Input[bool] auto_upgrade_minor_version: Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.
:param pulumi.Input[bool] enable_automatic_upgrade: Indicates whether the extension should be automatically upgraded by the platform if there is a newer version of the extension available.
:param pulumi.Input[str] force_update_tag: How the extension handler should be forced to update even if the extension configuration has not changed.
:param pulumi.Input['VirtualMachineExtensionInstanceViewArgs'] instance_view: The virtual machine extension instance view.
:param Any protected_settings: The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all.
:param pulumi.Input[str] publisher: The name of the extension handler publisher.
:param Any settings: Json formatted public settings for the extension.
:param pulumi.Input[str] type: Specifies the type of the extension; an example is "CustomScriptExtension".
:param pulumi.Input[str] type_handler_version: Specifies the version of the script handler.
:param pulumi.Input[str] vm_extension_name: The name of the virtual machine extension.
"""
pulumi.set(__self__, "instance_id", instance_id)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "vm_scale_set_name", vm_scale_set_name)
if auto_upgrade_minor_version is not None:
pulumi.set(__self__, "auto_upgrade_minor_version", auto_upgrade_minor_version)
if enable_automatic_upgrade is not None:
pulumi.set(__self__, "enable_automatic_upgrade", enable_automatic_upgrade)
if force_update_tag is not None:
pulumi.set(__self__, "force_update_tag", force_update_tag)
if instance_view is not None:
pulumi.set(__self__, "instance_view", instance_view)
if protected_settings is not None:
pulumi.set(__self__, "protected_settings", protected_settings)
if publisher is not None:
pulumi.set(__self__, "publisher", publisher)
if settings is not None:
pulumi.set(__self__, "settings", settings)
if type is not None:
pulumi.set(__self__, "type", type)
if type_handler_version is not None:
pulumi.set(__self__, "type_handler_version", type_handler_version)
if vm_extension_name is not None:
pulumi.set(__self__, "vm_extension_name", vm_extension_name)
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> pulumi.Input[str]:
"""
The instance ID of the virtual machine.
"""
return pulumi.get(self, "instance_id")
@instance_id.setter
def instance_id(self, value: pulumi.Input[str]):
pulumi.set(self, "instance_id", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="vmScaleSetName")
def vm_scale_set_name(self) -> pulumi.Input[str]:
"""
The name of the VM scale set.
"""
return pulumi.get(self, "vm_scale_set_name")
@vm_scale_set_name.setter
def vm_scale_set_name(self, value: pulumi.Input[str]):
pulumi.set(self, "vm_scale_set_name", value)
@property
@pulumi.getter(name="autoUpgradeMinorVersion")
def auto_upgrade_minor_version(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.
"""
return pulumi.get(self, "auto_upgrade_minor_version")
@auto_upgrade_minor_version.setter
def auto_upgrade_minor_version(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "auto_upgrade_minor_version", value)
@property
@pulumi.getter(name="enableAutomaticUpgrade")
def enable_automatic_upgrade(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates whether the extension should be automatically upgraded by the platform if there is a newer version of the extension available.
"""
return pulumi.get(self, "enable_automatic_upgrade")
@enable_automatic_upgrade.setter
def enable_automatic_upgrade(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_automatic_upgrade", value)
@property
@pulumi.getter(name="forceUpdateTag")
def force_update_tag(self) -> Optional[pulumi.Input[str]]:
"""
How the extension handler should be forced to update even if the extension configuration has not changed.
"""
return pulumi.get(self, "force_update_tag")
@force_update_tag.setter
def force_update_tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "force_update_tag", value)
@property
@pulumi.getter(name="instanceView")
def instance_view(self) -> Optional[pulumi.Input['VirtualMachineExtensionInstanceViewArgs']]:
"""
The virtual machine extension instance view.
"""
return pulumi.get(self, "instance_view")
@instance_view.setter
def instance_view(self, value: Optional[pulumi.Input['VirtualMachineExtensionInstanceViewArgs']]):
pulumi.set(self, "instance_view", value)
@property
@pulumi.getter(name="protectedSettings")
def protected_settings(self) -> Optional[Any]:
"""
The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all.
"""
return pulumi.get(self, "protected_settings")
@protected_settings.setter
def protected_settings(self, value: Optional[Any]):
pulumi.set(self, "protected_settings", value)
@property
@pulumi.getter
def publisher(self) -> Optional[pulumi.Input[str]]:
"""
The name of the extension handler publisher.
"""
return pulumi.get(self, "publisher")
@publisher.setter
def publisher(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "publisher", value)
@property
@pulumi.getter
def settings(self) -> Optional[Any]:
"""
Json formatted public settings for the extension.
"""
return pulumi.get(self, "settings")
@settings.setter
def settings(self, value: Optional[Any]):
pulumi.set(self, "settings", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the type of the extension; an example is "CustomScriptExtension".
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="typeHandlerVersion")
def type_handler_version(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the version of the script handler.
"""
return pulumi.get(self, "type_handler_version")
@type_handler_version.setter
def type_handler_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type_handler_version", value)
@property
@pulumi.getter(name="vmExtensionName")
def vm_extension_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the virtual machine extension.
"""
return pulumi.get(self, "vm_extension_name")
@vm_extension_name.setter
def vm_extension_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vm_extension_name", value)
class VirtualMachineScaleSetVMExtension(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auto_upgrade_minor_version: Optional[pulumi.Input[bool]] = None,
enable_automatic_upgrade: Optional[pulumi.Input[bool]] = None,
force_update_tag: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
instance_view: Optional[pulumi.Input[pulumi.InputType['VirtualMachineExtensionInstanceViewArgs']]] = None,
protected_settings: Optional[Any] = None,
publisher: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
settings: Optional[Any] = None,
type: Optional[pulumi.Input[str]] = None,
type_handler_version: Optional[pulumi.Input[str]] = None,
vm_extension_name: Optional[pulumi.Input[str]] = None,
vm_scale_set_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Describes a VMSS VM Extension.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] auto_upgrade_minor_version: Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.
:param pulumi.Input[bool] enable_automatic_upgrade: Indicates whether the extension should be automatically upgraded by the platform if there is a newer version of the extension available.
:param pulumi.Input[str] force_update_tag: How the extension handler should be forced to update even if the extension configuration has not changed.
:param pulumi.Input[str] instance_id: The instance ID of the virtual machine.
:param pulumi.Input[pulumi.InputType['VirtualMachineExtensionInstanceViewArgs']] instance_view: The virtual machine extension instance view.
:param Any protected_settings: The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all.
:param pulumi.Input[str] publisher: The name of the extension handler publisher.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param Any settings: Json formatted public settings for the extension.
:param pulumi.Input[str] type: Specifies the type of the extension; an example is "CustomScriptExtension".
:param pulumi.Input[str] type_handler_version: Specifies the version of the script handler.
:param pulumi.Input[str] vm_extension_name: The name of the virtual machine extension.
:param pulumi.Input[str] vm_scale_set_name: The name of the VM scale set.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: VirtualMachineScaleSetVMExtensionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Describes a VMSS VM Extension.
:param str resource_name: The name of the resource.
:param VirtualMachineScaleSetVMExtensionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(VirtualMachineScaleSetVMExtensionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auto_upgrade_minor_version: Optional[pulumi.Input[bool]] = None,
enable_automatic_upgrade: Optional[pulumi.Input[bool]] = None,
force_update_tag: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
instance_view: Optional[pulumi.Input[pulumi.InputType['VirtualMachineExtensionInstanceViewArgs']]] = None,
protected_settings: Optional[Any] = None,
publisher: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
settings: Optional[Any] = None,
type: Optional[pulumi.Input[str]] = None,
type_handler_version: Optional[pulumi.Input[str]] = None,
vm_extension_name: Optional[pulumi.Input[str]] = None,
vm_scale_set_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = VirtualMachineScaleSetVMExtensionArgs.__new__(VirtualMachineScaleSetVMExtensionArgs)
__props__.__dict__["auto_upgrade_minor_version"] = auto_upgrade_minor_version
__props__.__dict__["enable_automatic_upgrade"] = enable_automatic_upgrade
__props__.__dict__["force_update_tag"] = force_update_tag
if instance_id is None and not opts.urn:
raise TypeError("Missing required property 'instance_id'")
__props__.__dict__["instance_id"] = instance_id
__props__.__dict__["instance_view"] = instance_view
__props__.__dict__["protected_settings"] = protected_settings
__props__.__dict__["publisher"] = publisher
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["settings"] = settings
__props__.__dict__["type"] = type
__props__.__dict__["type_handler_version"] = type_handler_version
__props__.__dict__["vm_extension_name"] = vm_extension_name
if vm_scale_set_name is None and not opts.urn:
raise TypeError("Missing required property 'vm_scale_set_name'")
__props__.__dict__["vm_scale_set_name"] = vm_scale_set_name
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:compute/v20210301:VirtualMachineScaleSetVMExtension"), pulumi.Alias(type_="azure-native:compute:VirtualMachineScaleSetVMExtension"), pulumi.Alias(type_="azure-nextgen:compute:VirtualMachineScaleSetVMExtension"), pulumi.Alias(type_="azure-native:compute/v20190701:VirtualMachineScaleSetVMExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20190701:VirtualMachineScaleSetVMExtension"), pulumi.Alias(type_="azure-native:compute/v20191201:VirtualMachineScaleSetVMExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20191201:VirtualMachineScaleSetVMExtension"), pulumi.Alias(type_="azure-native:compute/v20200601:VirtualMachineScaleSetVMExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20200601:VirtualMachineScaleSetVMExtension"), pulumi.Alias(type_="azure-native:compute/v20201201:VirtualMachineScaleSetVMExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20201201:VirtualMachineScaleSetVMExtension"), pulumi.Alias(type_="azure-native:compute/v20210401:VirtualMachineScaleSetVMExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20210401:VirtualMachineScaleSetVMExtension"), pulumi.Alias(type_="azure-native:compute/v20210701:VirtualMachineScaleSetVMExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20210701:VirtualMachineScaleSetVMExtension")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(VirtualMachineScaleSetVMExtension, __self__).__init__(
'azure-native:compute/v20210301:VirtualMachineScaleSetVMExtension',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'VirtualMachineScaleSetVMExtension':
"""
Get an existing VirtualMachineScaleSetVMExtension resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = VirtualMachineScaleSetVMExtensionArgs.__new__(VirtualMachineScaleSetVMExtensionArgs)
__props__.__dict__["auto_upgrade_minor_version"] = None
__props__.__dict__["enable_automatic_upgrade"] = None
__props__.__dict__["force_update_tag"] = None
__props__.__dict__["instance_view"] = None
__props__.__dict__["name"] = None
__props__.__dict__["protected_settings"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["publisher"] = None
__props__.__dict__["settings"] = None
__props__.__dict__["type"] = None
__props__.__dict__["type_handler_version"] = None
return VirtualMachineScaleSetVMExtension(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="autoUpgradeMinorVersion")
def auto_upgrade_minor_version(self) -> pulumi.Output[Optional[bool]]:
"""
Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.
"""
return pulumi.get(self, "auto_upgrade_minor_version")
@property
@pulumi.getter(name="enableAutomaticUpgrade")
def enable_automatic_upgrade(self) -> pulumi.Output[Optional[bool]]:
"""
Indicates whether the extension should be automatically upgraded by the platform if there is a newer version of the extension available.
"""
return pulumi.get(self, "enable_automatic_upgrade")
@property
@pulumi.getter(name="forceUpdateTag")
def force_update_tag(self) -> pulumi.Output[Optional[str]]:
"""
How the extension handler should be forced to update even if the extension configuration has not changed.
"""
return pulumi.get(self, "force_update_tag")
@property
@pulumi.getter(name="instanceView")
def instance_view(self) -> pulumi.Output[Optional['outputs.VirtualMachineExtensionInstanceViewResponse']]:
"""
The virtual machine extension instance view.
"""
return pulumi.get(self, "instance_view")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the extension.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="protectedSettings")
def protected_settings(self) -> pulumi.Output[Optional[Any]]:
"""
The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all.
"""
return pulumi.get(self, "protected_settings")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state, which only appears in the response.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def publisher(self) -> pulumi.Output[Optional[str]]:
"""
The name of the extension handler publisher.
"""
return pulumi.get(self, "publisher")
@property
@pulumi.getter
def settings(self) -> pulumi.Output[Optional[Any]]:
"""
Json formatted public settings for the extension.
"""
return pulumi.get(self, "settings")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="typeHandlerVersion")
def type_handler_version(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the version of the script handler.
"""
return pulumi.get(self, "type_handler_version")
| 50.551948 | 1,361 | 0.687091 |
79473cc112c8fd56d993b405b110361d17972fa7 | 1,888 | py | Python | auth-api/src/auth_api/services/documents.py | bsnopek-freshworks/sbc-auth | 871800922461239c7a09225a3d708c79173410f9 | [
"Apache-2.0"
] | null | null | null | auth-api/src/auth_api/services/documents.py | bsnopek-freshworks/sbc-auth | 871800922461239c7a09225a3d708c79173410f9 | [
"Apache-2.0"
] | null | null | null | auth-api/src/auth_api/services/documents.py | bsnopek-freshworks/sbc-auth | 871800922461239c7a09225a3d708c79173410f9 | [
"Apache-2.0"
] | null | null | null |
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Service for managing the documents."""
from jinja2 import Environment, FileSystemLoader
from sbc_common_components.tracing.service_tracing import ServiceTracing # noqa: I001
from auth_api.models import Documents as DocumentsModel
from auth_api.schemas import DocumentSchema
from config import get_named_config
ENV = Environment(loader=FileSystemLoader('.'), autoescape=True)
CONFIG = get_named_config()
@ServiceTracing.trace(ServiceTracing.enable_tracing, ServiceTracing.should_be_tracing)
class Documents:
"""Manages the documents in DB.
This service manages retrieving the documents.
"""
def __init__(self, model):
"""Return an invitation service instance."""
self._model = model
@ServiceTracing.disable_tracing
def as_dict(self):
"""Return the User as a python dict.
None fields are not included in the dict.
"""
document_schema = DocumentSchema()
obj = document_schema.dump(self._model, many=False)
return obj
@classmethod
def fetch_latest_document(cls, document_type):
"""Get a membership type by the given code."""
doc = DocumentsModel.fetch_latest_document_by_type(file_type=document_type)
if doc:
return Documents(doc)
return None
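# Illustrative usage sketch (not part of the original module): fetch the newest version of a
# document type and serialize it; 'termsofuse' is a hypothetical document type code.
def _example_fetch_latest_terms():
    latest = Documents.fetch_latest_document('termsofuse')
    return latest.as_dict() if latest else None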
| 33.714286 | 86 | 0.731992 |
79473d6de561bc2bce3c1571e5aba1a6487bafba | 5,111 | py | Python | tests/attacks/inference/attribute_inference/test_baseline.py | Somanji0520/adversarial-robustness-toolbox | e23a686aea861066e42c793a81ea2d6b1efea872 | [
"MIT"
] | null | null | null | tests/attacks/inference/attribute_inference/test_baseline.py | Somanji0520/adversarial-robustness-toolbox | e23a686aea861066e42c793a81ea2d6b1efea872 | [
"MIT"
] | null | null | null | tests/attacks/inference/attribute_inference/test_baseline.py | Somanji0520/adversarial-robustness-toolbox | e23a686aea861066e42c793a81ea2d6b1efea872 | [
"MIT"
] | 1 | 2022-03-03T14:54:45.000Z | 2022-03-03T14:54:45.000Z | # MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2021
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import pytest
import numpy as np
from art.attacks.inference.attribute_inference.black_box import AttributeInferenceBlackBox
from art.attacks.inference.attribute_inference.baseline import AttributeInferenceBaseline
from tests.utils import ARTTestException
logger = logging.getLogger(__name__)
@pytest.mark.skip_framework("dl_frameworks")
def test_black_box_baseline(art_warning, decision_tree_estimator, get_iris_dataset):
try:
attack_feature = 2 # petal length
# need to transform attacked feature into categorical
def transform_feature(x):
x[x > 0.5] = 2.0
x[(x > 0.2) & (x <= 0.5)] = 1.0
x[x <= 0.2] = 0.0
values = [0.0, 1.0, 2.0]
(x_train_iris, y_train_iris), (x_test_iris, y_test_iris) = get_iris_dataset
# training data without attacked feature
x_train_for_attack = np.delete(x_train_iris, attack_feature, 1)
# only attacked feature
x_train_feature = x_train_iris[:, attack_feature].copy().reshape(-1, 1)
transform_feature(x_train_feature)
# training data with attacked feature (after transformation)
x_train = np.concatenate((x_train_for_attack[:, :attack_feature], x_train_feature), axis=1)
x_train = np.concatenate((x_train, x_train_for_attack[:, attack_feature:]), axis=1)
# test data without attacked feature
x_test_for_attack = np.delete(x_test_iris, attack_feature, 1)
# only attacked feature
x_test_feature = x_test_iris[:, attack_feature].copy().reshape(-1, 1)
transform_feature(x_test_feature)
classifier = decision_tree_estimator()
attack = AttributeInferenceBlackBox(classifier, attack_feature=attack_feature)
# get original model's predictions
x_train_predictions = np.array([np.argmax(arr) for arr in classifier.predict(x_train_iris)]).reshape(-1, 1)
x_test_predictions = np.array([np.argmax(arr) for arr in classifier.predict(x_test_iris)]).reshape(-1, 1)
# train attack model
attack.fit(x_train)
# infer attacked feature
# inferred_train
_ = attack.infer(x_train_for_attack, x_train_predictions, values=values)
inferred_test = attack.infer(x_test_for_attack, x_test_predictions, values=values)
# check accuracy
# train_acc = np.sum(inferred_train == x_train_feature.reshape(1, -1)) / len(inferred_train)
test_acc = np.sum(inferred_test == x_test_feature.reshape(1, -1)) / len(inferred_test)
baseline_attack = AttributeInferenceBaseline(attack_feature=attack_feature)
# train attack model
baseline_attack.fit(x_train)
# infer attacked feature
# baseline_inferred_train
_ = baseline_attack.infer(x_train_for_attack, values=values)
baseline_inferred_test = baseline_attack.infer(x_test_for_attack, values=values)
# check accuracy
# baseline_train_acc = np.sum(baseline_inferred_train == x_train_feature.reshape(1, -1)) / len(
# baseline_inferred_train
# )
baseline_test_acc = np.sum(baseline_inferred_test == x_test_feature.reshape(1, -1)) / len(
baseline_inferred_test
)
assert test_acc > baseline_test_acc
except ARTTestException as e:
art_warning(e)
def test_check_params(art_warning, get_iris_dataset):
try:
(x_train, y_train), (_, _) = get_iris_dataset
with pytest.raises(ValueError):
AttributeInferenceBaseline(attack_feature="a")
with pytest.raises(ValueError):
AttributeInferenceBaseline(attack_feature=-3)
attack = AttributeInferenceBaseline(attack_feature=8)
with pytest.raises(ValueError):
attack.fit(x_train)
_ = AttributeInferenceBaseline()
except ARTTestException as e:
art_warning(e)
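# Hedged, standalone illustration (not part of the ART test suite) of the
# binning done by `transform_feature` above: the continuous attacked feature is
# mapped onto the categorical values {0.0, 1.0, 2.0} before the attacks are fit.
def _bin_feature_demo():
    x = np.array([0.1, 0.3, 0.7])
    x[x > 0.5] = 2.0
    x[(x > 0.2) & (x <= 0.5)] = 1.0
    x[x <= 0.2] = 0.0
    assert x.tolist() == [0.0, 1.0, 2.0]
    return x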
| 43.313559 | 120 | 0.714537 |
79473de28bb296633be3b8479f7f82aefaa21c94 | 413 | py | Python | python/schuelerpraktika/sascha_mueller/Aufgaben/Aufgabe8.py | maximilianharr/code_snippets | 8b271e6fa9174e24200e88be59e417abd5f2f59a | [
"BSD-3-Clause"
] | null | null | null | python/schuelerpraktika/sascha_mueller/Aufgaben/Aufgabe8.py | maximilianharr/code_snippets | 8b271e6fa9174e24200e88be59e417abd5f2f59a | [
"BSD-3-Clause"
] | null | null | null | python/schuelerpraktika/sascha_mueller/Aufgaben/Aufgabe8.py | maximilianharr/code_snippets | 8b271e6fa9174e24200e88be59e417abd5f2f59a | [
"BSD-3-Clause"
] | null | null | null | #-*- coding: utf-8 -*-
# @todo Also import from the other file klassen.py
import time
import Printme
Printme.printme ("Max muss 50 Euro an Mr.X überweisen, weil er gegen ihn im Wetten\nin einem Online spiel verloren hat."); time.sleep(0.5)
print
Printme.printme ("Nun will er von seinen 100€ 50€ überweisen und zeigt uns die Kontostände vor und\nnach der Überweisung."); time.sleep(0.5)
print
import Klassen
| 34.416667 | 140 | 0.755448 |
79473e575228b13faaa72bc2b7af372503004e18 | 1,086 | py | Python | SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Flight_Dynamics/Dynamic_Stability/Full_Linearized_Equations/Supporting_Functions/cx_alpha.py | Vinicius-Tanigawa/Undergraduate-Research-Project | e92372f07882484b127d7affe305eeec2238b8a9 | [
"MIT"
] | null | null | null | SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Flight_Dynamics/Dynamic_Stability/Full_Linearized_Equations/Supporting_Functions/cx_alpha.py | Vinicius-Tanigawa/Undergraduate-Research-Project | e92372f07882484b127d7affe305eeec2238b8a9 | [
"MIT"
] | null | null | null | SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Flight_Dynamics/Dynamic_Stability/Full_Linearized_Equations/Supporting_Functions/cx_alpha.py | Vinicius-Tanigawa/Undergraduate-Research-Project | e92372f07882484b127d7affe305eeec2238b8a9 | [
"MIT"
] | null | null | null | ## @ingroup Methods-Flight_Dynamics-Dynamic_Stability-Full_Linearized_Equations-Supporting_Functions
# cx_alpha.py
#
# Created: Jun 2014, A. Wendorff
# Modified: Jan 2016, E. Botero
# ----------------------------------------------------------------------
# Method
# ----------------------------------------------------------------------
## @ingroup Methods-Flight_Dynamics-Dynamic_Stability-Full_Linearized_Equations-Supporting_Functions
def cx_alpha(cL, cL_alpha):
""" This calculates the coefficient of force in the x direction
with respect to the change in angle of attack of the aircraft
Assumptions:
None
Source:
J.H. Blakelock, "Automatic Control of Aircraft and Missiles"
Wiley & Sons, Inc. New York, 1991, (pg 23)
Inputs:
cL [dimensionless]
cL_alpha [dimensionless]
Outputs:
cx_alpha [dimensionless]
Properties Used:
N/A
"""
# Generating Stability derivative
cx_alpha = cL - cL_alpha
return cx_alpha | 29.351351 | 100 | 0.558932 |
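# Worked example for the formula above (illustrative, not part of SUAVE):
# with cL = 0.5 and cL_alpha = 5.0 (both dimensionless, per radian),
# cx_alpha = cL - cL_alpha = 0.5 - 5.0 = -4.5, so cx_alpha(0.5, 5.0) == -4.5.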
79473eb7dd90bc16799c8050dcb918c7e97779dc | 3,550 | py | Python | test/functional/p2p_feefilter.py | knotcoin/knotcoin | 3f4ade4e2cabf94acd80bc043deec3d9a4209938 | [
"MIT"
] | null | null | null | test/functional/p2p_feefilter.py | knotcoin/knotcoin | 3f4ade4e2cabf94acd80bc043deec3d9a4209938 | [
"MIT"
] | null | null | null | test/functional/p2p_feefilter.py | knotcoin/knotcoin | 3f4ade4e2cabf94acd80bc043deec3d9a4209938 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2016-2017 The Knotcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of feefilter messages."""
from test_framework.mininode import *
from test_framework.test_framework import KnotcoinTestFramework
from test_framework.util import *
import time
def hashToHex(hash):
return format(hash, '064x')
# Wait up to 60 secs to see if the testnode has received all the expected invs
def allInvsMatch(invsExpected, testnode):
for x in range(60):
with mininode_lock:
if (sorted(invsExpected) == sorted(testnode.txinvs)):
return True
time.sleep(1)
return False
class TestNode(P2PInterface):
def __init__(self):
super().__init__()
self.txinvs = []
def on_inv(self, message):
for i in message.inv:
if (i.type == 1):
self.txinvs.append(hashToHex(i.hash))
def clear_invs(self):
with mininode_lock:
self.txinvs = []
class FeeFilterTest(KnotcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def run_test(self):
node1 = self.nodes[1]
node0 = self.nodes[0]
# Get out of IBD
node1.generate(1)
sync_blocks(self.nodes)
# Setup the p2p connections and start up the network thread.
self.nodes[0].add_p2p_connection(TestNode())
network_thread_start()
self.nodes[0].p2p.wait_for_verack()
# Test that invs are received for all txs at feerate of 20 sat/byte
node1.settxfee(Decimal("0.00020000"))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, self.nodes[0].p2p))
self.nodes[0].p2p.clear_invs()
# Set a filter of 15 sat/byte
self.nodes[0].p2p.send_and_ping(msg_feefilter(15000))
# Test that txs are still being received (paying 20 sat/byte)
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, self.nodes[0].p2p))
self.nodes[0].p2p.clear_invs()
# Change tx fee rate to 10 sat/byte and test they are no longer received
node1.settxfee(Decimal("0.00010000"))
[node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
sync_mempools(self.nodes) # must be sure node 0 has received all txs
# Send one transaction from node0 that should be received, so that we
# we can sync the test on receipt (if node1's txs were relayed, they'd
# be received by the time this node0 tx is received). This is
# unfortunately reliant on the current relay behavior where we batch up
# to 35 entries in an inv, which means that when this next transaction
# is eligible for relay, the prior transactions from node1 are eligible
# as well.
node0.settxfee(Decimal("0.00020000"))
txids = [node0.sendtoaddress(node0.getnewaddress(), 1)]
assert(allInvsMatch(txids, self.nodes[0].p2p))
self.nodes[0].p2p.clear_invs()
# Remove fee filter and check that txs are received again
self.nodes[0].p2p.send_and_ping(msg_feefilter(0))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, self.nodes[0].p2p))
self.nodes[0].p2p.clear_invs()
if __name__ == '__main__':
FeeFilterTest().main()
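# Hedged note on units (the helper below is illustrative, not part of the test
# framework): msg_feefilter carries a fee rate in satoshis per 1000 bytes, so
# the "15 sat/byte" filter used above is sent on the wire as 15000.
def _sat_per_byte_to_feefilter_rate(sat_per_byte):
    return int(sat_per_byte * 1000)
assert _sat_per_byte_to_feefilter_rate(15) == 15000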
| 37.765957 | 81 | 0.66 |
79473f330c3770b4db133819b72165ec99f52f50 | 663 | py | Python | env/Lib/site-packages/tests/test_ImageSequenceClip.py | L0dz/auto-post | 91563235a74336e4326a13b142c05c6755e0cf47 | [
"Apache-2.0"
] | null | null | null | env/Lib/site-packages/tests/test_ImageSequenceClip.py | L0dz/auto-post | 91563235a74336e4326a13b142c05c6755e0cf47 | [
"Apache-2.0"
] | 6 | 2019-12-17T13:32:08.000Z | 2021-06-02T00:49:29.000Z | cineBot/lib/python3.6/site-packages/tests/test_ImageSequenceClip.py | furkanalpereny/cineBot | cb93b6fc6ab25ba0601067f54b6824a8462f470d | [
"MIT"
] | null | null | null | import pytest
from moviepy.editor import *
# needed for these tests to work on travis-ci
import sys
sys.path.append("tests")
import download_media
def test_download_media(capsys):
with capsys.disabled():
download_media.download()
def test_1():
images=[]
durations=[]
for i in range(5):
durations.append(i)
images.append("media/python_logo.png")
durations.append(i)
images.append("media/matplotlib_demo1.png")
clip=ImageSequenceClip(images, durations=durations)
assert clip.duration == sum(durations)
clip.write_videofile("/tmp/ImageSequenceClip1.mp4", fps=30)
if __name__ == '__main__':
pytest.main()
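# Hedged sketch (file names are placeholders): ImageSequenceClip can also be
# driven by a constant frame rate instead of an explicit durations list.
#
#     clip = ImageSequenceClip(["img1.png", "img2.png"], fps=2)
#     clip.write_videofile("/tmp/ImageSequenceClip2.mp4", fps=30)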
| 23.678571 | 63 | 0.693816 |
7947401ac29b70588330be8307246d1b8eaee7b9 | 9,096 | py | Python | fbchat/_threads/_group.py | tulir/fbchat | 476b7b0f1c9dc26ab2e515887febf6afcc6b4e91 | [
"BSD-3-Clause"
] | 42 | 2019-04-29T21:02:49.000Z | 2022-03-24T11:49:33.000Z | fbchat/_threads/_group.py | tulir/fbchat | 476b7b0f1c9dc26ab2e515887febf6afcc6b4e91 | [
"BSD-3-Clause"
] | 27 | 2019-11-21T08:50:05.000Z | 2021-01-07T09:39:38.000Z | fbchat/_threads/_group.py | tulir/fbchat | 476b7b0f1c9dc26ab2e515887febf6afcc6b4e91 | [
"BSD-3-Clause"
] | 13 | 2019-06-26T12:32:02.000Z | 2021-02-03T22:37:19.000Z | import attr
import datetime
from ._abc import ThreadABC
from . import _user
from .._common import attrs_default
from .. import _util, _session, _graphql, _models
from typing import Sequence, Iterable, Set, Mapping, Optional
@attrs_default
class Group(ThreadABC):
"""Represents a Facebook group. Implements `ThreadABC`.
Example:
>>> group = fbchat.Group(session=session, id="1234")
"""
#: The session to use when making requests.
session: _session.Session
#: The group's unique identifier.
id: str = attr.ib(converter=str)
def _to_send_data(self):
return {"thread_fbid": self.id}
def _copy(self) -> "Group":
return Group(session=self.session, id=self.id)
async def add_participants(self, user_ids: Iterable[str]):
"""Add users to the group.
Args:
user_ids: One or more user IDs to add
Example:
>>> group.add_participants(["1234", "2345"])
"""
data = self._to_send_data()
data["action_type"] = "ma-type:log-message"
data["log_message_type"] = "log:subscribe"
for i, user_id in enumerate(user_ids):
if user_id == self.session.user.id:
raise ValueError(
"Error when adding users: Cannot add self to group thread"
)
else:
data[
"log_message_data[added_participants][{}]".format(i)
] = "fbid:{}".format(user_id)
return await self.session._do_send_request(data)
async def remove_participant(self, user_id: str):
"""Remove user from the group.
Args:
user_id: User ID to remove
Example:
>>> group.remove_participant("1234")
"""
data = {"uid": user_id, "tid": self.id}
j = await self.session._payload_post("/chat/remove_participants/", data)
async def _admin_status(self, user_ids: Iterable[str], status: bool):
data = {"add": status, "thread_fbid": self.id}
for i, user_id in enumerate(user_ids):
data["admin_ids[{}]".format(i)] = str(user_id)
j = await self.session._payload_post("/messaging/save_admins/?dpr=1", data)
async def add_admins(self, user_ids: Iterable[str]):
"""Set specified users as group admins.
Args:
user_ids: One or more user IDs to set admin
Example:
>>> group.add_admins(["1234", "2345"])
"""
await self._admin_status(user_ids, True)
async def remove_admins(self, user_ids: Iterable[str]):
"""Remove admin status from specified users.
Args:
user_ids: One or more user IDs to remove admin
Example:
>>> group.remove_admins(["1234", "2345"])
"""
await self._admin_status(user_ids, False)
async def set_title(self, title: str):
"""Change title of the group.
Args:
title: New title
Example:
>>> group.set_title("Abc")
"""
data = {"thread_name": title, "thread_id": self.id}
j = await self.session._payload_post("/messaging/set_thread_name/?dpr=1", data)
async def set_image(self, image_id: str):
"""Change the group image from an image id.
Args:
image_id: ID of uploaded image
Example:
Upload an image, and use it as the group image.
>>> with open("image.png", "rb") as f:
... (file,) = client.upload([("image.png", f, "image/png")])
...
>>> group.set_image(file[0])
"""
data = {"thread_image_id": image_id, "thread_id": self.id}
j = await self.session._payload_post("/messaging/set_thread_image/?dpr=1", data)
async def set_approval_mode(self, require_admin_approval: bool):
"""Change the group's approval mode.
Args:
require_admin_approval: True or False
Example:
>>> group.set_approval_mode(False)
"""
data = {"set_mode": int(require_admin_approval), "thread_fbid": self.id}
j = await self.session._payload_post("/messaging/set_approval_mode/?dpr=1", data)
async def _users_approval(self, user_ids: Iterable[str], approve: bool):
data = {
"client_mutation_id": "0",
"actor_id": self.session.user.id,
"thread_fbid": self.id,
"user_ids": list(user_ids),
"response": "ACCEPT" if approve else "DENY",
"surface": "ADMIN_MODEL_APPROVAL_CENTER",
}
(j,) = await self.session._graphql_requests(
_graphql.from_doc_id("1574519202665847", {"data": data})
)
async def accept_users(self, user_ids: Iterable[str]):
"""Accept users to the group from the group's approval.
Args:
user_ids: One or more user IDs to accept
Example:
>>> group.accept_users(["1234", "2345"])
"""
await self._users_approval(user_ids, True)
async def deny_users(self, user_ids: Iterable[str]):
"""Deny users from joining the group.
Args:
user_ids: One or more user IDs to deny
Example:
>>> group.deny_users(["1234", "2345"])
"""
await self._users_approval(user_ids, False)
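# Hedged usage sketch (session creation elided; the IDs are placeholders):
#
#     group = Group(session=session, id="1234")
#     await group.add_participants(["2345", "3456"])
#     await group.set_title("Study group")
#     await group.add_admins(["2345"])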
@attrs_default
class GroupData(Group):
"""Represents data about a Facebook group.
Inherits `Group`, and implements `ThreadABC`.
"""
#: The group's picture
photo: Optional[_models.Image] = None
#: The name of the group
name: Optional[str] = None
#: When the group was last active / when the last message was sent
last_active: Optional[datetime.datetime] = None
#: Number of messages in the group
message_count: Optional[int] = None
#: Set `Plan`
plan: Optional[_models.PlanData] = None
#: The group thread's participant user ids
participants: Set[ThreadABC] = attr.ib(factory=set)
#: A dictionary, containing user nicknames mapped to their IDs
nicknames: Mapping[str, str] = attr.ib(factory=dict)
#: The groups's message color
color: Optional[str] = None
#: The groups's default emoji
emoji: Optional[str] = None
# User ids of thread admins
admins: Set[str] = attr.ib(factory=set)
# True if users need approval to join
approval_mode: Optional[bool] = None
# Set containing user IDs requesting to join
approval_requests: Set[str] = attr.ib(factory=set)
# Link for joining group
join_link: Optional[str] = None
@classmethod
def _from_graphql(cls, session, data):
if data.get("image") is None:
data["image"] = {}
c_info = cls._parse_customization_info(data)
last_active = None
if "last_message" in data:
last_active = _util.millis_to_datetime(
int(data["last_message"]["nodes"][0]["timestamp_precise"])
)
plan = None
if data.get("event_reminders") and data["event_reminders"].get("nodes"):
plan = _models.PlanData._from_graphql(
session, data["event_reminders"]["nodes"][0]
)
return cls(
session=session,
id=data["thread_key"]["thread_fbid"],
participants=list(
cls._parse_participants(session, data["all_participants"])
),
nicknames=c_info.get("nicknames"),
color=c_info["color"],
emoji=c_info["emoji"],
admins=set([node.get("id") for node in data.get("thread_admins")]),
approval_mode=bool(data.get("approval_mode"))
if data.get("approval_mode") is not None
else None,
approval_requests=set(
node["requester"]["id"]
for node in data["group_approval_queue"]["nodes"]
)
if data.get("group_approval_queue")
else None,
join_link=data["joinable_mode"].get("link"),
photo=_models.Image._from_uri_or_none(data["image"]),
name=data.get("name"),
message_count=data.get("messages_count"),
last_active=last_active,
plan=plan,
)
@attrs_default
class NewGroup(ThreadABC):
"""Helper class to create new groups.
TODO: Complete this!
Construct this class with the desired users, and call a method like `wave`, to...
"""
#: The session to use when making requests.
session: _session.Session
#: The users that should be added to the group.
_users: Sequence["_user.User"]
@property
def id(self):
raise NotImplementedError(
"The method you called is not supported on NewGroup objects."
" Please use the supported methods to create the group, before attempting"
" to call the method."
)
def _to_send_data(self) -> dict:
return {
"specific_to_list[{}]".format(i): "fbid:{}".format(user.id)
for i, user in enumerate(self._users)
}
| 32.485714 | 89 | 0.591139 |
7947425fe2029061b94065f329b2e4e9aed0620a | 108,368 | py | Python | parser/fase2/team01/Grupo1/Instrucciones/DML/select.py | Josue-Zea/tytus | f9e4be9a8c03eb698fade7a748972e4f52d46685 | [
"MIT"
] | 35 | 2020-12-07T03:11:43.000Z | 2021-04-15T17:38:16.000Z | parser/fase2/team01/Grupo1/Instrucciones/DML/select.py | Josue-Zea/tytus | f9e4be9a8c03eb698fade7a748972e4f52d46685 | [
"MIT"
] | 47 | 2020-12-09T01:29:09.000Z | 2021-01-13T05:37:50.000Z | parser/fase2/team01/Grupo1/Instrucciones/DML/select.py | Josue-Zea/tytus | f9e4be9a8c03eb698fade7a748972e4f52d46685 | [
"MIT"
] | 556 | 2020-12-07T03:13:31.000Z | 2021-06-17T17:41:10.000Z | from Expresiones.Primitivo import Primitive
import sys
sys.path.append('../Grupo1/Instrucciones')
sys.path.append('../Grupo1/Utils')
sys.path.append('../Grupo1/Expresiones')
sys.path.append('../Grupo1/Librerias/storageManager')
sys.path.append('../Grupo1/Librerias/prettytable')
from jsonMode import *
from instruccion import *
from c3dGen import *
from Error import *
from Primitivo import *
from datetime import *
from TablaSimbolos import *
from prettytable import *
from operator import itemgetter
import math
import random
import hashlib
class Select(Instruccion):
global columnasAceptadas
global esCount
def __init__(self,arg0,arg1, parametros, fromopcional):
self.arg0 = arg0
self.arg1 = arg1
self.parametros = parametros
self.fromopcional = fromopcional
def execute(self, data):
fromData = self.fromopcional
cadenaE = self.arg1.upper()
#"select idproducto,producto,estado from tbProducto where estado=1;"
#cadenaE = "select count(*) from tbProducto;"
valRetSel = selectC3D(data.databaseSeleccionada, fromData.parametros[0].parametros.operador.upper(), cadenaE.upper(), ' ')
if fromData == None:
diccionarioColumnasAceptadas = {}
nuevaColumna = []
i = 0
contadorNombre = 0
nombreTabla = ''
select = self.parametros
columnasImprimir = select.listadeseleccion
for columnasSeleccionadas in columnasImprimir:
nombreColumna = columnasSeleccionadas.listaseleccionados
try:
if contadorNombre == 0: nombreTabla = nombreColumna.tipofuncionmatematica
else: nombreTabla = nombreColumna.tipofuncionmatematica + str(contadorNombre)
except:
try:
if nombreColumna.operador == 'md5':
return Error('Sintactico', 'El md5 solamente puede venir en el insert y update', 0, 0)
if contadorNombre == 0: nombreTabla = nombreColumna.operador
else: nombreTabla = nombreColumna.operador + str(contadorNombre)
except:
try:
if contadorNombre == 0: nombreTabla = nombreColumna.tipofuncionTrigonometrica
else: nombreTabla = nombreColumna.tipofuncionTrigonometrica + str(contadorNombre)
except:
if contadorNombre == 0: nombreTabla = nombreColumna.tipofuncionfehca
else: nombreTabla = nombreColumna.tipofuncionfehca + str(contadorNombre)
comprobar = nombreColumna.execute(data, None)
if isinstance(comprobar, Error):
return comprobar
diccionarioColumnasAceptadas[nombreTabla] = {'columnas': [], 'tipo': ''}
diccionarioColumnasAceptadas[nombreTabla]['columnas'].append([comprobar.val])
diccionarioColumnasAceptadas[nombreTabla]['tipo'] = comprobar.type
return diccionarioColumnasAceptadas
tablas = fromData.execute(data).execute(data)
where = tablas.whereopcional
directorioTablas = {}
tablasFromTemporales = []
columnasFromTemporales = {}
for tablasSeleccionadas in tablas.parametros:
if isinstance(tablasSeleccionadas.parametros.operador, Select):
tablas = tablasSeleccionadas.parametros.operador.execute(data)
if isinstance(tablas, Error):
return tablas
if tablasSeleccionadas.asop == None:
return Error('Sintactico', 'Se esperaba As o un Alias.', 0, 0)
else:
tablasFromTemporales.append(tablas)
contador = 0
nombre = ''
while True:
try:
if contador == 0:
nombre = tablasSeleccionadas.asop.upper()
else:
nombre = tablasSeleccionadas.asop.upper() + str(contador)
prueba = data.tablaSimbolos[data.databaseSeleccionada]['tablas'][nombre]
contador = contador + 1
except:
data.tablaSimbolos[data.databaseSeleccionada]['tablas'][nombre] = {'columns': []}
break
directorioNombres = []
for keysTemporales in tablas.keys():
eliminarPunto = False
nombreNuevo = ''
for letras in keysTemporales:
if eliminarPunto:
nombreNuevo = nombreNuevo + letras
if letras == '.':
eliminarPunto = True
directorioNombres.append({'viejo': keysTemporales, 'nuevo': nombreNuevo})
data.tablaSimbolos[data.databaseSeleccionada]['tablas'][nombre]['columns'].append(TableData(nombreNuevo, tablas[keysTemporales]['tipo'], None, None, None, None, None, None, None))
for nombres in directorioNombres:
tablas[nombres['nuevo']] = tablas.pop(nombres['viejo'])
juntarValores = []
inicio = 0
for keys in tablas.keys():
contador = 0
for val in tablas[keys]['columnas']:
if inicio == 0:
juntarValores.append(val)
else:
juntarValores[contador].append(val[0])
contador = contador + 1
inicio = inicio + 1
columnasFromTemporales[nombre] = juntarValores
directorioTablas[nombre] = {'fila' : None, 'alias': tablasSeleccionadas.asop.upper(), 'temporal': True}
elif tablasSeleccionadas.asop == None:
directorioTablas[tablasSeleccionadas.parametros.operador.upper()] = {'fila' : None, 'alias': '', 'temporal': False}
else:
directorioTablas[tablasSeleccionadas.parametros.operador.upper()] = {'fila' : None, 'alias': tablasSeleccionadas.asop.upper(), 'temporal': False}
try:
for keys in directorioTablas.keys():
data.tablaSimbolos[data.databaseSeleccionada]['tablas'][keys]
except:
for borrarTemporales in columnasFromTemporales.keys():
del(data.tablaSimbolos[data.databaseSeleccionada]['tablas'][borrarTemporales])
return Error('Semántico', 'Error(42P01): undefined_table.', 0, 0)
valores = []
temporales = []
columnasAceptadas = {}
for keys in directorioTablas.keys():
valores.append(keys)
columnasAceptadas[keys] = []
temporales.append(directorioTablas[keys]['temporal'])
if where == None:
val = self.funcionPosibilidades(data, valores, [], [], directorioTablas, True, columnasAceptadas, temporales, columnasFromTemporales)
else:
val = self.funcionPosibilidades(data, valores, [], [], directorioTablas, False, columnasAceptadas, temporales, columnasFromTemporales)
if isinstance(val, Error):
for borrarTemporales in columnasFromTemporales.keys():
del(data.tablaSimbolos[data.databaseSeleccionada]['tablas'][borrarTemporales])
return val
select = self.parametros
columnasImprimir = select.listadeseleccion
diccionarioColumnasAceptadas = {}
columnasAgregacion = []
for columnasSeleccionadas in columnasImprimir:
nombreColumna = columnasSeleccionadas.listaseleccionados
if isinstance(nombreColumna, FuncionMatematicaSimple):
columnasAgregacion.append(nombreColumna)
esCount=0
if nombreColumna.operador=='count':
esCount = 1
continue
try:
esCount=0
retorno = nombreColumna.obtenerSeleccionado(data, directorioTablas, columnasAceptadas, diccionarioColumnasAceptadas)
if isinstance(retorno, Error):
for borrarTemporales in columnasFromTemporales.keys():
del(data.tablaSimbolos[data.databaseSeleccionada]['tablas'][borrarTemporales])
return retorno
except:
cant = 0
for keys in columnasAceptadas:
cant = len(columnasAceptadas[keys])
break
nuevaColumna = []
i = 0
contadorNombre = 0
nombreTabla = ''
while True:
try:
if contadorNombre == 0: nombreTabla = nombreColumna.tipofuncionmatematica
else: nombreTabla = nombreColumna.tipofuncionmatematica + str(contadorNombre)
except:
try:
if nombreColumna.operador == 'md5':
return Error('Sintactico', 'El md5 solamente puede venir en el insert y update', 0, 0)
if contadorNombre == 0: nombreTabla = nombreColumna.operador
else: nombreTabla = nombreColumna.operador + str(contadorNombre)
except:
try:
if contadorNombre == 0: nombreTabla = nombreColumna.tipofuncionTrigonometrica
else: nombreTabla = nombreColumna.tipofuncionTrigonometrica + str(contadorNombre)
except :
if contadorNombre == 0: nombreTabla = nombreColumna.tipofuncionfehca
else: nombreTabla = nombreColumna.tipofuncionfehca + str(contadorNombre)
try:
a = diccionarioColumnasAceptadas[nombreTabla]
contadorNombre = contadorNombre + 1
except:
diccionarioColumnasAceptadas[nombreTabla] = {'columnas': [], 'tipo': ''}
break;
while True:
if i == cant:
break;
for keys in columnasAceptadas:
directorioTablas[keys]['fila'] = columnasAceptadas[keys][i]
comprobar = nombreColumna.execute(data, directorioTablas)
if isinstance(comprobar, Error):
return comprobar
diccionarioColumnasAceptadas[nombreTabla]['columnas'].append([comprobar.val])
diccionarioColumnasAceptadas[nombreTabla]['tipo'] = comprobar.type
i = i + 1
if select.distinct:
juntarValores = []
inicio = 0
for keys in diccionarioColumnasAceptadas.keys():
contador = 0
for val in diccionarioColumnasAceptadas[keys]['columnas']:
if inicio == 0:
juntarValores.append(val)
else:
juntarValores[contador].append(val[0])
contador = contador + 1
inicio = inicio + 1
contador = 0
nuevoArregloDistinct = []
routes = juntarValores
dups = set()
duplicadas = 0
for route in routes:
if tuple(route) in dups:
duplicadas = duplicadas + 1
else:
nuevoArregloDistinct.append(route)
dups.add(tuple(route))
contador = contador + 1
if duplicadas == 0:
nuevoArregloDistinct = juntarValores
contador = 0
for tablas in diccionarioColumnasAceptadas.keys():
datosTablas = diccionarioColumnasAceptadas[tablas]
columnaSelect = []
for filaActual in nuevoArregloDistinct:
columnaSelect.append([filaActual[contador]])
diccionarioColumnasAceptadas[tablas]['columnas'] = columnaSelect
contador = contador + 1
whereOpcional = True
groupByOpcional = False
groupByData = None
if self.fromopcional.whereopcional == None and self.fromopcional.groupbyopcional == None:
''
else:
if self.fromopcional.groupbyopcional == None:
groupByData = self.fromopcional.whereopcional.groupbyopcional
groupByOpcional = True
else:
groupByData = self.fromopcional.groupbyopcional
whereOpcional = False
groupByOpcional = True
if groupByData == None:
''
else:
if len(diccionarioColumnasAceptadas.keys()) == len(groupByData.lista):
for keys in groupByData.lista:
if keys.column.upper() in diccionarioColumnasAceptadas:
''
else:
return Error('Semantico', 'No se reconoce la columna ' + keys.column + '.', 0, 0)
else:
return Error('Semantico', 'Faltan columnas para agrupar en el group by.', 0, 0)
columnasMostrar = diccionarioColumnasAceptadas
juntarValoresN = []
inicio = 0
for keys in columnasMostrar.keys():
contador = 0
for val in columnasMostrar[keys]['columnas']:
if inicio == 0:
s = val.copy()
juntarValoresN.append(s)
else:
juntarValoresN[contador].append(val[0])
contador = contador + 1
inicio = inicio + 1
diccionarioAgrupacion = {}
pos = 0
for fila in juntarValoresN:
nombre = ''
for valorIndividual in fila:
nombre = nombre + str(valorIndividual)
if nombre in diccionarioAgrupacion:
diccionarioAgrupacion[nombre].append(pos)
else:
diccionarioAgrupacion[nombre] = []
diccionarioAgrupacion[nombre].append(pos)
pos = pos + 1
cambiarValores = False
for keys in diccionarioColumnasAceptadas.keys():
if len(diccionarioAgrupacion.keys()) < len(diccionarioColumnasAceptadas[keys]['columnas']):
cambiarValores = True
break
agregarColumnas = False
columnasGNuevas = []
for agregacion in columnasAgregacion:
val = agregacion.execute(data, diccionarioAgrupacion, diccionarioColumnasAceptadas, columnasAceptadas)
if isinstance(val, Error):
for borrarTemporales in columnasFromTemporales.keys():
del(data.tablaSimbolos[data.databaseSeleccionada]['tablas'][borrarTemporales])
return val
columnasGNuevas.append(val)
agregarColumnas = True
if agregarColumnas or cambiarValores:
juntarValores = []
inicio = 0
for keys in diccionarioColumnasAceptadas.keys():
contador = 0
for val in diccionarioColumnasAceptadas[keys]['columnas']:
if inicio == 0:
s = val.copy()
juntarValores.append(s)
else:
juntarValores[contador].append(val[0])
contador = contador + 1
inicio = inicio + 1
contador = 0
nuevoArregloDistinct = []
routes = juntarValores
dups = set()
duplicadas = 0
for route in routes:
if tuple(route) in dups:
duplicadas = duplicadas + 1
else:
nuevoArregloDistinct.append(route)
dups.add(tuple(route))
contador = contador + 1
if duplicadas == 0:
nuevoArregloDistinct = juntarValores
contador = 0
for tablas in diccionarioColumnasAceptadas.keys():
datosTablas = diccionarioColumnasAceptadas[tablas]
columnaSelect = []
for filaActual in nuevoArregloDistinct:
columnaSelect.append([filaActual[contador]])
diccionarioColumnasAceptadas[tablas]['columnas'] = columnaSelect
contador = contador + 1
for nuevas in columnasGNuevas:
cont = 0
for col in nuevas['val'].keys():
if cont == 0:
diccionarioColumnasAceptadas[nuevas['name']] = {'columnas': [], 'tipo': nuevas['type']}
diccionarioColumnasAceptadas[nuevas['name']]['columnas'].append(nuevas['val'][col])
cont = cont + 1
filascount = extractTable(data.databaseSeleccionada, fromData.parametros[0].parametros.operador.upper()) #extract
for borrarTemporales in columnasFromTemporales.keys():
del(data.tablaSimbolos[data.databaseSeleccionada]['tablas'][borrarTemporales])
if esCount==1:
returnCount = {}
returnCol = {}
returnCol2 = []
returnCol2.append(str(len(filascount)))
returnCol2.append('_')
returnCol['columnas']= returnCol2
returnCount['count'] = returnCol
return returnCount
else:
return diccionarioColumnasAceptadas
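# Hedged sketch of the result shape built by execute(): a dict keyed by the
# selected column name, each entry holding its rows (one-element lists) and the
# column type, for example:
#
#     {'IDPRODUCTO': {'columnas': [[1], [2]], 'tipo': 'integer'},
#      'PRODUCTO': {'columnas': [['A'], ['B']], 'tipo': 'string'}}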
def executec3d(self, data):
fromData = self.fromopcional
#cadenaE = "select idproducto,producto,estado from tbProducto where estado=1;"
#valRetSel = selectC3D(data.databaseSeleccionada, fromData.parametros[0].parametros.operador.upper(), cadenaE.upper())
if fromData == None:
diccionarioColumnasAceptadas = {}
nuevaColumna = []
i = 0
contadorNombre = 0
nombreTabla = ''
select = self.parametros
columnasImprimir = select.listadeseleccion
for columnasSeleccionadas in columnasImprimir:
nombreColumna = columnasSeleccionadas.listaseleccionados
try:
if contadorNombre == 0: nombreTabla = nombreColumna.tipofuncionmatematica
else: nombreTabla = nombreColumna.tipofuncionmatematica + str(contadorNombre)
except:
try:
if nombreColumna.operador == 'md5':
return Error('Sintactico', 'El md5 solamente puede venir en el insert y update', 0, 0)
if contadorNombre == 0: nombreTabla = nombreColumna.operador
else: nombreTabla = nombreColumna.operador + str(contadorNombre)
except:
try:
if contadorNombre == 0: nombreTabla = nombreColumna.tipofuncionTrigonometrica
else: nombreTabla = nombreColumna.tipofuncionTrigonometrica + str(contadorNombre)
except:
if contadorNombre == 0: nombreTabla = nombreColumna.tipofuncionfehca
else: nombreTabla = nombreColumna.tipofuncionfehca + str(contadorNombre)
comprobar = nombreColumna.execute(data, None)
if isinstance(comprobar, Error):
return comprobar
diccionarioColumnasAceptadas[nombreTabla] = {'columnas': [], 'tipo': ''}
diccionarioColumnasAceptadas[nombreTabla]['columnas'].append([comprobar.val])
diccionarioColumnasAceptadas[nombreTabla]['tipo'] = comprobar.type
return diccionarioColumnasAceptadas
tablas = fromData.execute(data).execute(data)
where = tablas.whereopcional
directorioTablas = {}
tablasFromTemporales = []
columnasFromTemporales = {}
for tablasSeleccionadas in tablas.parametros:
if isinstance(tablasSeleccionadas.parametros.operador, Select):
tablas = tablasSeleccionadas.parametros.operador.execute(data)
if isinstance(tablas, Error):
return tablas
if tablasSeleccionadas.asop == None:
return Error('Sintactico', 'Se esperaba As o un Alias.', 0, 0)
else:
tablasFromTemporales.append(tablas)
contador = 0
nombre = ''
while True:
try:
if contador == 0:
nombre = tablasSeleccionadas.asop.upper()
else:
nombre = tablasSeleccionadas.asop.upper() + str(contador)
prueba = data.tablaSimbolos[data.databaseSeleccionada]['tablas'][nombre]
contador = contador + 1
except:
data.tablaSimbolos[data.databaseSeleccionada]['tablas'][nombre] = {'columns': []}
break
directorioNombres = []
for keysTemporales in tablas.keys():
eliminarPunto = False
nombreNuevo = ''
for letras in keysTemporales:
if eliminarPunto:
nombreNuevo = nombreNuevo + letras
if letras == '.':
eliminarPunto = True
directorioNombres.append({'viejo': keysTemporales, 'nuevo': nombreNuevo})
data.tablaSimbolos[data.databaseSeleccionada]['tablas'][nombre]['columns'].append(TableData(nombreNuevo, tablas[keysTemporales]['tipo'], None, None, None, None, None, None, None))
for nombres in directorioNombres:
tablas[nombres['nuevo']] = tablas.pop(nombres['viejo'])
juntarValores = []
inicio = 0
for keys in tablas.keys():
contador = 0
for val in tablas[keys]['columnas']:
if inicio == 0:
juntarValores.append(val)
else:
juntarValores[contador].append(val[0])
contador = contador + 1
inicio = inicio + 1
columnasFromTemporales[nombre] = juntarValores
directorioTablas[nombre] = {'fila' : None, 'alias': tablasSeleccionadas.asop.upper(), 'temporal': True}
elif tablasSeleccionadas.asop == None:
directorioTablas[tablasSeleccionadas.parametros.operador.upper()] = {'fila' : None, 'alias': '', 'temporal': False}
else:
directorioTablas[tablasSeleccionadas.parametros.operador.upper()] = {'fila' : None, 'alias': tablasSeleccionadas.asop.upper(), 'temporal': False}
try:
for keys in directorioTablas.keys():
data.tablaSimbolos[data.databaseSeleccionada]['tablas'][keys]
except:
for borrarTemporales in columnasFromTemporales.keys():
del(data.tablaSimbolos[data.databaseSeleccionada]['tablas'][borrarTemporales])
return Error('Semántico', 'Error(42P01): undefined_table.', 0, 0)
valores = []
temporales = []
columnasAceptadas = {}
for keys in directorioTablas.keys():
valores.append(keys)
columnasAceptadas[keys] = []
temporales.append(directorioTablas[keys]['temporal'])
if where == None:
val = self.funcionPosibilidades(data, valores, [], [], directorioTablas, True, columnasAceptadas, temporales, columnasFromTemporales)
else:
val = self.funcionPosibilidades(data, valores, [], [], directorioTablas, False, columnasAceptadas, temporales, columnasFromTemporales)
if isinstance(val, Error):
for borrarTemporales in columnasFromTemporales.keys():
del(data.tablaSimbolos[data.databaseSeleccionada]['tablas'][borrarTemporales])
return val
select = self.parametros #parametros
columnasImprimir = select.listadeseleccion
diccionarioColumnasAceptadas = {}
columnasAgregacion = []
for columnasSeleccionadas in columnasImprimir:
nombreColumna = columnasSeleccionadas.listaseleccionados
if isinstance(nombreColumna, FuncionMatematicaSimple):
columnasAgregacion.append(nombreColumna)
esCount=0
if nombreColumna.operador=='count':
esCount = 1
continue
try:
esCount=0
retorno = nombreColumna.obtenerSeleccionado(data, directorioTablas, columnasAceptadas, diccionarioColumnasAceptadas)
if isinstance(retorno, Error):
for borrarTemporales in columnasFromTemporales.keys():
del(data.tablaSimbolos[data.databaseSeleccionada]['tablas'][borrarTemporales])
return retorno
except:
cant = 0
for keys in columnasAceptadas:
cant = len(columnasAceptadas[keys])
break
nuevaColumna = []
i = 0
contadorNombre = 0
nombreTabla = ''
while True:
try:
if contadorNombre == 0: nombreTabla = nombreColumna.tipofuncionmatematica
else: nombreTabla = nombreColumna.tipofuncionmatematica + str(contadorNombre)
except:
try:
if nombreColumna.operador == 'md5':
return Error('Sintactico', 'El md5 solamente puede venir en el insert y update', 0, 0)
if contadorNombre == 0: nombreTabla = nombreColumna.operador
else: nombreTabla = nombreColumna.operador + str(contadorNombre)
except:
try:
if contadorNombre == 0: nombreTabla = nombreColumna.tipofuncionTrigonometrica
else: nombreTabla = nombreColumna.tipofuncionTrigonometrica + str(contadorNombre)
except :
if contadorNombre == 0: nombreTabla = nombreColumna.tipofuncionfehca
else: nombreTabla = nombreColumna.tipofuncionfehca + str(contadorNombre)
try:
a = diccionarioColumnasAceptadas[nombreTabla]
contadorNombre = contadorNombre + 1
except:
diccionarioColumnasAceptadas[nombreTabla] = {'columnas': [], 'tipo': ''}
break;
while True:
if i == cant:
break;
for keys in columnasAceptadas:
directorioTablas[keys]['fila'] = columnasAceptadas[keys][i]
comprobar = nombreColumna.execute(data, directorioTablas)
if isinstance(comprobar, Error):
return comprobar
diccionarioColumnasAceptadas[nombreTabla]['columnas'].append([comprobar.val])
diccionarioColumnasAceptadas[nombreTabla]['tipo'] = comprobar.type
i = i + 1
if select.distinct:
juntarValores = []
inicio = 0
for keys in diccionarioColumnasAceptadas.keys():
contador = 0
for val in diccionarioColumnasAceptadas[keys]['columnas']:
if inicio == 0:
juntarValores.append(val)
else:
juntarValores[contador].append(val[0])
contador = contador + 1
inicio = inicio + 1
contador = 0
nuevoArregloDistinct = []
routes = juntarValores
dups = set()
duplicadas = 0
for route in routes:
if tuple(route) in dups:
duplicadas = duplicadas + 1
else:
nuevoArregloDistinct.append(route)
dups.add(tuple(route))
contador = contador + 1
if duplicadas == 0:
nuevoArregloDistinct = juntarValores
contador = 0
for tablas in diccionarioColumnasAceptadas.keys():
datosTablas = diccionarioColumnasAceptadas[tablas]
columnaSelect = []
for filaActual in nuevoArregloDistinct:
columnaSelect.append([filaActual[contador]])
diccionarioColumnasAceptadas[tablas]['columnas'] = columnaSelect
contador = contador + 1
whereOpcional = True
groupByOpcional = False
groupByData = None
if self.fromopcional.whereopcional == None and self.fromopcional.groupbyopcional == None:
''
else:
if self.fromopcional.groupbyopcional == None:
groupByData = self.fromopcional.whereopcional.groupbyopcional
groupByOpcional = True
else:
groupByData = self.fromopcional.groupbyopcional
whereOpcional = False
groupByOpcional = True
if groupByData == None:
''
else:
if len(diccionarioColumnasAceptadas.keys()) == len(groupByData.lista):
for keys in groupByData.lista:
if keys.column.upper() in diccionarioColumnasAceptadas:
''
else:
return Error('Semantico', 'No se reconoce la columna ' + keys.column + '.', 0, 0)
else:
return Error('Semantico', 'Faltan columnas para agrupar en el group by.', 0, 0)
columnasMostrar = diccionarioColumnasAceptadas
juntarValoresN = []
inicio = 0
for keys in columnasMostrar.keys():
contador = 0
for val in columnasMostrar[keys]['columnas']:
if inicio == 0:
s = val.copy()
juntarValoresN.append(s)
else:
juntarValoresN[contador].append(val[0])
contador = contador + 1
inicio = inicio + 1
diccionarioAgrupacion = {}
pos = 0
for fila in juntarValoresN:
nombre = ''
for valorIndividual in fila:
nombre = nombre + str(valorIndividual)
if nombre in diccionarioAgrupacion:
diccionarioAgrupacion[nombre].append(pos)
else:
diccionarioAgrupacion[nombre] = []
diccionarioAgrupacion[nombre].append(pos)
pos = pos + 1
cambiarValores = False
for keys in diccionarioColumnasAceptadas.keys():
if len(diccionarioAgrupacion.keys()) < len(diccionarioColumnasAceptadas[keys]['columnas']):
cambiarValores = True
break
agregarColumnas = False
columnasGNuevas = []
for agregacion in columnasAgregacion:
val = agregacion.execute(data, diccionarioAgrupacion, diccionarioColumnasAceptadas, columnasAceptadas)
if isinstance(val, Error):
for borrarTemporales in columnasFromTemporales.keys():
del(data.tablaSimbolos[data.databaseSeleccionada]['tablas'][borrarTemporales])
return val
columnasGNuevas.append(val)
agregarColumnas = True
if agregarColumnas or cambiarValores:
juntarValores = []
inicio = 0
for keys in diccionarioColumnasAceptadas.keys():
contador = 0
for val in diccionarioColumnasAceptadas[keys]['columnas']:
if inicio == 0:
s = val.copy()
juntarValores.append(s)
else:
juntarValores[contador].append(val[0])
contador = contador + 1
inicio = inicio + 1
contador = 0
nuevoArregloDistinct = []
routes = juntarValores
dups = set()
duplicadas = 0
for route in routes:
if tuple(route) in dups:
duplicadas = duplicadas + 1
else:
nuevoArregloDistinct.append(route)
dups.add(tuple(route))
contador = contador + 1
if duplicadas == 0:
nuevoArregloDistinct = juntarValores
contador = 0
for tablas in diccionarioColumnasAceptadas.keys():
datosTablas = diccionarioColumnasAceptadas[tablas]
columnaSelect = []
for filaActual in nuevoArregloDistinct:
columnaSelect.append([filaActual[contador]])
diccionarioColumnasAceptadas[tablas]['columnas'] = columnaSelect
contador = contador + 1
for nuevas in columnasGNuevas:
cont = 0
for col in nuevas['val'].keys():
if cont == 0:
diccionarioColumnasAceptadas[nuevas['name']] = {'columnas': [], 'tipo': nuevas['type']}
diccionarioColumnasAceptadas[nuevas['name']]['columnas'].append(nuevas['val'][col])
cont = cont + 1
filascount = extractTable(data.databaseSeleccionada, fromData.parametros[0].parametros.operador.upper()) #extract
for borrarTemporales in columnasFromTemporales.keys():
del(data.tablaSimbolos[data.databaseSeleccionada]['tablas'][borrarTemporales])
if esCount==1:
returnCount = {}
returnCol = {}
returnCol2 = []
returnCol2.append(str(len(filascount)))
returnCol2.append('.')
returnCol['columnas']= returnCol2
returnCount['count'] = returnCol
return returnCount
else:
return diccionarioColumnasAceptadas
def __repr__(self):
return str(self.__dict__)
def funcionPosibilidades(self, data, nombres, columna, nombreAux, ordenTablas, noWhere, columnasAceptadas, temporales, columnasFromTemporales):
if len(nombres) == 0:
if noWhere:
val = 0
for fila in columna:
columnasAceptadas[nombreAux[val]].append(fila)
val = val + 1
else:
val = 0
for fila in columna:
ordenTablas[nombreAux[val]]['fila'] = fila
val = val + 1
result = self.fromopcional.whereopcional.operador.execute(data, ordenTablas)
if isinstance(result, Error):
return result
if result:
val = 0
for fila in columna:
columnasAceptadas[nombreAux[val]].append(fila)
val = val + 1
return 'fin'
nombre = nombres[0]
nombres.remove(nombre)
temporal = temporales[0]
temporales.pop(0)
if temporal:
filas = columnasFromTemporales[nombre]
else:
filas = extractTable(data.databaseSeleccionada, nombre) #extract
for fila in filas:
s = fila
columna.append(fila)
nombreAux.append(nombre)
comp = self.funcionPosibilidades(data, nombres, columna, nombreAux, ordenTablas, noWhere, columnasAceptadas, temporales, columnasFromTemporales)
if isinstance(comp, Error):
return comp
columna.remove(s)
nombreAux.remove(nombre)
nombres.append(nombre)
temporales.append(temporal)
return 'hola'
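# Hedged illustration (comment only, not used by the class): funcionPosibilidades
# recursively walks the cartesian product of the rows of every table in the FROM
# clause and keeps the combinations that satisfy the WHERE clause, roughly:
#
#     from itertools import product
#     accepted = [combo for combo in product(rows_t1, rows_t2) if where_holds(combo)]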
def ImprimirTabla(self, columnasMostrar):
juntarValores = []
inicio = 0
#print(columnasMostrar)
for keys in columnasMostrar.keys():
contador = 0
for val in columnasMostrar[keys]['columnas']:
if inicio == 0:
juntarValores.append(val)
else:
juntarValores[contador].append(val[0])
contador = contador + 1
inicio = inicio + 1
x = PrettyTable()
keys = columnasMostrar.keys()
x.field_names = keys
x.add_rows(
juntarValores
)
#print(str(x))
return x
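# Hedged, standalone example of the prettytable calls used by ImprimirTabla
# (PrettyTable is already imported at module level; the data is made up):
def _pretty_table_demo():
    table = PrettyTable()
    table.field_names = ["id", "name"]
    table.add_rows([[1, "A"], [2, "B"]])
    return str(table)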
class Casos(Instruccion):
def __init__(self, caso,elsecase):
self.caso = caso
self.elsecase = elsecase
def execute(self,data):
return self
def __repr__(self):
return str(self.__dict__)
class FromOpcional(Instruccion):
def __init__(self, parametros, whereogroup, groupbyopcional, orderby):
self.parametros = parametros
self.whereopcional = whereogroup
self.groupbyopcional = groupbyopcional
self.orderby = orderby
def execute(self,data):
return self
def __repr__(self):
return str(self.__dict__)
class ParametrosFromR(Instruccion):
def __init__(self, parametros, asop):
self.parametros = parametros
self.asop = asop
def execute(self,data):
return self
def __repr__(self):
return str(self.__dict__)
class ListaDeSeleccionadosConOperador(Instruccion):
#greatest can come with arg1
#least with arg1
#case with arg1 and arg2
def __init__(self, arg0,arg1,operador,arg2,arg3):
self.operador = operador
self.arg0 = arg0
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
def execute(self,data, valoresTabla):
#print(self)
#print(valoresTabla)
if self.operador.upper() == 'CASE' :
left = ''
for arg in self.arg1 :
condit = arg.caso.whenCase.execute(data, valoresTabla)
if isinstance(condit, Error):
return condit
if condit :
return Primitive(str(arg.caso.thenCase.type), arg.caso.thenCase.val)
if arg.elsecase != None :
left = arg.elsecase.elseopcional
if left == None :
error = Error('Semántico', 'Error(????): Else case no específicado.', 0, 0)
return error
return left
else :
''
items = []
tipo = None
tipofecha = False
for arg in self.arg1 :
try:
resp = arg.execute(data, valoresTabla)
except:
resp = arg.execute()
if isinstance(resp, Error):
return resp
if tipo == None :
tipo = resp.type
elif tipo != resp.type :
error = Error('Semántico', 'Error(????): Error de tipos.', 0, 0)
return error
if resp.type == 'string' :
try :
dextraccion = resp
fechacopleta = datetime.strptime(dextraccion.val,'%Y-%m-%d %H:%M:%S')
tipofecha = True
except :
try:
dextraccion = resp
fechacopleta = datetime.strptime(dextraccion.val,'%H:%M:%S')
tipofecha = True
except :
try :
dextraccion = resp
fechacopleta = datetime.strptime(dextraccion.val,'%Y-%m-%d')
tipofecha = True
except :
if tipofecha :
error = Error('Semántico', 'Error(????): Error de tipos.', 0, 0)
return error
items.append(resp.val)
if self.operador.upper() == 'GREATEST' :
try:
return Primitive('integer', int(max(items)))
except:
return Primitive('string', max(items))
else :
'LEAST'
return Primitive('string', min(items))
def __repr__(self):
return str(self.__dict__)
class ListaDeSeleccionados(Instruccion):
#an asterisk (*) may come in, then tipo == True
#or a complete select may come in -> tipo == False
def __init__(self, argumento,tipo):
self.argumento = argumento
self.tipo = tipo
def execute(self, data):
return self
def __repr__(self):
return str(self.__dict__)
class ElseOpcional(Instruccion):
def __init__(self, elseopcional):
self.elseopcional = elseopcional
def execute(self,data):
return self
def __repr__(self):
return str(self.__dict__)
class QuerysSelect(Instruccion):
def __init__(self,arg0,arg1, operador,select1,allopcional,select2):
self.operador = operador
self.select1 = select1
self.allopcional = allopcional
self.select2 = select2
self.arg0 = arg0
self.arg1 = arg1
def execute(self,data):
query1 = self.select1.execute(data)
if isinstance(query1, Error):
return query1
query2 = self.select2.execute(data)
if isinstance(query2, Error):
return query2
if len(query1.keys()) != len(query2.keys()):
return Error('Semantico', 'La cantidad de columnas en el ' + self.operador + ' tiene que ser la misma.', 0, 0)
if self.operador == 'union':
keys2 = []
for key in query2.keys():
keys2.append(key)
cont = 0
for key in query1.keys():
query1[key]['columnas'] = query1[key]['columnas'] + query2[keys2[cont]]['columnas']
cont = cont + 1
return query1
elif self.operador == 'intersect':
return query1
return self
def __repr__(self):
return str(self.__dict__)
def ImprimirTabla(self, columnasMostrar):
juntarValores = []
inicio = 0
for keys in columnasMostrar.keys():
contador = 0
for val in columnasMostrar[keys]['columnas']:
if inicio == 0:
juntarValores.append(val)
else:
juntarValores[contador].append(val[0])
contador = contador + 1
inicio = inicio + 1
x = PrettyTable()
keys = columnasMostrar.keys()
x.field_names = keys
x.add_rows(
juntarValores
)
return x
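# Hedged note: execute() above implements UNION by concatenating the per-column
# row lists of both queries, while INTERSECT currently returns the first query
# unchanged. A set-style sketch of the intended INTERSECT over row tuples:
#
#     rows1 = [(1, 'A'), (2, 'B')]
#     rows2 = [(2, 'B'), (3, 'C')]
#     intersected = [r for r in rows1 if r in rows2]  # [(2, 'B')]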
class ParametrosFrom(Instruccion):
#True -> the parameter is a select
#False -> the parameter is an id
def __init__(self,arg0,arg1, parametro,tipoparametro):
self.operador = parametro
self.tipoparametro = tipoparametro
self.arg0 = arg0
self.arg1 = arg1
def execute(self,data):
return self
def __repr__(self):
return str(self.__dict__)
class WhereOpcional(Instruccion):
def __init__(self, condiciones,groupbyopcional):
self.operador = condiciones
self.groupbyopcional = groupbyopcional
def execute(self,data):
return self
def __repr__(self):
return str(self.__dict__)
class GroupByOpcional(Instruccion):
def __init__(self, lista,havingopcional):
self.lista = lista
self.havingopcional = havingopcional
def execute(self,data):
return self
def __repr__(self):
return str(self.__dict__)
class HavingOpcional(Instruccion):
def __init__(self, Condiciones):
self.Condiciones = Condiciones
def execute(self,data):
return self
def __repr__(self):
return str(self.__dict__)
class Allopcional(Instruccion):
def __init__(self, allopcional):
self.allopcional = allopcional
def execute(self,data):
return self
def __repr__(self):
return str(self.__dict__)
class Case(Instruccion):
def __init__(self, whenCase,thenCase):
self.whenCase = whenCase
self.thenCase = thenCase
def execute(self,data):
return self
def __repr__(self):
return str(self.__dict__)
class ListaDeSeleccionadosR(Instruccion):
def __init__(self, listaseleccionados,asopcional):
self.listaseleccionados = listaseleccionados
self.asopcional = asopcional
def execute(self, data):
return self.listaseleccionados.execute(data)
#return self
def __repr__(self):
return str(self.__dict__)
class ParametrosSelect(Instruccion):
#True if there is a DISTINCT
#False if there is no DISTINCT
def __init__(self, distinct, listadeseleccion):
self.distinct = distinct
self.listadeseleccion = listadeseleccion
def execute(self, data):
if self.listadeseleccion != None:
for selection in self.listadeseleccion:
return selection.execute(data)
return self
def __repr__(self):
return str(self.__dict__)
class As(Instruccion):
def __init__(self, argumento):
self.argumento = argumento
def execute(self,data):
return self
def __repr__(self):
return str(self.__dict__)
class TipoRound(Instruccion):
def __init__(self, arg1):
self.arg1 = arg1
def execute(self,data):
return self
def __repr__(self):
return str(self.__dict__)
class FuncionBinaria(Instruccion):
#convert takes a type, not an argument
def __init__(self, operador, arg1,arg2,arg3):
self.operador = operador
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
def execute(self, data, valoresTabla):
tipo = str(self.operador)
if tipo == 'length':
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'string' or argumento.type == 'ID' :
return Primitive('integer',len(str(argumento.val)))
else:
error = Error('Semántico', 'Error de tipos en LENGTH, solo se aceptan valores de cadenas, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'substring' or tipo == 'substr':
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
try:
argumento1 = self.arg2.execute()
except:
argumento1 = self.arg2.execute(data, valoresTabla)
if isinstance(argumento1, Error):
return argumento1
try:
argumento2 = self.arg3.execute()
except:
argumento2 = self.arg3.execute(data, valoresTabla)
if isinstance(argumento2, Error):
return argumento2
if argumento.type == 'string' or argumento.type == 'ID' :
return Primitive('integer',str(argumento.val)[argumento1.val:argumento2.val])
else:
error = Error('Semántico', 'Error de tipos en LENGTH, solo se aceptan valores de cadenas, se obtuvo: '+str(argumento.val),0,0)
return error
elif tipo == 'md5':
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'string' or argumento.type == 'ID' :
textoaconvertir = str(argumento.val)
md5_object = hashlib.md5(textoaconvertir.encode())
md5_hash = md5_object.hexdigest()
return Primitive('string',md5_hash)
else:
error = Error('Semántico', 'Error de tipos en MD5, solo se aceptan valores de cadenas, se obtuvo: '+str(argumento.val),0,0)
return error
elif tipo == 'sha256':
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'string' or argumento.type == 'ID' :
textoaconvertir = str(argumento.val)
sha256_object = hashlib.sha256(textoaconvertir.encode())
sha256_hash = sha256_object.hexdigest()
return Primitive('string',sha256_hash)
else:
error = Error('Semántico', 'Error de tipos en MD5, solo se aceptan valores de cadenas, se obtuvo: '+str(argumento.val),0,0)
return error
return self
def __repr__(self):
return str(self.__dict__)
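# Hedged, standalone illustration of the hashing done for 'md5' and 'sha256'
# above (hashlib is already imported at the top of this module):
def _hash_demo(text):
    md5_hash = hashlib.md5(text.encode()).hexdigest()
    sha256_hash = hashlib.sha256(text.encode()).hexdigest()
    return md5_hash, sha256_hash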
class FucionTrigonometrica(Instruccion):
def __init__(self, tipofuncionTrigonometrica, arg1,arg2):
self.tipofuncionTrigonometrica = tipofuncionTrigonometrica
self.arg1 = arg1
self.arg2 = arg2
def execute(self, data, valoresTabla):
tipo = str(self.tipofuncionTrigonometrica)
if tipo == 'acos' :
# returns the inverse cosine
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
try :
return Primitive('float',math.acos(argumento.val))
except :
error = Error('Semántico', 'Error de DOMINIO en ACOS, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en ACOS, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'acosd' :
# returns the inverse cosine in degrees
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
try:
return Primitive('float',math.degrees(math.acos(argumento.val)))
except :
error = Error('Semántico', 'Error de DOMINIO en ACOSD, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en ACOSD, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'asin' :
'devuelve el seno inverso'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
try:
return Primitive('float',math.asin(argumento.val))
except :
error = Error('Semántico', 'Error de DOMINIO en ASIN, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en ASIN, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'asind' :
'devuelve el seno inverso en grados'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
try:
return Primitive('float',math.degrees(math.asin(argumento.val)))
except :
error = Error('Semántico', 'Error de DOMINIO en ASIND, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en ASIND, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'atan' :
'devuelve el tangente inverso'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
try:
return Primitive('float',math.atan(argumento.val))
except :
error = Error('Semántico', 'Error de DOMINIO en ATAN, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en ATAN, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'atand' :
'devuelve el tangente inverso en grados'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
try:
return Primitive('float',math.degrees(math.atan(argumento.val)))
except :
                    error = Error('Semántico', 'Error de DOMINIO en ATAND, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en ATAND, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'atan2' :
'devuelve el tangente inverso de una div'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
try:
argumento2 = self.arg2.execute()
except:
argumento2 = self.arg2.execute(data, valoresTabla)
if isinstance(argumento2, Error):
return argumento2
if argumento.type == 'integer' or argumento.type == 'float' :
if argumento2.type == 'integer' or argumento2.type == 'float' :
try:
return Primitive('float',math.atan2(argumento.val,argumento2.val))
except :
error = Error('Semántico', 'Error de DOMINIO en ATAN2, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en ATAN2, solo se aceptan valores numéricos, se obtuvo: '+argumento2.val, 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en ATAN2, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'atan2d' :
'devuelve el tangente inverso de una div en grados'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
try:
argumento2 = self.arg2.execute()
except:
argumento2 = self.arg2.execute(data, valoresTabla)
if isinstance(argumento2, Error):
return argumento2
if argumento.type == 'integer' or argumento.type == 'float' :
if argumento2.type == 'integer' or argumento2.type == 'float' :
try:
return Primitive('float',math.degrees(math.atan2(argumento.val,argumento2.val)))
except :
error = Error('Semántico', 'Error de DOMINIO en ATAN2D, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en ATAN2D, solo se aceptan valores numéricos, se obtuvo: '+argumento2.val, 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en ATAN2D, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'cos' :
'devuelve el coseno'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
try:
return Primitive('float',math.cos(argumento.val))
except :
error = Error('Semántico', 'Error de DOMINIO en COS, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en COS, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'cosd' :
'devuelve el coseno en grados'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
try:
return Primitive('float',math.degrees(math.cos(argumento.val)))
except :
error = Error('Semántico', 'Error de DOMINIO en COSD, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en COSD, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'cot' :
'devuelve el cotangente'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
try:
return Primitive('float',math.cos(argumento.val)/math.sin(argumento.val))
except :
error = Error('Semántico', 'Error de DOMINIO en COT, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en COT, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'cotd' :
'devuelve el cotangente en grados'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
try:
return Primitive('float',math.degrees(math.cos(argumento.val)/math.sin(argumento.val)))
except :
error = Error('Semántico', 'Error de DOMINIO en COTD, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en COTD, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'sin' :
'devuelve el sin'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
try:
return Primitive('float',math.sin(argumento.val))
except :
error = Error('Semántico', 'Error de DOMINIO en SIN, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en SIN, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'sind' :
'devuelve el coseno en grados'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
try:
return Primitive('float',math.degrees(math.sin(argumento.val)))
except :
error = Error('Semántico', 'Error de DOMINIO en SIND, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en SIND, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'tan' :
'devuelve el tan'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
try:
return Primitive('float',math.tan(argumento.val))
except :
error = Error('Semántico', 'Error de DOMINIO en TAN, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en TAN, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'tand' :
'devuelve el tan en grados'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
try:
return Primitive('float',math.degrees(math.tan(argumento.val)))
except :
error = Error('Semántico', 'Error de DOMINIO en TAND, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en TAND, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'sinh' :
'devuelve el sinh'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
try:
return Primitive('float',math.sinh(argumento.val))
except :
error = Error('Semántico', 'Error de DOMINIO en SINH, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en SINH, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'cosh' :
'devuelve el cosh'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
try:
return Primitive('float',math.cosh(argumento.val))
except :
error = Error('Semántico', 'Error de DOMINIO en COSH, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en COSH, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'tanh' :
'devuelve el tanh'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
try:
return Primitive('float',math.tanh(argumento.val))
except :
error = Error('Semántico', 'Error de DOMINIO en TANH, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
else :
                error = Error('Semántico', 'Error de tipos en TANH, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'asinh' :
'devuelve el asinh'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
try:
return Primitive('float',math.asinh(argumento.val))
except :
error = Error('Semántico', 'Error de DOMINIO en ASINH, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en ASINH, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'acosh' :
            'devuelve el acosh'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
try:
                    return Primitive('float',math.acosh(argumento.val))
except :
error = Error('Semántico', 'Error de DOMINIO en ACOSH, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en ACOSH, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'atanh' :
'devuelve el atanh'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
try:
return Primitive('float',math.atanh(argumento.val))
except :
error = Error('Semántico', 'Error de DOMINIO en ATANH, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en ATANH, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
#return self
def __repr__(self):
return str(self.__dict__)
class OperadoresSelect(Instruccion):
# | square
# || cube
# & and
# | or dos args
# # <- xor
# ~ not
# << sl(bitwise shift left)
# >> sr(bitwise shift right)
def __init__(self, tipoOperador, arg1,arg2):
self.tipoOperador = tipoOperador
self.arg1 = arg1
self.arg2 = arg2
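    # execute() evaluates arg1 (and arg2 for the binary operators) and applies the
    # bitwise/select operator; only numeric primitives are accepted and results are
    # wrapped as Primitive('float', ...).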
    def execute(self, data, valoresTabla):
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if self.tipoOperador == 'square':
if argumento.type == 'integer' or argumento.type == 'float' :
return Primitive('float', math.pow(float(argumento.val),2))
else:
error = Error('Semántico', 'Error de tipos en |, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif self.tipoOperador == 'cube':
            if argumento.type == 'integer' or argumento.type == 'float' :
return Primitive('float', math.pow(float(argumento.val),3))
else:
error = Error('Semántico', 'Error de tipos en ||, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif self.tipoOperador == 'and':
try:
argumento2 = self.arg2.execute()
except:
argumento2 = self.arg2.execute(data, valoresTabla)
            if isinstance(argumento2, Error):
                return argumento2
if (argumento.type == 'integer' or argumento.type == 'float') and (argumento2.type == 'integer' or argumento2.type == 'float') :
return Primitive('float', float(argumento.val & argumento2.val))
else:
error = Error('Semántico', 'Error de tipos en ||, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif self.tipoOperador == 'or':
try:
argumento2 = self.arg2.execute()
except:
argumento2 = self.arg2.execute(data, valoresTabla)
            if isinstance(argumento2, Error):
                return argumento2
if (argumento.type == 'integer' or argumento.type == 'float') and (argumento2.type == 'integer' or argumento2.type == 'float') :
return Primitive('float', float(argumento.val | argumento2.val))
else:
error = Error('Semántico', 'Error de tipos en ||, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif self.tipoOperador == 'xor':
try:
argumento2 = self.arg2.execute()
except:
argumento2 = self.arg2.execute(data, valoresTabla)
            if isinstance(argumento2, Error):
                return argumento2
if (argumento.type == 'integer' or argumento.type == 'float') and (argumento2.type == 'integer' or argumento2.type == 'float') :
return Primitive('float', float(argumento.val ^ argumento2.val))
else:
error = Error('Semántico', 'Error de tipos en #, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif self.tipoOperador == 'not':
if (argumento.type == 'integer' or argumento.type == 'float'):
return Primitive('float', float(~argumento.val))
else:
error = Error('Semántico', 'Error de tipos en ~, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif self.tipoOperador == 'sl':
try:
argumento2 = self.arg2.execute()
except:
argumento2 = self.arg2.execute(data, valoresTabla)
            if isinstance(argumento2, Error):
                return argumento2
if (argumento.type == 'integer' or argumento.type == 'float') and (argumento2.type == 'integer' or argumento2.type == 'float') :
return Primitive('float', float(argumento.val << argumento2.val))
else:
error = Error('Semántico', 'Error de tipos en <<, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif self.tipoOperador == 'sr':
try:
argumento2 = self.arg2.execute()
except:
argumento2 = self.arg2.execute(data, valoresTabla)
            if isinstance(argumento2, Error):
                return argumento2
if (argumento.type == 'integer' or argumento.type == 'float') and (argumento2.type == 'integer' or argumento2.type == 'float') :
return Primitive('float', float(argumento.val >> argumento2.val))
else:
error = Error('Semántico', 'Error de tipos en >>, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
return self
def __repr__(self):
return str(self.__dict__)
class FuncionMatematica(Instruccion):
def __init__(self, tipofuncionmatematica, arg1, arg2, arg3, arg4):
self.tipofuncionmatematica = tipofuncionmatematica
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
self.arg4 = arg4
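    # execute() implements the scalar math builtins (abs, cbrt, ceil/ceiling, degrees,
    # div, exp, factorial, floor, gcd, ln, log, mod, pi, power, radians, round, sign,
    # sqrt, width_bucket, trunc, random, setseed, scale), evaluating up to four
    # arguments as needed.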
def execute(self, data, valoresTabla):
tipo = str(self.tipofuncionmatematica)
if tipo == 'abs' :
'valor absoluto - FALTA IDS'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
return Primitive('float', math.fabs(float(argumento.val)))
else :
error = Error('Semántico', 'Error de tipos en ABS, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'cbrt' :
'raíz cúbica - solo numeros positivos'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
if argumento.val > 0 :
reto = argumento.val**(1/3)
if isinstance(reto, int) :
return Primitive('integer', reto)
return Primitive('float', reto)
else :
error = Error('Semántico', 'Error de tipos en CBRT, solo se aceptan valores numéricos positivo, se obtuvo: '+str(argumento.val), 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en CBRT, solo se aceptan valores numéricos positivo, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'ceil' :
'redondear - solo numeros positivos'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
if argumento.val > 0 :
reto = math.ceil(argumento.val)
return Primitive('integer', reto)
else :
error = Error('Semántico', 'Error de tipos en CEIL, solo se aceptan valores numéricos positivo, se obtuvo: '+str(argumento.val), 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en CEIL, solo se aceptan valores numéricos positivo, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'ceiling' :
'redondear - solo numeros positivos'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
if argumento.val > 0 :
reto = math.ceil(argumento.val)
return Primitive('integer', reto)
else :
error = Error('Semántico', 'Error de tipos en CEIL, solo se aceptan valores numéricos positivo, se obtuvo: '+str(argumento.val), 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en CEIL, solo se aceptan valores numéricos positivo, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'degrees' :
'radianes a grados - '
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
reto = math.degrees(argumento.val)
return Primitive('float', reto)
else :
error = Error('Semántico', 'Error de tipos en DEGREES, solo se aceptan valores numéricos positivo, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'div' :
'cociente - '
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
try:
argumento2 = self.arg2.execute()
except:
argumento2 = self.arg2.execute(data, valoresTabla)
if isinstance(argumento2, Error):
return argumento2
if argumento.type == 'integer' or argumento.type == 'float' :
if argumento2.type == 'integer' or argumento2.type == 'float' :
reto = math.trunc(argumento.val / argumento2.val)
return Primitive('integer', reto)
else:
error = Error('Semántico', 'Error de tipos en DIV, solo se aceptan valores numéricos positivo, se obtuvo: '+argumento2.val, 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en DIV, solo se aceptan valores numéricos positivo, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'exp' :
'e^ argumento - '
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
reto = math.exp(argumento.val)
return Primitive('float', reto)
else :
error = Error('Semántico', 'Error de tipos en EXP, solo se aceptan valores numéricos positivo, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'factorial' :
'x! - solo numeros positivos'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' :
if argumento.val > 0 :
reto = math.factorial(argumento.val)
return Primitive('integer', reto)
else :
error = Error('Semántico', 'Error de tipos en FACTORIAL, solo se aceptan valores numéricos positivo, se obtuvo: '+str(argumento.val), 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en FACTORIAL, solo se aceptan valores numéricos positivo, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'floor' :
'redondear al menor -'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
reto = math.trunc(argumento.val)
return Primitive('integer', reto)
else :
error = Error('Semántico', 'Error de tipos en FLOOR, solo se aceptan valores numéricos positivo, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'gcd' :
'MCD - '
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
try:
                argumento2 = self.arg2.execute()
            except:
                argumento2 = self.arg2.execute(data, valoresTabla)
            if isinstance(argumento2, Error):
                return argumento2
if argumento.type == 'integer' or argumento.type == 'float' :
if argumento2.type == 'integer' or argumento2.type == 'float' :
if argumento.val > 0 and argumento2.val > 0 :
reto = math.gcd(argumento.val, argumento2.val)
return Primitive('integer', reto)
else :
error = Error('Semántico', 'Error de tipos en GCD, solo se aceptan valores numéricos positivos', 0, 0)
return error
else:
error = Error('Semántico', 'Error de tipos en GCD, solo se aceptan valores numéricos, se obtuvo: '+argumento2.val, 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en GCD, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'ln' :
'Ln -'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
if argumento.val > 0 :
reto = math.log(argumento.val)
return Primitive('float', reto)
else :
error = Error('Semántico', 'Error de tipos en Ln, solo se aceptan valores numéricos positivo, se obtuvo: '+str(argumento.val), 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en Ln, solo se aceptan valores numéricos positivo, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'log' :
'Log10 -'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
if argumento.val > 0 :
reto = math.log10(argumento.val)
                    return Primitive('float', reto)
else :
error = Error('Semántico', 'Error de tipos en LOG, solo se aceptan valores numéricos positivo, se obtuvo: '+str(argumento.val), 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en LOG, solo se aceptan valores numéricos positivo, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'mod' :
'modulo - '
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
try:
argumento2 = self.arg2.execute()
except:
argumento2 = self.arg2.execute(data, valoresTabla)
if isinstance(argumento2, Error):
return argumento2
if argumento.type == 'integer' or argumento.type == 'float' :
                if argumento2.type == 'integer' or argumento2.type == 'float' :
reto = math.remainder(argumento.val, argumento2.val)
return Primitive('integer', reto)
else:
error = Error('Semántico', 'Error de tipos en MOD, solo se aceptan valores numéricos positivo, se obtuvo: '+argumento2.val, 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en MOD, solo se aceptan valores numéricos positivo, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'pi' :
'PI'
return Primitive('float', math.pi)
elif tipo == 'power' :
'power - solo positivos'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
try:
argumento2 = self.arg2.execute()
except:
argumento2 = self.arg2.execute(data, valoresTabla)
if isinstance(argumento2, Error):
return argumento2
            if argumento.type == 'integer' or argumento.type == 'float' :
                if argumento2.type == 'integer' or argumento2.type == 'float' :
if argumento.val > 0 and argumento2.val > 0 :
reto = math.pow(argumento.val, argumento2.val)
if isinstance(reto, int) : return Primitive('integer', reto)
else : return Primitive('float', reto)
else :
error = Error('Semántico', 'Error de tipos en POWER, solo se aceptan valores numéricos positivo', 0, 0)
return error
else:
error = Error('Semántico', 'Error de tipos en POWER, solo se aceptan valores numéricos positivo, se obtuvo: '+argumento2.val, 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en POWER, solo se aceptan valores numéricos positivo, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'radians' :
'grados a radianes - '
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
if argumento.val > 0:
reto = math.radians(argumento.val)
return Primitive('float', reto)
else :
error = Error('Semántico', 'Error de tipos en RADIANS, solo se aceptan valores numéricos positivo', 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en RADIANS, solo se aceptan valores numéricos positivo, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'round' :
'round - redondear n decimales'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
                if self.arg2 is None :
'numero de redondeo no específicado'
reto = round(argumento.val)
return Primitive('integer', reto)
else:
'numero de redondeo específicado'
try:
argumento2 = self.arg2.execute()
except:
argumento2 = self.arg2.execute(data, valoresTabla)
if isinstance(argumento2, Error):
return argumento2
                    if argumento2.type == 'integer' or argumento2.type == 'float' :
if argumento2.val > 0 :
reto = round(argumento.val, argumento2.val)
if isinstance(reto, int): return Primitive('integer', reto)
else: return Primitive('float', reto)
else :
error = Error('Semántico', 'Error de tipos en ROUND, solo se aceptan valores numéricos positivo', 0, 0)
return error
else:
error = Error('Semántico', 'Error de tipos en ROUND, solo se aceptan valores numéricos positivo, se obtuvo: '+argumento2.val, 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en ROUND, solo se aceptan valores numéricos positivo, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'sign' :
'devuelve signo - 1 o -1'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
if argumento.val > 0:
return Primitive('integer', 1)
else :
return Primitive('integer', -1)
else :
error = Error('Semántico', 'Error de tipos en SIGN, solo se aceptan valores numéricos positivo, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'sqrt' :
'grados a radianes - '
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
if argumento.val > 0:
reto = math.sqrt(argumento.val)
return Primitive('float', reto)
else :
error = Error('Semántico', 'Error de tipos en SQRT, solo se aceptan valores numéricos positivo', 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en SQRT, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'width_bucket' :
'histograma - argumento1 puede ser una columna'
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
try:
argumento2 = self.arg2.execute()
except:
argumento2 = self.arg2.execute(data, valoresTabla)
if isinstance(argumento2, Error):
return argumento2
try:
argumento3 = self.arg3.execute()
except:
argumento3 = self.arg3.execute(data, valoresTabla)
if isinstance(argumento3, Error):
return argumento3
try:
argumento4 = self.arg4.execute()
except:
argumento4 = self.arg4.execute(data, valoresTabla)
if isinstance(argumento4, Error):
return argumento4
if argumento.type == 'integer' or argumento.type == 'float' :
if argumento2.type == 'integer' or argumento2.type == 'float' :
if argumento3.type == 'integer' or argumento3.type == 'float' :
if argumento4.type == 'integer' or argumento4.type == 'float' :
return Primitive('integer', self.widthbucket(int(argumento.val), int(argumento2.val), int(argumento3.val), int(argumento4.val)))
#return Primitive('integer', self.widthbucket(9, 1, 12, 4))
else:
error = Error('Semántico', 'Error de tipos en width_bucket, solo se aceptan valores numéricos', 0, 0)
return error
else:
error = Error('Semántico', 'Error de tipos en width_bucket, solo se aceptan valores numéricos', 0, 0)
return error
else:
error = Error('Semántico', 'Error de tipos en width_bucket, solo se aceptan valores numéricos', 0, 0)
return error
else:
error = Error('Semántico', 'Error de tipos en width_bucket, solo se aceptan valores numéricos', 0, 0)
return error
elif tipo == 'trunc' :
'grados a radianes - '
try:
argumento = self.arg1.execute()
except:
argumento = self.arg1.execute(data, valoresTabla)
if isinstance(argumento, Error):
return argumento
if argumento.type == 'integer' or argumento.type == 'float' :
if argumento.val > 0:
reto = math.trunc(argumento.val)
return Primitive('integer', reto)
else :
error = Error('Semántico', 'Error de tipos en trunc, solo se aceptan valores numéricos positivo', 0, 0)
return error
else :
error = Error('Semántico', 'Error de tipos en trunc, solo se aceptan valores numéricos, se obtuvo: '+str(argumento.val), 0, 0)
return error
elif tipo == 'random' :
'random entre 0 and 1'
return Primitive('integer', random.randint(0,1))
elif tipo == 'setseed' :
''
elif tipo == 'scale' :
''
return self
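    # Helper modeled on PostgreSQL's width_bucket(): places nnum into one of nbuckets
    # equal-width buckets over [nmin, nmax], returning 0 below the range and
    # nbuckets+1 above it.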
def widthbucket(self, nnum, nmin, nmax, nbuckets):
if nnum < nmin :
return 0
elif nnum > nmax :
return nbuckets+1
else:
bucket_width = (nmax - nmin + 1) / nbuckets
i = nmin-1
bucket = 1
while i < nmax:
if i+bucket_width > nmax:
#if nnum >= i or nnum <= nmax:
#return bucket
break
else:
if nnum > i and nnum <= i+bucket_width:
#return bucket
break
i = i+bucket_width
bucket = bucket + 1
return bucket
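    # Illustrative check (hypothetical values, not from the original source):
    # widthbucket(9, 1, 12, 4) uses a bucket width of 3 and returns bucket 3.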
def __repr__(self):
return str(self.__dict__)
class FuncionFecha(Instruccion):
#2arg:
#extract(parte y tamestap) y datepart ( argument y argument)
def __init__(self, tipofuncionfehca, arg1,arg2):
self.tipofuncionfehca = tipofuncionfehca
self.arg1 = arg1
self.arg2 = arg2
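    # execute() covers the date/time builtins: extract (YEAR/MONTH/DAY/HOUR/MINUTE/
    # SECOND from a parsed timestamp, time or date), now, current_date, current_time,
    # timestamp and date_part over strings that interleave numbers and unit words.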
def execute(self, data, valoresTabla):
tipo = self.tipofuncionfehca
if tipo == 'extract':
extraccion = self.arg1
dextraccion = self.arg2.execute()
fechacopleta = ''
hora = ''
años = ''
try:
fechacopleta = datetime.strptime(dextraccion.val,'%Y-%m-%d %H:%M:%S')
except:
try:
hora = datetime.strptime(dextraccion.val,'%H:%M:%S')
except:
try :
años = datetime.strptime(dextraccion.val,'%Y-%m-%d')
except :
error = Error('Semántico', 'Error de tipos en DATE, solo se aceptan valores de fechas, se obtuvo: '+str(dextraccion.val), 0, 0)
return error
if fechacopleta != '' :
if extraccion == 'YEAR':
return Primitive('integer',fechacopleta.year)
elif extraccion == 'MONTH':
return Primitive('integer',fechacopleta.month)
elif extraccion == 'DAY':
return Primitive('integer',fechacopleta.day)
elif extraccion == 'HOUR':
return Primitive('integer',fechacopleta.hour)
elif extraccion == 'MINUTE':
return Primitive('integer',fechacopleta.minute)
elif extraccion == 'SECOND':
return Primitive('integer',fechacopleta.second)
            elif hora != '' :
                if extraccion == 'HOUR':
                    return Primitive('integer',hora.hour)
                elif extraccion == 'MINUTE':
                    return Primitive('integer',hora.minute)
                elif extraccion == 'SECOND':
                    return Primitive('integer',hora.second)
else :
error = Error('Semántico', 'Error de tipos en DATE, se quiere extraer una parte de la fecha no ingresada', 0, 0)
return error
            elif años != '' :
                if extraccion == 'YEAR':
                    return Primitive('integer',años.year)
                elif extraccion == 'MONTH':
                    return Primitive('integer',años.month)
                elif extraccion == 'DAY':
                    return Primitive('integer',años.day)
else :
error = Error('Semántico', 'Error de tipos en DATE, se quiere extraer una parte de la fecha no fue ingresada', 0, 0)
return error
elif tipo == 'now' :
return Primitive('string', str(datetime.now())[:19])
elif tipo == 'current_date' :
return Primitive('string', str(datetime.now().date()))
elif tipo == 'current_time' :
return Primitive('string', str(datetime.now().time())[:8])
elif tipo == 'timestamp' :
dextraccion = self.arg2.execute()
fechaval = datetime.strptime(dextraccion.val,'%Y-%m-%d %H:%M:%S')
return Primitive('string',str(fechaval))
elif tipo == 'date_part' :
extraccion = self.arg1.execute()
dextraccion = self.arg2.execute()
dic ={}
valor = ''
descrip = ''
for dex in dextraccion.val:
if dex.isnumeric():
valor += dex
elif (dex == ' ' and descrip != ''):
dic[descrip] = valor
valor = ''
descrip = ''
elif dex.isalpha() :
descrip +=dex
dic[descrip] = valor
#print(dic)
for key in dic:
if str(key).find(extraccion.val) != -1 :
return Primitive('integer',dic[key])
            error = Error('Semántico', 'Error de valores en DATEPART, se solicita un valor no encontrado en la cadena ', 0, 0)
return error
return self
def __repr__(self):
return str(self.__dict__)
class FuncionMatematicaSimple(Instruccion):
#puede venir:
#Count,max,sum,avg,min
def __init__(self, operador, argumento):
self.argumento = argumento
self.operador = operador
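    # execute() locates the target column inside the accepted tables and applies the
    # aggregate (avg, sum, counting, max, min) per group in diccionarioAgrupacion,
    # returning a dict of per-group values.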
def execute(self, data, diccionarioAgrupacion, diccionarioColumnasAceptadas, columnasAceptadas):
diccionarioRetorno = {'val': {}, 'type': None, 'name': ''}
contador = 0
noEncontrado = True
columnaImprimir = None
tablaAceptada = None
for keys in columnasAceptadas:
contador = 0
for columnas in data.tablaSimbolos[data.databaseSeleccionada]['tablas'][keys]['columns']:
if columnas.name.upper() == self.argumento.column.upper():
noEncontrado = False
tablaAceptada = keys
columnaImprimir = columnas
diccionarioRetorno['type'] = columnas.type
                    break
else:
contador = contador + 1
if not noEncontrado :
break
if noEncontrado:
if self.operador == '':
if self.argumento.column == '*':
contador = 0
else:
return Error('Semantico', 'La columna ' + self.argumento.column.upper() + ' no existe.', 0, 0)
else:
return Error('Semantico', 'La columna ' + self.argumento.column.upper() + ' no existe.', 0, 0)
diccionarioRetorno['name'] = self.operador
if self.operador == 'avg':
if columnaImprimir.type == 'integer' or columnaImprimir.type == 'float':
val = 0
cont = 0
for key in diccionarioAgrupacion:
val = 0
cont = 0
for pos in diccionarioAgrupacion[key]:
val = val + columnasAceptadas[tablaAceptada][pos][contador]
cont = cont + 1
res = val/cont
diccionarioRetorno['val'][key] = [res]
return diccionarioRetorno
else:
return Error('Semantico', 'El tipo para AVG debe ser numerico o float.', 0, 0)
elif self.operador == 'sum':
if columnaImprimir.type == 'integer' or columnaImprimir.type == 'float':
val = 0
for key in diccionarioAgrupacion:
#print (key)
val = 0
for pos in diccionarioAgrupacion[key]:
val = val + columnasAceptadas[tablaAceptada][pos][contador]
diccionarioRetorno['val'][key] = [val]
return diccionarioRetorno
else:
return Error('Semantico', 'El tipo para SUM debe ser numerico o float.', 0, 0)
elif self.operador == 'counting':
val = 0
for key in diccionarioAgrupacion:
val = len(diccionarioAgrupacion[key])
diccionarioRetorno['val'][key] = [val]
return diccionarioRetorno
elif self.operador == 'max':
val = 0
valComp = []
for key in diccionarioAgrupacion:
val = 0
valComp = []
for pos in diccionarioAgrupacion[key]:
valComp.append(columnasAceptadas[tablaAceptada][pos][contador])
r = max(valComp)
diccionarioRetorno['val'][key] = [r]
return diccionarioRetorno
elif self.operador == 'min':
val = 0
valComp = []
for key in diccionarioAgrupacion:
val = 0
valComp = []
for pos in diccionarioAgrupacion[key]:
valComp.append(columnasAceptadas[tablaAceptada][pos][contador])
r = min(valComp)
diccionarioRetorno['val'][key] = [r]
return diccionarioRetorno
return self
def __repr__(self):
return str(self.__dict__)
| 40.180942 | 203 | 0.523928 |
794742828b25648184ba2e9c2ab718ca44b16378 | 397 | py | Python | itchatmp/exceptions.py | yf-ftd/itchatmp | 3c554f4f3e175a66f51edf4b8ff7982d0af378a0 | ["MIT"] | 1,504 | 2016-10-06T05:40:59.000Z | 2022-03-18T02:46:48.000Z | itchatmp/exceptions.py | yf-ftd/itchatmp | 3c554f4f3e175a66f51edf4b8ff7982d0af378a0 | ["MIT"] | 51 | 2016-12-11T14:21:33.000Z | 2020-01-16T09:00:40.000Z | itchatmp/exceptions.py | yf-ftd/itchatmp | 3c554f4f3e175a66f51edf4b8ff7982d0af378a0 | ["MIT"] | 370 | 2016-10-15T02:22:58.000Z | 2022-03-14T08:50:13.000Z |
class BaseException(Exception):
''' exception base '''
pass
class ParameterError(BaseException):
def __init__(self, message=''):
self.message = message
def __str__(self):
return self.message
class EnvironmentError(BaseException):
def __init__(self, message=''):
self.message = message
def __str__(self):
return self.message
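# Usage sketch (assumption, not part of the original file):
#   raise ParameterError('token missing')   # callers read the message via str(exc)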
| 24.8125 | 39 | 0.634761 |
7947437203a11573e9fc5ee31940198793a70b2f | 7,159 | py | Python | pystachio/basic.py | jdxcode/pystachio | 601a2c36d7d67efa8f917e7cbf0ab8dc66c7827f | ["MIT"] | null | null | null | pystachio/basic.py | jdxcode/pystachio | 601a2c36d7d67efa8f917e7cbf0ab8dc66c7827f | ["MIT"] | null | null | null | pystachio/basic.py | jdxcode/pystachio | 601a2c36d7d67efa8f917e7cbf0ab8dc66c7827f | ["MIT"] | null | null | null |
from .base import Object
from .compatibility import Compatibility
from .parsing import MustacheParser
from .typing import Type, TypeCheck, TypeFactory, TypeMetaclass
class SimpleObject(Object, Type):
"""
A simply-valued (unnamable) object.
"""
__slots__ = ('_value',)
def __init__(self, value):
self._value = value
super(SimpleObject, self).__init__()
def get(self):
return self._value
def dup(self):
return self.__class__(self._value)
def _my_cmp(self, other):
if self.__class__ != other.__class__:
return -1
si, _ = self.interpolate()
oi, _ = other.interpolate()
if si._value < oi._value:
return -1
elif si._value > oi._value:
return 1
else:
return 0
def __hash__(self):
return hash(self._value)
def __eq__(self, other):
return self._my_cmp(other) == 0
def __lt__(self, other):
return self._my_cmp(other) == -1
def __gt__(self, other):
return self._my_cmp(other) == 1
def __le__(self, other):
return self._my_cmp(other) <= 0
def __ge__(self, other):
return self._my_cmp(other) >= 0
def __unicode__(self):
si, _ = self.interpolate()
return unicode(si._value)
def __str__(self):
si, _ = self.interpolate()
return str(si._value)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, str(self) if Compatibility.PY3 else unicode(self))
def interpolate(self):
if not isinstance(self._value, Compatibility.stringy):
return self.__class__(self.coerce(self._value)), []
else:
joins, unbound = MustacheParser.resolve(self._value, *self.scopes())
if unbound:
return self.__class__(joins), unbound
else:
# XXX
self_copy = self.copy()
self_copy._value = self_copy.coerce(joins)
return self_copy, unbound
@classmethod
def type_factory(cls):
return cls.__name__
@classmethod
def type_parameters(cls):
return ()
class String(SimpleObject):
@classmethod
def checker(cls, obj):
assert isinstance(obj, String)
if isinstance(obj._value, Compatibility.stringy):
return TypeCheck.success()
else:
# TODO(wickman) Perhaps we should mark uninterpolated Mustache objects as
# intrinsically non-stringy, because String will never typecheck false given
# its input constraints.
return TypeCheck.failure("%s not a string" % repr(obj._value))
@classmethod
def coerce(cls, value):
ACCEPTED_SOURCE_TYPES = Compatibility.stringy + Compatibility.numeric
if not isinstance(value, ACCEPTED_SOURCE_TYPES):
raise cls.CoercionError(value, cls)
return str(value) if Compatibility.PY3 else unicode(value)
class StringFactory(TypeFactory):
PROVIDES = 'String'
@staticmethod
def create(type_dict, *type_parameters):
return String
class Integer(SimpleObject):
@classmethod
def checker(cls, obj):
assert isinstance(obj, Integer)
if isinstance(obj._value, Compatibility.integer):
return TypeCheck.success()
else:
return TypeCheck.failure("%s not an integer" % repr(obj._value))
@classmethod
def coerce(cls, value):
ACCEPTED_SOURCE_TYPES = Compatibility.numeric + Compatibility.stringy
if not isinstance(value, ACCEPTED_SOURCE_TYPES):
raise cls.CoercionError(value, cls)
try:
return int(value)
except ValueError:
raise cls.CoercionError(value, cls)
class IntegerFactory(TypeFactory):
PROVIDES = 'Integer'
@staticmethod
def create(type_dict, *type_parameters):
return Integer
class Float(SimpleObject):
@classmethod
def checker(cls, obj):
assert isinstance(obj, Float)
if isinstance(obj._value, Compatibility.real + Compatibility.integer):
return TypeCheck.success()
else:
return TypeCheck.failure("%s not a float" % repr(obj._value))
@classmethod
def coerce(cls, value):
ACCEPTED_SOURCE_TYPES = Compatibility.numeric + Compatibility.stringy
if not isinstance(value, ACCEPTED_SOURCE_TYPES):
raise cls.CoercionError(value, cls)
try:
return float(value)
except ValueError:
raise cls.CoercionError(value, cls)
class FloatFactory(TypeFactory):
PROVIDES = 'Float'
@staticmethod
def create(type_dict, *type_parameters):
return Float
class Boolean(SimpleObject):
@classmethod
def checker(cls, obj):
assert isinstance(obj, Boolean)
if isinstance(obj._value, bool):
return TypeCheck.success()
else:
return TypeCheck.failure("%s not a boolean" % repr(obj._value))
@classmethod
def coerce(cls, value):
ACCEPTED_SOURCE_TYPES = (bool,) + Compatibility.numeric + Compatibility.stringy
if not isinstance(value, ACCEPTED_SOURCE_TYPES):
raise cls.CoercionError(value, cls)
if isinstance(value, bool):
return value
elif isinstance(value, Compatibility.stringy):
if value.lower() in ("true", "1"):
return True
elif value.lower() in ("false", "0"):
return False
else:
raise cls.CoercionError(value, cls)
else:
return bool(value)
class BooleanFactory(TypeFactory):
PROVIDES = 'Boolean'
@staticmethod
def create(type_dict, *type_parameters):
return Boolean
class EnumContainer(SimpleObject):
def __init__(self, value):
stringish = String(value)
_, refs = stringish.interpolate()
if not refs and value not in self.VALUES:
raise ValueError('%s only accepts the following values: %s' % (
self.__class__.__name__, ', '.join(self.VALUES)))
super(EnumContainer, self).__init__(value)
@classmethod
def checker(cls, obj):
assert isinstance(obj, EnumContainer)
if isinstance(obj._value, Compatibility.stringy) and obj._value in cls.VALUES:
return TypeCheck.success()
else:
return TypeCheck.failure("%s not in the enumeration (%s)" % (repr(obj._value),
', '.join(cls.VALUES)))
@classmethod
def coerce(cls, value):
if not isinstance(value, Compatibility.stringy) or value not in cls.VALUES:
raise cls.CoercionError(value, cls, '%s is not one of %s' % (
value, ', '.join(cls.VALUES)))
return str(value) if Compatibility.PY3 else unicode(value)
@classmethod
def type_factory(cls):
return 'Enum'
@classmethod
def type_parameters(cls):
return (cls.__name__, cls.VALUES)
class EnumFactory(TypeFactory):
PROVIDES = 'Enum'
@staticmethod
def create(type_dict, *type_parameters):
"""
EnumFactory.create(*type_parameters) expects:
enumeration name, (enumeration values)
"""
name, values = type_parameters
assert isinstance(values, (list, tuple))
for value in values:
assert isinstance(value, Compatibility.stringy)
return TypeMetaclass(str(name), (EnumContainer,), { 'VALUES': values })
def Enum(*stuff):
# TODO(wickman) Check input
if len(stuff) == 2 and isinstance(stuff[0], Compatibility.stringy) and (
isinstance(stuff[1], (list, tuple))):
name, values = stuff
return TypeFactory.new({}, EnumFactory.PROVIDES, name, values)
else:
return TypeFactory.new({}, EnumFactory.PROVIDES, 'Enum_' + '_'.join(stuff), stuff)
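# Illustrative usage sketch (not part of the original file); both call forms below are
# assumed to build an enumeration type via the factory above:
#   Color = Enum('Color', ('red', 'green', 'blue'))
#   Direction = Enum('north', 'south', 'east', 'west')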
| 27.748062 | 98 | 0.684733 |
794743a4cdeb7b19cad80c391484c031ddc3c18a | 1,312 | py | Python | detectron2/modeling/__init__.py | irenecortes/detectron2 | 9c48f7a7b1ff87525dee810006e741bbae0b56eb | ["Apache-2.0"] | null | null | null | detectron2/modeling/__init__.py | irenecortes/detectron2 | 9c48f7a7b1ff87525dee810006e741bbae0b56eb | ["Apache-2.0"] | null | null | null | detectron2/modeling/__init__.py | irenecortes/detectron2 | 9c48f7a7b1ff87525dee810006e741bbae0b56eb | ["Apache-2.0"] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from detectron2.layers import ShapeSpec
from .anchor_generator import build_anchor_generator, ANCHOR_GENERATOR_REGISTRY
from .backbone import (
BACKBONE_REGISTRY,
FPN,
Backbone,
ResNet,
ResNetBlockBase,
build_backbone,
build_resnet_backbone,
make_stage,
)
from .meta_arch import (
META_ARCH_REGISTRY,
SEM_SEG_HEADS_REGISTRY,
GeneralizedRCNN,
PanopticFPN,
ProposalNetwork,
RetinaNet,
SemanticSegmentor,
build_model,
build_sem_seg_head,
)
from .postprocessing import detector_postprocess
from .proposal_generator import (
PROPOSAL_GENERATOR_REGISTRY,
build_proposal_generator,
RPN_HEAD_REGISTRY,
build_rpn_head,
)
from .roi_heads import (
ROI_BOX_HEAD_REGISTRY,
ROI_HEADS_REGISTRY,
ROI_KEYPOINT_HEAD_REGISTRY,
ROI_MASK_HEAD_REGISTRY,
ROIHeads,
StandardROIHeads,
EmbeddingHead,
BaseMaskRCNNHead,
BaseKeypointRCNNHead,
build_box_head,
build_embedding_head,
build_keypoint_head,
build_mask_head,
build_roi_heads,
)
from .test_time_augmentation import DatasetMapperTTA, GeneralizedRCNNWithTTA
_EXCLUDE = {"ShapeSpec"}
__all__ = [k for k in globals().keys() if k not in _EXCLUDE and not k.startswith("_")]
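# __all__ re-exports every public name imported above, skipping private names and the
# entries listed in _EXCLUDE.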
| 24.754717 | 86 | 0.755335 |
7947441c6f92d3f548e39797372e86e679fb589c | 6,715 | py | Python | noxfile.py | tswast/python-bigtable | 3861f6b0552e431a1fc7aa872c4d293ca129c28c | ["Apache-2.0"] | null | null | null | noxfile.py | tswast/python-bigtable | 3861f6b0552e431a1fc7aa872c4d293ca129c28c | ["Apache-2.0"] | 7 | 2020-09-22T19:29:12.000Z | 2020-10-08T16:11:01.000Z | noxfile.py | tswast/python-bigtable | 3861f6b0552e431a1fc7aa872c4d293ca129c28c | ["Apache-2.0"] | 1 | 2020-10-04T12:10:40.000Z | 2020-10-04T12:10:40.000Z |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import shutil
import nox
DEFAULT_PYTHON_VERSION = "3.8"
SYSTEM_TEST_PYTHON_VERSIONS = ["2.7", "3.8"]
UNIT_TEST_PYTHON_VERSIONS = ["2.7", "3.5", "3.6", "3.7", "3.8"]
LOCAL_DEPS = ()
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.install("flake8", "black", *LOCAL_DEPS)
session.run(
"black",
"--check",
"google",
"tests",
"docs",
)
session.run("flake8", "google", "tests")
@nox.session(python="3.6")
def blacken(session):
"""Run black.
Format code to uniform standard.
"""
session.install("black")
session.run(
"black",
"google",
"tests",
"docs",
)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.install("docutils", "pygments")
session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
def default(session):
# Install all test dependencies, then install this package in-place.
session.install("mock", "pytest", "pytest-cov")
for local_dep in LOCAL_DEPS:
session.install("-e", local_dep)
session.install("-e", ".")
# Run py.test against the unit tests.
session.run(
"py.test",
"--quiet",
"--cov=google.cloud",
"--cov=tests.unit",
"--cov-append",
"--cov-config=.coveragerc",
"--cov-report=",
"--cov-fail-under=0",
os.path.join("tests", "unit"),
*session.posargs,
)
@nox.session(python=UNIT_TEST_PYTHON_VERSIONS)
def unit(session):
"""Run the unit test suite."""
default(session)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.install("coverage", "pytest-cov")
session.run("coverage", "report", "--show-missing", "--fail-under=99")
session.run("coverage", "erase")
@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
def system(session):
"""Run the system test suite."""
system_test_path = os.path.join("tests", "system.py")
system_test_folder_path = os.path.join("tests", "system")
# Sanity check: Only run tests if the environment variable is set.
if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
session.skip("Credentials must be set via environment variable")
system_test_exists = os.path.exists(system_test_path)
system_test_folder_exists = os.path.exists(system_test_folder_path)
# Sanity check: only run tests if found.
if not system_test_exists and not system_test_folder_exists:
session.skip("System tests were not found")
# Use pre-release gRPC for system tests.
session.install("--pre", "grpcio")
# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
session.install("mock", "pytest")
for local_dep in LOCAL_DEPS:
session.install("-e", local_dep)
session.install("-e", "test_utils/")
session.install("-e", ".")
# Run py.test against the system tests.
if system_test_exists:
session.run("py.test", "--quiet", system_test_path, *session.posargs)
if system_test_folder_exists:
session.run("py.test", "--quiet", system_test_folder_path, *session.posargs)
@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
def snippets(session):
"""Run the documentation example snippets."""
# Sanity check: Only run snippets system tests if the environment variable
# is set.
if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
session.skip("Credentials must be set via environment variable.")
# Install all test dependencies, then install local packages in place.
session.install("mock", "pytest")
for local_dep in LOCAL_DEPS:
session.install("-e", local_dep)
session.install("-e", "test_utils/")
session.install("-e", ".")
session.run(
"py.test",
"--quiet",
os.path.join("docs", "snippets.py"),
*session.posargs
)
session.run(
"py.test",
"--quiet",
os.path.join("docs", "snippets_table.py"),
*session.posargs
)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def docs(session):
"""Build the docs for this library."""
session.install("-e", ".")
session.install("sphinx", "alabaster", "recommonmark")
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
"-W", # warnings as errors
"-T", # show full traceback on exception
"-N", # no colors
"-b",
"html",
"-d",
os.path.join("docs", "_build", "doctrees", ""),
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def docfx(session):
"""Build the docfx yaml files for this library."""
session.install("-e", ".")
session.install("sphinx", "alabaster", "recommonmark", "sphinx-docfx-yaml")
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
"-T", # show full traceback on exception
"-N", # no colors
"-D",
(
"extensions=sphinx.ext.autodoc,"
"sphinx.ext.autosummary,"
"docfx_yaml.extension,"
"sphinx.ext.intersphinx,"
"sphinx.ext.coverage,"
"sphinx.ext.napoleon,"
"sphinx.ext.todo,"
"sphinx.ext.viewcode,"
"recommonmark"
),
"-b",
"html",
"-d",
os.path.join("docs", "_build", "doctrees", ""),
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
| 30.112108 | 84 | 0.627103 |
794744383ecd6fac4684f953717a3b592f048394 | 1,509 | py | Python | user/migrations/0001_initial.py | Muia23/Grammer | dcc26937d88382c1da36a5f72306e6de367e90a3 | [
"Unlicense"
] | null | null | null | user/migrations/0001_initial.py | Muia23/Grammer | dcc26937d88382c1da36a5f72306e6de367e90a3 | [
"Unlicense"
] | null | null | null | user/migrations/0001_initial.py | Muia23/Grammer | dcc26937d88382c1da36a5f72306e6de367e90a3 | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-08-10 14:22
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import tinymce.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image_name', models.CharField(max_length=60)),
('caption', tinymce.models.HTMLField(blank=True)),
('post_date', models.DateTimeField(auto_now_add=True)),
('upload_image', models.ImageField(upload_to='upload/')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('prof_pic', models.ImageField(upload_to='profile/')),
('bio', models.TextField(blank=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
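    # The initial migration creates the Post and Profile tables, each holding a
    # ForeignKey to the configured AUTH_USER_MODEL.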
| 36.804878 | 118 | 0.616302 |
794744a5c58f8d23651d2531937c8a0cb7c3cc5f | 13,492 | py | Python | atomate/vasp/database.py | dongsenfo/atomate | 01558e8c3e38470c02bc8b50c0ee3aa6198e5206 | ["BSD-3-Clause-LBNL"] | 1 | 2019-09-02T00:55:26.000Z | 2019-09-02T00:55:26.000Z | atomate/vasp/database.py | dongsenfo/atomate | 01558e8c3e38470c02bc8b50c0ee3aa6198e5206 | ["BSD-3-Clause-LBNL"] | null | null | null | atomate/vasp/database.py | dongsenfo/atomate | 01558e8c3e38470c02bc8b50c0ee3aa6198e5206 | ["BSD-3-Clause-LBNL"] | 3 | 2018-09-01T00:08:51.000Z | 2021-11-17T01:32:14.000Z |
# coding: utf-8
from __future__ import division, print_function, unicode_literals, absolute_import
from monty.json import MontyEncoder, MontyDecoder
"""
This module defines the database classes.
"""
import zlib
import json
from bson import ObjectId
from pymatgen.electronic_structure.bandstructure import BandStructure, BandStructureSymmLine
from pymatgen.electronic_structure.dos import CompleteDos
import gridfs
from pymongo import ASCENDING, DESCENDING
from atomate.utils.database import CalcDb
from atomate.utils.utils import get_logger
__author__ = 'Kiran Mathew'
__credits__ = 'Anubhav Jain'
__email__ = '[email protected]'
logger = get_logger(__name__)
class VaspCalcDb(CalcDb):
"""
Class to help manage database insertions of Vasp drones
"""
def __init__(self, host="localhost", port=27017, database="vasp", collection="tasks", user=None,
password=None, **kwargs):
super(VaspCalcDb, self).__init__(host, port, database, collection, user,
password, **kwargs)
def build_indexes(self, indexes=None, background=True):
"""
Build the indexes.
Args:
indexes (list): list of single field indexes to be built.
background (bool): Run in the background or not.
TODO: make sure that the index building is sensible and check for
existing indexes.
"""
_indices = indexes if indexes else [
"formula_pretty", "formula_anonymous",
"output.energy", "output.energy_per_atom", "dir_name"
]
self.collection.create_index("task_id", unique=True, background=background)
# build single field indexes
for i in _indices:
self.collection.create_index(i, background=background)
# build compound indexes
for formula in ("formula_pretty", "formula_anonymous"):
self.collection.create_index([(formula, ASCENDING),
("output.energy", DESCENDING),
("completed_at", DESCENDING)],
background=background)
self.collection.create_index([(formula, ASCENDING),
("output.energy_per_atom", DESCENDING),
("completed_at", DESCENDING)],
background=background)
def insert_task(self, task_doc, use_gridfs=False):
"""
Inserts a task document (e.g., as returned by Drone.assimilate()) into the database.
Handles putting DOS, band structure and charge density into GridFS as needed.
        During testing, a percentage of runs on some clusters produced corrupted AECCAR files even when everything else about the calculation looked OK.
So we do a quick check here and only record the AECCARs if they are valid
Args:
task_doc: (dict) the task document
            use_gridfs (bool): use gridfs for bandstructures and DOS
Returns:
(int) - task_id of inserted document
"""
dos = None
bs = None
chgcar = None
aeccar0 = None
write_aeccar = False
# move dos BS and CHGCAR from doc to gridfs
if use_gridfs and "calcs_reversed" in task_doc:
if "dos" in task_doc["calcs_reversed"][0]: # only store idx=0 (last step)
dos = json.dumps(task_doc["calcs_reversed"][0]["dos"], cls=MontyEncoder)
del task_doc["calcs_reversed"][0]["dos"]
if "bandstructure" in task_doc["calcs_reversed"][0]: # only store idx=0 (last step)
bs = json.dumps(task_doc["calcs_reversed"][0]["bandstructure"], cls=MontyEncoder)
del task_doc["calcs_reversed"][0]["bandstructure"]
if "chgcar" in task_doc["calcs_reversed"][0]: # only store idx=0 DOS
chgcar = json.dumps(task_doc["calcs_reversed"][0]["chgcar"], cls=MontyEncoder)
del task_doc["calcs_reversed"][0]["chgcar"]
if "aeccar0" in task_doc["calcs_reversed"][0]:
aeccar0 = task_doc["calcs_reversed"][0]["aeccar0"]
aeccar2 = task_doc["calcs_reversed"][0]["aeccar2"]
# check if the aeccar is valid before insertion
if (aeccar0.data['total'] + aeccar2.data['total']).min() < 0:
logger.warning(f"The AECCAR seems to be corrupted for task_in directory {task_doc['dir_name']}\nSkipping storage of AECCARs")
write_aeccar = False
else:
# overwrite the aeccar variable with their string representations to be inserted in GridFS
aeccar0 = json.dumps(task_doc["calcs_reversed"][0]["aeccar0"], cls=MontyEncoder)
aeccar2 = json.dumps(task_doc["calcs_reversed"][0]["aeccar2"], cls=MontyEncoder)
write_aeccar = True
del task_doc["calcs_reversed"][0]["aeccar0"]
del task_doc["calcs_reversed"][0]["aeccar2"]
# insert the task document
t_id = self.insert(task_doc)
# insert the dos into gridfs and update the task document
if dos:
dos_gfs_id, compression_type = self.insert_gridfs(dos, "dos_fs", task_id=t_id)
self.collection.update_one(
{"task_id": t_id}, {"$set": {"calcs_reversed.0.dos_compression": compression_type}})
self.collection.update_one({"task_id": t_id}, {"$set": {"calcs_reversed.0.dos_fs_id": dos_gfs_id}})
# insert the bandstructure into gridfs and update the task documents
if bs:
bfs_gfs_id, compression_type = self.insert_gridfs(bs, "bandstructure_fs", task_id=t_id)
self.collection.update_one(
{"task_id": t_id}, {"$set": {"calcs_reversed.0.bandstructure_compression": compression_type}})
self.collection.update_one(
{"task_id": t_id}, {"$set": {"calcs_reversed.0.bandstructure_fs_id": bfs_gfs_id}})
# insert the CHGCAR file into gridfs and update the task documents
if chgcar:
chgcar_gfs_id, compression_type = self.insert_gridfs(chgcar, "chgcar_fs", task_id=t_id)
self.collection.update_one(
{"task_id": t_id}, {"$set": {"calcs_reversed.0.chgcar_compression": compression_type}})
self.collection.update_one({"task_id": t_id}, {"$set": {"calcs_reversed.0.chgcar_fs_id": chgcar_gfs_id}})
# insert the AECCARs file into gridfs and update the task documents
if write_aeccar:
aeccar0_gfs_id, compression_type = self.insert_gridfs(aeccar0, "aeccar0_fs", task_id=t_id)
self.collection.update_one(
{"task_id": t_id}, {"$set": {"calcs_reversed.0.aeccar0_compression": compression_type}})
self.collection.update_one({"task_id": t_id}, {"$set": {"calcs_reversed.0.aeccar0_fs_id": aeccar0_gfs_id}})
aeccar2_gfs_id, compression_type = self.insert_gridfs(aeccar2, "aeccar2_fs", task_id=t_id)
self.collection.update_one(
{"task_id": t_id}, {"$set": {"calcs_reversed.0.aeccar2_compression": compression_type}})
self.collection.update_one({"task_id": t_id}, {"$set": {"calcs_reversed.0.aeccar2_fs_id": aeccar2_gfs_id}})
return t_id
def retrieve_task(self, task_id):
"""
Retrieves a task document and unpacks the band structure and DOS as dict
Args:
task_id: (int) task_id to retrieve
Returns:
(dict) complete task document with BS + DOS included
"""
task_doc = self.collection.find_one({"task_id": task_id})
calc = task_doc["calcs_reversed"][0]
if 'bandstructure_fs_id' in calc:
bs = self.get_band_structure(task_id)
calc["bandstructure"] = bs.as_dict()
if 'dos_fs_id' in calc:
dos = self.get_dos(task_id)
calc["dos"] = dos.as_dict()
if 'chgcar_fs_id' in calc:
chgcar = self.get_chgcar(task_id)
calc["chgcar"] = chgcar
if 'aeccar0_fs_id' in calc:
aeccar = self.get_aeccar(task_id)
calc["aeccar0"] = aeccar['aeccar0']
calc["aeccar2"] = aeccar['aeccar2']
return task_doc
def insert_gridfs(self, d, collection="fs", compress=True, oid=None, task_id=None):
"""
Insert the given document into GridFS.
Args:
d (dict): the document
collection (string): the GridFS collection name
compress (bool): Whether to compress the data or not
oid (ObjectId()): the _id of the file; if specified, it must not already exist in GridFS
task_id(int or str): the task_id to store into the gridfs metadata
Returns:
file id, the type of compression used.
"""
oid = oid or ObjectId()
compression_type = None
if compress:
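            # note: the truthy `compress` value is also passed as the zlib compression level, so True compresses at level 1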
d = zlib.compress(d.encode(), compress)
compression_type = "zlib"
fs = gridfs.GridFS(self.db, collection)
if task_id:
# Putting task id in the metadata subdocument as per mongo specs:
# https://github.com/mongodb/specifications/blob/master/source/gridfs/gridfs-spec.rst#terms
fs_id = fs.put(d, _id=oid, metadata={"task_id": task_id, "compression": compression_type})
else:
fs_id = fs.put(d, _id=oid, metadata={"compression": compression_type})
return fs_id, compression_type
def get_band_structure(self, task_id):
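        # band structures are stored zlib-compressed in GridFS; fetch, inflate and deserialize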
m_task = self.collection.find_one({"task_id": task_id}, {"calcs_reversed": 1})
fs_id = m_task['calcs_reversed'][0]['bandstructure_fs_id']
fs = gridfs.GridFS(self.db, 'bandstructure_fs')
bs_json = zlib.decompress(fs.get(fs_id).read())
bs_dict = json.loads(bs_json.decode())
if bs_dict["@class"] == "BandStructure":
return BandStructure.from_dict(bs_dict)
elif bs_dict["@class"] == "BandStructureSymmLine":
return BandStructureSymmLine.from_dict(bs_dict)
else:
raise ValueError("Unknown class for band structure! {}".format(bs_dict["@class"]))
def get_dos(self, task_id):
m_task = self.collection.find_one({"task_id": task_id}, {"calcs_reversed": 1})
fs_id = m_task['calcs_reversed'][0]['dos_fs_id']
fs = gridfs.GridFS(self.db, 'dos_fs')
dos_json = zlib.decompress(fs.get(fs_id).read())
dos_dict = json.loads(dos_json.decode())
return CompleteDos.from_dict(dos_dict)
def get_chgcar_string(self, task_id):
        # Not really used now, consider deleting
m_task = self.collection.find_one({"task_id": task_id}, {"calcs_reversed": 1})
fs_id = m_task['calcs_reversed'][0]['chgcar_fs_id']
fs = gridfs.GridFS(self.db, 'chgcar_fs')
return zlib.decompress(fs.get(fs_id).read())
def get_chgcar(self, task_id):
"""
Read the CHGCAR grid_fs data into a Chgcar object
Args:
task_id(int or str): the task_id containing the gridfs metadata
Returns:
chgcar: Chgcar object
"""
m_task = self.collection.find_one({"task_id": task_id}, {"calcs_reversed": 1})
fs_id = m_task['calcs_reversed'][0]['chgcar_fs_id']
fs = gridfs.GridFS(self.db, 'chgcar_fs')
chgcar_json = zlib.decompress(fs.get(fs_id).read())
        chgcar = json.loads(chgcar_json, cls=MontyDecoder)
return chgcar
    def get_aeccar(self, task_id, check_valid=True):
"""
Read the AECCAR0 + AECCAR2 grid_fs data into a Chgcar object
Args:
task_id(int or str): the task_id containing the gridfs metadata
check_valid (bool): make sure that the aeccar is positive definite
Returns:
{"aeccar0" : Chgcar, "aeccar2" : Chgcar}: dict of Chgcar objects
"""
m_task = self.collection.find_one({"task_id": task_id}, {"calcs_reversed": 1})
fs_id = m_task['calcs_reversed'][0]['aeccar0_fs_id']
fs = gridfs.GridFS(self.db, 'aeccar0_fs')
aeccar_json = zlib.decompress(fs.get(fs_id).read())
aeccar0 = json.loads(aeccar_json, cls=MontyDecoder)
fs_id = m_task['calcs_reversed'][0]['aeccar2_fs_id']
fs = gridfs.GridFS(self.db, 'aeccar2_fs')
aeccar_json = zlib.decompress(fs.get(fs_id).read())
aeccar2 = json.loads(aeccar_json, cls=MontyDecoder)
if check_valid and (aeccar0.data['total'] + aeccar2.data['total']).min() < 0:
ValueError(f"The AECCAR seems to be corrupted for task_id = {task_id}")
return {'aeccar0': aeccar0, 'aeccar2': aeccar2}
def reset(self):
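        # wipes all task documents and GridFS collections and resets the task id counter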
self.collection.delete_many({})
self.db.counter.delete_one({"_id": "taskid"})
self.db.counter.insert_one({"_id": "taskid", "c": 0})
self.db.boltztrap.delete_many({})
self.db.dos_fs.files.delete_many({})
self.db.dos_fs.chunks.delete_many({})
self.db.dos_boltztrap_fs.files.delete_many({})
self.db.dos_boltztrap_fs.chunks.delete_many({})
self.db.bandstructure_fs.files.delete_many({})
self.db.bandstructure_fs.chunks.delete_many({})
self.build_indexes()
# TODO: @albalu, @matk86, @computron - add BoltztrapCalcDB management here -computron, matk86
| 45.275168 | 150 | 0.619552 |
7947455a17a425c8a7b20e839f3f3d776fcfe2a0 | 6,903 | py | Python | sonybraviaremote/tv.py | njdavis/py-sony-bravia-remote | a5f0399717326e1aed746b5de4cbe0651384f1d9 | [
"MIT"
] | null | null | null | sonybraviaremote/tv.py | njdavis/py-sony-bravia-remote | a5f0399717326e1aed746b5de4cbe0651384f1d9 | [
"MIT"
] | null | null | null | sonybraviaremote/tv.py | njdavis/py-sony-bravia-remote | a5f0399717326e1aed746b5de4cbe0651384f1d9 | [
"MIT"
] | null | null | null | from typing import Callable
import json
import requests
from .tvconfig import TVConfig
class TV:
"""Represents a Sony Bravia TV that can
be controlled remotely."""
def __init__(self, auth_key: str, config: TVConfig):
"""Initializes a new instance of
:see:TV with the specified configuration."""
self.auth_key = auth_key
self.config = config
self._irc_codes = self.irc_codes()
def irc_codes(self) -> dict:
"""Gets a complete list of the supported
IRC codes from this TV.
Returns:
A dictionary of all available
            IRC codes, where the key is the
            name of the IRC code and the value
            is the actual IRC code.
"""
url = 'http://%s/sony/system' % self.config.host
payload = {
'method': 'getRemoteControllerInfo',
'params':[],
'id': 10,
'version':'1.0'
}
response = requests.post(
url,
data=json.dumps(payload),
headers={
'SOAPAction': 'urn:schemas-sony-com:service:IRCC:1#X_SendIRCC'
}
)
if response.status_code != 200:
raise RuntimeError(response.body)
original_data = response.json()
irc_codes = dict()
for entry in original_data['result'][1]:
irc_codes[entry['name']] = entry['value']
return irc_codes
def is_on(self):
"""Gets whether the TV is turned on or not."""
url = 'http://%s/sony/system' % self.config.host
payload = {
'method': 'getPowerStatus',
'params':[],
'id': 10,
'version':'1.0'
}
response = requests.post(url, data=json.dumps(payload))
if response.status_code != 200:
raise RuntimeError(response.body)
data = response.json()
return data['result'][0]['status'] == 'active'
def get_input(self):
"""Gets whether the current input."""
url = 'http://%s/sony/avContent' % self.config.host
payload = {
'method': 'getPlayingContentInfo',
'params':[],
'id': 103,
'version':'1.0'
}
response = requests.post(
url,
data=json.dumps(payload),
headers={'X-Auth-PSK':'0000' }
)
if response.status_code != 200:
raise RuntimeError(response)
data = response.json()
return data['result'][0]
def input(self):
self._send_irc_code('Input')
def set_hdmi1(self):
self._send_irc_code('Hdmi1')
def set_hdmi2(self):
self._send_irc_code('Hdmi2')
def set_hdmi3(self):
self._send_irc_code('Hdmi3')
def set_hdmi4(self):
self._send_irc_code('Hdmi4')
def mute(self):
self._send_irc_code('Mute')
def volume_up(self, amount=5):
for _ in range(0, amount):
self._send_irc_code('VolumeUp')
def volume_down(self, amount=5):
for _ in range(0, amount):
self._send_irc_code('VolumeDown')
def pause(self):
self._send_irc_code('Pause')
def play(self):
self._send_irc_code('Play')
def power_off(self):
self._send_irc_code('PowerOff')
def wake_up(self):
self._send_irc_code('WakeUp')
def home(self):
self._send_irc_code('Home')
def netflix(self):
self._send_irc_code('Netflix')
def enter(self):
self._send_irc_code('Enter')
def confirm(self):
self._send_irc_code('Confirm')
@classmethod
def connect(cls, config: TVConfig, callback: Callable[[], str]) -> 'TV':
"""Attempts to connect to the specified TV.
Arguments:
config:
The configuration describing
the TV to connect to.
calback:
The method to call to resolve
the authentication challenge.
Returns:
            A new instance of :see:TV upon a successful
connection.
"""
auth_key = cls._attempt_auth(config)
if auth_key:
return TV(auth_key, config)
pincode = callback()
auth_key = cls._attempt_auth(config, pincode)
if auth_key:
return TV(auth_key, config)
raise RuntimeError('Could not pair with the TV')
@staticmethod
def _attempt_auth(config: TVConfig, pincode=None):
"""Attempts authentication at the TV.
Arguments:
config:
The TV at which to attempt the authentication.
pincode:
The pincode displayed on the screen to pair
with. This is only needed on very first
authentication.
"""
url = 'http://%s/sony/accessControl' % config.host
client_id = '%s:1' % config.device_name
payload = {
'id': 13,
'method': 'actRegister',
'version': '1.0',
'params': [
{
'clientid': client_id,
'nickname': config.device_name
},
[{
'clientid': client_id,
'value': 'yes',
'nickname': config.device_name,
'function': 'WOL'
}]
]
}
params = dict(data=json.dumps(payload))
if pincode:
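            # the very first pairing requires the on-screen pincode, sent as the HTTP basic auth password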
params['auth'] = ('', pincode)
response = requests.post(url, **params)
if response.status_code != 200:
return False
return response.headers['Set-Cookie']
def _send_irc_code(self, code):
"""Sends an IRC code to the TV.
Each action that can be performed has a IRC code
associated with it.
Arguments:
code:
The name of the IRC code to send.
"""
url = 'http://%s/sony/IRCC' % self.config.host
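        # IRCC commands are delivered as a SOAP envelope to the TV's IRCC endpoint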
payload = ('<?xml version="1.0"?>'
'<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
'<s:Body>'
'<u:X_SendIRCC xmlns:u="urn:schemas-sony-com:service:IRCC:1">'
'<IRCCCode>%s</IRCCCode>'
'</u:X_SendIRCC>'
'</s:Body>'
'</s:Envelope>') % self._irc_codes[code]
response = requests.post(
url,
data=payload,
headers={
'Cookie': self.auth_key,
'SOAPAction': '"urn:schemas-sony-com:service:IRCC:1#X_SendIRCC"'
}
)
if response.status_code != 200:
raise RuntimeError(response)
| 26.755814 | 145 | 0.518181 |
794745db3fba859d1d4bf8c6eaafaff746a0f54b | 3,182 | py | Python | photos/tests.py | wanjikuciku/lony-gram | 4a0b4ead09b605b474cdbb8139cfdbdad751d63c | [
"Unlicense"
] | null | null | null | photos/tests.py | wanjikuciku/lony-gram | 4a0b4ead09b605b474cdbb8139cfdbdad751d63c | [
"Unlicense"
] | 5 | 2020-06-05T20:01:07.000Z | 2021-09-08T00:52:27.000Z | photos/tests.py | wanjikuciku/lony-gram | 4a0b4ead09b605b474cdbb8139cfdbdad751d63c | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from .models import Image,Comments,Profile
from django.contrib.auth.models import User
# Create your tests here.
class ImageTestCase(TestCase):
"""
This is the class I will use to test the images
"""
def setUp(self):
"""
This will create a new imae before each test
"""
self.new_user = User(username = "Hey", email = "[email protected]",password = "milkshake")
self.new_user.save()
self.new_image = Image(name = 'Hey', user = self.new_user)
self.new_image.save()
def tearDown(self):
"""
This will clear the db after each test
"""
Image.objects.all().delete()
def test_instance(self):
"""
This will test whether the new image created is an instance of the Image class
"""
self.assertTrue(isinstance(self.new_image, Image))
def test_init(self):
"""
This will test whether the new image is instantiated correctly
"""
self.assertTrue(self.new_image.name == "Hey")
def test_save_image(self):
"""
This will test whether the new image is added to the db
"""
self.new_image.save_image()
self.assertTrue(len(Image.objects.all()) > 0)
# def test_image_delete(self):
# """
# This will test whether the image is deleted from the db
# """
# self.new_image.save_image()
# self.assertTrue(len(Image.objects.all()) > 0)
# self.new_image.delete_image()
# self.assertTrue(len(Image.objects.all()) == 0)
class CommentTestCases(TestCase):
"""
This is the class I will use to test the comments
"""
def setUp(self):
"""
This will create a new comment before every test
"""
self.new_user = User(username = "Hey")
self.new_user.save()
self.new_image = Image(name = 'hey', user = self.new_user)
self.new_image.save_image()
self.new_comment = Comments(comment = "Cool", image = self.new_image)
def tearDown(self):
"""
This will clear the dbs after each test
"""
User.objects.all().delete()
Image.objects.all().delete()
Comments.objects.all().delete()
def test_save_comment(self):
"""
This will test whether the new comment is added to the db
"""
self.new_comment.save_comment()
self.assertTrue(len(Comments.objects.all()) > 0)
class ProfileTestCases(TestCase):
"""
This will test the profiles
"""
def setUp(self):
"""
This will add a new profile before each test
"""
self.new_user = User(username = "Hey")
self.new_user.save()
def tearDown(self):
User.objects.all().delete()
Profile.objects.all().delete()
def test_search_users(self):
"""
This will test whether the search function works
"""
users = Profile.search_user("hey")
self.assertTrue(len(users) == 1)
# Create your tests here.
| 27.669565 | 106 | 0.591138 |
7947477bca8d4c6b3b94f6c5ac82714034d8b463 | 159 | py | Python | pyidea/__init__.py | madelyneriksen/PyIdea | 0ed29acc0247c7c6802d17a01f935e6eec9652ed | [
"MIT"
] | null | null | null | pyidea/__init__.py | madelyneriksen/PyIdea | 0ed29acc0247c7c6802d17a01f935e6eec9652ed | [
"MIT"
] | null | null | null | pyidea/__init__.py | madelyneriksen/PyIdea | 0ed29acc0247c7c6802d17a01f935e6eec9652ed | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Top-level package for PyIdea."""
__author__ = """Madelyn Eriksen"""
__email__ = '[email protected]'
__version__ = '0.1.0'
| 19.875 | 39 | 0.647799 |
794747a28490edd4f4f0299e6243b3d70b80e72d | 12,168 | py | Python | script.mrknow.urlresolver/lib/urlresolver9/lib/net.py | mrknow/filmkodi | 0162cde9ae25ddbf4a69330948714833ff2f78c9 | [
"Apache-2.0"
] | 105 | 2015-11-28T00:03:11.000Z | 2021-05-05T20:47:42.000Z | script.mrknow.urlresolver/lib/urlresolver9/lib/net.py | rrosajp/filmkodi | 0162cde9ae25ddbf4a69330948714833ff2f78c9 | [
"Apache-2.0"
] | 918 | 2015-11-28T14:12:40.000Z | 2022-03-23T20:24:49.000Z | script.mrknow.urlresolver/lib/urlresolver9/lib/net.py | rrosajp/filmkodi | 0162cde9ae25ddbf4a69330948714833ff2f78c9 | [
"Apache-2.0"
] | 111 | 2015-12-01T14:06:10.000Z | 2020-08-01T10:44:39.000Z | '''
common XBMC Module
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import random
import cookielib
import gzip
import re
import StringIO
import urllib
import urllib2
import socket
import time
import kodi
# Set Global timeout - Useful for slow connections and Putlocker.
socket.setdefaulttimeout(10)
BR_VERS = [
['%s.0' % i for i in xrange(18, 50)],
['37.0.2062.103', '37.0.2062.120', '37.0.2062.124', '38.0.2125.101', '38.0.2125.104', '38.0.2125.111', '39.0.2171.71', '39.0.2171.95', '39.0.2171.99', '40.0.2214.93', '40.0.2214.111',
'40.0.2214.115', '42.0.2311.90', '42.0.2311.135', '42.0.2311.152', '43.0.2357.81', '43.0.2357.124', '44.0.2403.155', '44.0.2403.157', '45.0.2454.101', '45.0.2454.85', '46.0.2490.71',
'46.0.2490.80', '46.0.2490.86', '47.0.2526.73', '47.0.2526.80', '48.0.2564.116', '49.0.2623.112', '50.0.2661.86'],
['11.0'],
['8.0', '9.0', '10.0', '10.6']]
WIN_VERS = ['Windows NT 10.0', 'Windows NT 7.0', 'Windows NT 6.3', 'Windows NT 6.2', 'Windows NT 6.1', 'Windows NT 6.0', 'Windows NT 5.1', 'Windows NT 5.0']
FEATURES = ['; WOW64', '; Win64; IA64', '; Win64; x64', '']
RAND_UAS = ['Mozilla/5.0 ({win_ver}{feature}; rv:{br_ver}) Gecko/20100101 Firefox/{br_ver}',
'Mozilla/5.0 ({win_ver}{feature}) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/{br_ver} Safari/537.36',
'Mozilla/5.0 ({win_ver}{feature}; Trident/7.0; rv:{br_ver}) like Gecko',
'Mozilla/5.0 (compatible; MSIE {br_ver}; {win_ver}{feature}; Trident/6.0)']
def get_ua():
try: last_gen = int(kodi.get_setting('last_ua_create'))
except: last_gen = 0
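    # regenerate the cached user agent if none exists or the cached one is older than 7 days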
if not kodi.get_setting('current_ua') or last_gen < (time.time() - (7 * 24 * 60 * 60)):
index = random.randrange(len(RAND_UAS))
versions = {'win_ver': random.choice(WIN_VERS), 'feature': random.choice(FEATURES), 'br_ver': random.choice(BR_VERS[index])}
user_agent = RAND_UAS[index].format(**versions)
# log_utils.log('Creating New User Agent: %s' % (user_agent), log_utils.LOGDEBUG)
kodi.set_setting('current_ua', user_agent)
kodi.set_setting('last_ua_create', str(int(time.time())))
else:
user_agent = kodi.get_setting('current_ua')
return user_agent
class Net:
'''
This class wraps :mod:`urllib2` and provides an easy way to make http
requests while taking care of cookies, proxies, gzip compression and
character encoding.
Example::
from addon.common.net import Net
net = Net()
response = net.http_GET('http://xbmc.org')
print response.content
'''
_cj = cookielib.LWPCookieJar()
_proxy = None
_user_agent = 'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0'
_http_debug = False
def __init__(self, cookie_file='', proxy='', user_agent='', http_debug=False):
'''
Kwargs:
cookie_file (str): Full path to a file to be used to load and save
cookies to.
proxy (str): Proxy setting (eg.
``'http://user:[email protected]:1234'``)
user_agent (str): String to use as the User Agent header. If not
                supplied the class will use a default user agent (Firefox)
http_debug (bool): Set ``True`` to have HTTP header info written to
the XBMC log for all requests.
'''
if cookie_file:
self.set_cookies(cookie_file)
if proxy:
self.set_proxy(proxy)
if user_agent:
self.set_user_agent(user_agent)
self._http_debug = http_debug
self._update_opener()
def set_cookies(self, cookie_file):
'''
Set the cookie file and try to load cookies from it if it exists.
Args:
cookie_file (str): Full path to a file to be used to load and save
cookies to.
'''
try:
self._cj.load(cookie_file, ignore_discard=True)
self._update_opener()
return True
except:
return False
def get_cookies(self):
        '''Returns a dictionary containing all cookie information by domain.'''
return self._cj._cookies
def save_cookies(self, cookie_file):
'''
Saves cookies to a file.
Args:
cookie_file (str): Full path to a file to save cookies to.
'''
self._cj.save(cookie_file, ignore_discard=True)
def set_proxy(self, proxy):
'''
Args:
proxy (str): Proxy setting (eg.
``'http://user:[email protected]:1234'``)
'''
self._proxy = proxy
self._update_opener()
def get_proxy(self):
'''Returns string containing proxy details.'''
return self._proxy
def set_user_agent(self, user_agent):
'''
Args:
user_agent (str): String to use as the User Agent header.
'''
self._user_agent = user_agent
def get_user_agent(self):
'''Returns user agent string.'''
return self._user_agent
def _update_opener(self):
'''
Builds and installs a new opener to be used by all future calls to
:func:`urllib2.urlopen`.
'''
if self._http_debug:
http = urllib2.HTTPHandler(debuglevel=1)
else:
http = urllib2.HTTPHandler()
if self._proxy:
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self._cj),
urllib2.ProxyHandler({'http':
self._proxy}),
urllib2.HTTPBasicAuthHandler(),
http)
else:
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self._cj),
urllib2.HTTPBasicAuthHandler(),
http)
urllib2.install_opener(opener)
def http_GET(self, url, headers={}, compression=True):
'''
Perform an HTTP GET request.
Args:
url (str): The URL to GET.
Kwargs:
headers (dict): A dictionary describing any headers you would like
to add to the request. (eg. ``{'X-Test': 'testing'}``)
compression (bool): If ``True`` (default), try to use gzip
compression.
Returns:
An :class:`HttpResponse` object containing headers and other
meta-information about the page and the page content.
'''
return self._fetch(url, headers=headers, compression=compression)
def http_POST(self, url, form_data, headers={}, compression=True):
'''
Perform an HTTP POST request.
Args:
url (str): The URL to POST.
form_data (dict): A dictionary of form data to POST.
Kwargs:
headers (dict): A dictionary describing any headers you would like
to add to the request. (eg. ``{'X-Test': 'testing'}``)
compression (bool): If ``True`` (default), try to use gzip
compression.
Returns:
An :class:`HttpResponse` object containing headers and other
meta-information about the page and the page content.
'''
return self._fetch(url, form_data, headers=headers, compression=compression)
def http_HEAD(self, url, headers={}):
'''
Perform an HTTP HEAD request.
Args:
url (str): The URL to GET.
Kwargs:
headers (dict): A dictionary describing any headers you would like
to add to the request. (eg. ``{'X-Test': 'testing'}``)
Returns:
An :class:`HttpResponse` object containing headers and other
meta-information about the page.
'''
request = urllib2.Request(url)
request.get_method = lambda: 'HEAD'
request.add_header('User-Agent', self._user_agent)
for key in headers:
request.add_header(key, headers[key])
response = urllib2.urlopen(request)
return HttpResponse(response)
def _fetch(self, url, form_data={}, headers={}, compression=True):
'''
Perform an HTTP GET or POST request.
Args:
url (str): The URL to GET or POST.
form_data (dict): A dictionary of form data to POST. If empty, the
request will be a GET, if it contains form data it will be a POST.
Kwargs:
headers (dict): A dictionary describing any headers you would like
to add to the request. (eg. ``{'X-Test': 'testing'}``)
compression (bool): If ``True`` (default), try to use gzip
compression.
Returns:
An :class:`HttpResponse` object containing headers and other
meta-information about the page and the page content.
'''
req = urllib2.Request(url)
if form_data:
if isinstance(form_data, basestring):
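                # a pre-encoded string is passed through unchanged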
form_data = form_data
else:
form_data = urllib.urlencode(form_data, True)
req = urllib2.Request(url, form_data)
req.add_header('User-Agent', self._user_agent)
for key in headers:
req.add_header(key, headers[key])
if compression:
req.add_header('Accept-Encoding', 'gzip')
req.add_unredirected_header('Host', req.get_host())
response = urllib2.urlopen(req)
return HttpResponse(response)
class HttpResponse:
'''
    This class represents a response from an HTTP request.
The content is examined and every attempt is made to properly encode it to
Unicode.
.. seealso::
:meth:`Net.http_GET`, :meth:`Net.http_HEAD` and :meth:`Net.http_POST`
'''
content = ''
    '''Unicode encoded string containing the body of the response.'''
def __init__(self, response):
'''
Args:
response (:class:`mimetools.Message`): The object returned by a call
to :func:`urllib2.urlopen`.
'''
self._response = response
@property
def content(self):
html = self._response.read()
encoding = None
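        # transparently inflate gzip-compressed responses before attempting charset detection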
try:
if self._response.headers['content-encoding'].lower() == 'gzip':
html = gzip.GzipFile(fileobj=StringIO.StringIO(html)).read()
except:
pass
try:
content_type = self._response.headers['content-type']
if 'charset=' in content_type:
encoding = content_type.split('charset=')[-1]
except:
pass
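        # fall back to a <meta http-equiv="Content-Type"> charset declaration in the markup itself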
r = re.search('<meta\s+http-equiv="Content-Type"\s+content="(?:.+?);\s+charset=(.+?)"', html, re.IGNORECASE)
if r:
encoding = r.group(1)
if encoding is not None:
try: html = html.decode(encoding)
except: pass
return html
def get_headers(self, as_dict=False):
'''Returns headers returned by the server.
If as_dict is True, headers are returned as a dictionary otherwise a list'''
if as_dict:
return dict([(item[0].title(), item[1]) for item in self._response.info().items()])
else:
return self._response.info().headers
def get_url(self):
'''
Return the URL of the resource retrieved, commonly used to determine if
a redirect was followed.
'''
return self._response.geturl()
| 35.683284 | 187 | 0.585059 |
7947494fb9f5134af231dfc39bd0cc89046e63fb | 62,136 | py | Python | google/cloud/aiplatform/v1/aiplatform-v1-py/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 7 | 2021-02-21T10:39:41.000Z | 2021-12-07T07:31:28.000Z | google/cloud/aiplatform/v1/aiplatform-v1-py/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 6 | 2021-02-02T23:46:11.000Z | 2021-11-15T01:46:02.000Z | google/cloud/aiplatform/v1/aiplatform-v1-py/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 4 | 2021-01-28T23:25:45.000Z | 2021-08-30T01:55:16.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.aiplatform_v1.services.featurestore_online_serving_service import FeaturestoreOnlineServingServiceAsyncClient
from google.cloud.aiplatform_v1.services.featurestore_online_serving_service import FeaturestoreOnlineServingServiceClient
from google.cloud.aiplatform_v1.services.featurestore_online_serving_service import transports
from google.cloud.aiplatform_v1.services.featurestore_online_serving_service.transports.base import _GOOGLE_AUTH_VERSION
from google.cloud.aiplatform_v1.types import feature_selector
from google.cloud.aiplatform_v1.types import featurestore_online_service
from google.oauth2 import service_account
import google.auth
# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
# through google-api-core:
# - Delete the auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
reason="This test requires google-auth >= 1.25.0",
)
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(None) is None
assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [
FeaturestoreOnlineServingServiceClient,
FeaturestoreOnlineServingServiceAsyncClient,
])
def test_featurestore_online_serving_service_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == 'aiplatform.googleapis.com:443'
@pytest.mark.parametrize("transport_class,transport_name", [
(transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"),
(transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_featurestore_online_serving_service_client_service_account_always_use_jwt(transport_class, transport_name):
with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [
FeaturestoreOnlineServingServiceClient,
FeaturestoreOnlineServingServiceAsyncClient,
])
def test_featurestore_online_serving_service_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == 'aiplatform.googleapis.com:443'
def test_featurestore_online_serving_service_client_get_transport_class():
transport = FeaturestoreOnlineServingServiceClient.get_transport_class()
available_transports = [
transports.FeaturestoreOnlineServingServiceGrpcTransport,
]
assert transport in available_transports
transport = FeaturestoreOnlineServingServiceClient.get_transport_class("grpc")
assert transport == transports.FeaturestoreOnlineServingServiceGrpcTransport
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"),
(FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
@mock.patch.object(FeaturestoreOnlineServingServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceClient))
@mock.patch.object(FeaturestoreOnlineServingServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceAsyncClient))
def test_featurestore_online_serving_service_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(FeaturestoreOnlineServingServiceClient, 'get_transport_class') as gtc:
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials()
)
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(FeaturestoreOnlineServingServiceClient, 'get_transport_class') as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
(FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc", "true"),
(FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"),
(FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc", "false"),
(FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"),
])
@mock.patch.object(FeaturestoreOnlineServingServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceClient))
@mock.patch.object(FeaturestoreOnlineServingServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceAsyncClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_featurestore_online_serving_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch.object(transport_class, '__init__') as patched:
with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch.object(transport_class, '__init__') as patched:
with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"),
(FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_featurestore_online_serving_service_client_client_options_scopes(client_class, transport_class, transport_name):
# Check the case scopes are provided.
options = client_options.ClientOptions(
scopes=["1", "2"],
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"),
(FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_featurestore_online_serving_service_client_client_options_credentials_file(client_class, transport_class, transport_name):
# Check the case credentials file is provided.
options = client_options.ClientOptions(
credentials_file="credentials.json"
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_featurestore_online_serving_service_client_client_options_from_dict():
with mock.patch('google.cloud.aiplatform_v1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = FeaturestoreOnlineServingServiceClient(
client_options={'api_endpoint': 'squid.clam.whelk'}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_read_feature_values(transport: str = 'grpc', request_type=featurestore_online_service.ReadFeatureValuesRequest):
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_feature_values),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = featurestore_online_service.ReadFeatureValuesResponse(
)
response = client.read_feature_values(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == featurestore_online_service.ReadFeatureValuesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, featurestore_online_service.ReadFeatureValuesResponse)
def test_read_feature_values_from_dict():
test_read_feature_values(request_type=dict)
def test_read_feature_values_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_feature_values),
'__call__') as call:
client.read_feature_values()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == featurestore_online_service.ReadFeatureValuesRequest()
@pytest.mark.asyncio
async def test_read_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_online_service.ReadFeatureValuesRequest):
client = FeaturestoreOnlineServingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_feature_values),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(featurestore_online_service.ReadFeatureValuesResponse(
))
response = await client.read_feature_values(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == featurestore_online_service.ReadFeatureValuesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, featurestore_online_service.ReadFeatureValuesResponse)
@pytest.mark.asyncio
async def test_read_feature_values_async_from_dict():
await test_read_feature_values_async(request_type=dict)
def test_read_feature_values_field_headers():
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = featurestore_online_service.ReadFeatureValuesRequest()
request.entity_type = 'entity_type/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_feature_values),
'__call__') as call:
call.return_value = featurestore_online_service.ReadFeatureValuesResponse()
client.read_feature_values(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'entity_type=entity_type/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_read_feature_values_field_headers_async():
client = FeaturestoreOnlineServingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = featurestore_online_service.ReadFeatureValuesRequest()
request.entity_type = 'entity_type/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_feature_values),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_online_service.ReadFeatureValuesResponse())
await client.read_feature_values(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'entity_type=entity_type/value',
) in kw['metadata']
def test_read_feature_values_flattened():
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_feature_values),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = featurestore_online_service.ReadFeatureValuesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.read_feature_values(
entity_type='entity_type_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].entity_type == 'entity_type_value'
def test_read_feature_values_flattened_error():
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.read_feature_values(
featurestore_online_service.ReadFeatureValuesRequest(),
entity_type='entity_type_value',
)
@pytest.mark.asyncio
async def test_read_feature_values_flattened_async():
client = FeaturestoreOnlineServingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read_feature_values),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = featurestore_online_service.ReadFeatureValuesResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_online_service.ReadFeatureValuesResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.read_feature_values(
entity_type='entity_type_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].entity_type == 'entity_type_value'
@pytest.mark.asyncio
async def test_read_feature_values_flattened_error_async():
client = FeaturestoreOnlineServingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.read_feature_values(
featurestore_online_service.ReadFeatureValuesRequest(),
entity_type='entity_type_value',
)
def test_streaming_read_feature_values(transport: str = 'grpc', request_type=featurestore_online_service.StreamingReadFeatureValuesRequest):
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_read_feature_values),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = iter([featurestore_online_service.ReadFeatureValuesResponse()])
response = client.streaming_read_feature_values(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest()
# Establish that the response is the type that we expect.
for message in response:
assert isinstance(message, featurestore_online_service.ReadFeatureValuesResponse)
def test_streaming_read_feature_values_from_dict():
test_streaming_read_feature_values(request_type=dict)
def test_streaming_read_feature_values_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_read_feature_values),
'__call__') as call:
client.streaming_read_feature_values()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest()
@pytest.mark.asyncio
async def test_streaming_read_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_online_service.StreamingReadFeatureValuesRequest):
client = FeaturestoreOnlineServingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_read_feature_values),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
call.return_value.read = mock.AsyncMock(side_effect=[featurestore_online_service.ReadFeatureValuesResponse()])
response = await client.streaming_read_feature_values(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest()
# Establish that the response is the type that we expect.
message = await response.read()
assert isinstance(message, featurestore_online_service.ReadFeatureValuesResponse)
@pytest.mark.asyncio
async def test_streaming_read_feature_values_async_from_dict():
await test_streaming_read_feature_values_async(request_type=dict)
def test_streaming_read_feature_values_field_headers():
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = featurestore_online_service.StreamingReadFeatureValuesRequest()
request.entity_type = 'entity_type/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_read_feature_values),
'__call__') as call:
call.return_value = iter([featurestore_online_service.ReadFeatureValuesResponse()])
client.streaming_read_feature_values(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'entity_type=entity_type/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_streaming_read_feature_values_field_headers_async():
client = FeaturestoreOnlineServingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = featurestore_online_service.StreamingReadFeatureValuesRequest()
request.entity_type = 'entity_type/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_read_feature_values),
'__call__') as call:
call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
call.return_value.read = mock.AsyncMock(side_effect=[featurestore_online_service.ReadFeatureValuesResponse()])
await client.streaming_read_feature_values(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'entity_type=entity_type/value',
) in kw['metadata']
def test_streaming_read_feature_values_flattened():
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_read_feature_values),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = iter([featurestore_online_service.ReadFeatureValuesResponse()])
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.streaming_read_feature_values(
entity_type='entity_type_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].entity_type == 'entity_type_value'
def test_streaming_read_feature_values_flattened_error():
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.streaming_read_feature_values(
featurestore_online_service.StreamingReadFeatureValuesRequest(),
entity_type='entity_type_value',
)
@pytest.mark.asyncio
async def test_streaming_read_feature_values_flattened_async():
client = FeaturestoreOnlineServingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_read_feature_values),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.streaming_read_feature_values(
entity_type='entity_type_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].entity_type == 'entity_type_value'
@pytest.mark.asyncio
async def test_streaming_read_feature_values_flattened_error_async():
client = FeaturestoreOnlineServingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.streaming_read_feature_values(
featurestore_online_service.StreamingReadFeatureValuesRequest(),
entity_type='entity_type_value',
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = FeaturestoreOnlineServingServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = FeaturestoreOnlineServingServiceClient(
client_options={"scopes": ["1", "2"]},
transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = FeaturestoreOnlineServingServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize("transport_class", [
transports.FeaturestoreOnlineServingServiceGrpcTransport,
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.FeaturestoreOnlineServingServiceGrpcTransport,
)
def test_featurestore_online_serving_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.FeaturestoreOnlineServingServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json"
)
def test_featurestore_online_serving_service_base_transport():
# Instantiate the base transport.
with mock.patch('google.cloud.aiplatform_v1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport.__init__') as Transport:
Transport.return_value = None
transport = transports.FeaturestoreOnlineServingServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
'read_feature_values',
'streaming_read_feature_values',
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
@requires_google_auth_gte_1_25_0
def test_featurestore_online_serving_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.FeaturestoreOnlineServingServiceTransport(
credentials_file="credentials.json",
quota_project_id="octopus",
)
load_creds.assert_called_once_with("credentials.json",
scopes=None,
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id="octopus",
)
@requires_google_auth_lt_1_25_0
def test_featurestore_online_serving_service_base_transport_with_credentials_file_old_google_auth():
# Instantiate the base transport with a credentials file
with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.FeaturestoreOnlineServingServiceTransport(
credentials_file="credentials.json",
quota_project_id="octopus",
)
load_creds.assert_called_once_with("credentials.json", scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id="octopus",
)
def test_featurestore_online_serving_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.FeaturestoreOnlineServingServiceTransport()
adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_featurestore_online_serving_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
FeaturestoreOnlineServingServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id=None,
)
@requires_google_auth_lt_1_25_0
def test_featurestore_online_serving_service_auth_adc_old_google_auth():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
FeaturestoreOnlineServingServiceClient()
adc.assert_called_once_with(
scopes=( 'https://www.googleapis.com/auth/cloud-platform',),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.FeaturestoreOnlineServingServiceGrpcTransport,
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
],
)
@requires_google_auth_gte_1_25_0
def test_featurestore_online_serving_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class",
[
transports.FeaturestoreOnlineServingServiceGrpcTransport,
transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
],
)
@requires_google_auth_lt_1_25_0
def test_featurestore_online_serving_service_transport_auth_adc_old_google_auth(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus")
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.FeaturestoreOnlineServingServiceGrpcTransport, grpc_helpers),
(transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, grpc_helpers_async)
],
)
def test_featurestore_online_serving_service_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(
quota_project_id="octopus",
scopes=["1", "2"]
)
create_channel.assert_called_with(
"aiplatform.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
scopes=["1", "2"],
default_host="aiplatform.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize("transport_class", [transports.FeaturestoreOnlineServingServiceGrpcTransport, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport])
def test_featurestore_online_serving_service_grpc_transport_client_cert_source_for_mtls(
transport_class
):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert,
private_key=expected_key
)
def test_featurestore_online_serving_service_host_no_port():
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
)
assert client.transport._host == 'aiplatform.googleapis.com:443'
def test_featurestore_online_serving_service_host_with_port():
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
)
assert client.transport._host == 'aiplatform.googleapis.com:8000'
def test_featurestore_online_serving_service_grpc_transport_channel():
channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_featurestore_online_serving_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.FeaturestoreOnlineServingServiceGrpcTransport, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport])
def test_featurestore_online_serving_service_transport_channel_mtls_with_client_cert_source(
transport_class
):
with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.FeaturestoreOnlineServingServiceGrpcTransport, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport])
def test_featurestore_online_serving_service_transport_channel_mtls_with_adc(
transport_class
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_entity_type_path():
project = "squid"
location = "clam"
featurestore = "whelk"
entity_type = "octopus"
expected = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, )
actual = FeaturestoreOnlineServingServiceClient.entity_type_path(project, location, featurestore, entity_type)
assert expected == actual
def test_parse_entity_type_path():
expected = {
"project": "oyster",
"location": "nudibranch",
"featurestore": "cuttlefish",
"entity_type": "mussel",
}
path = FeaturestoreOnlineServingServiceClient.entity_type_path(**expected)
# Check that the path construction is reversible.
actual = FeaturestoreOnlineServingServiceClient.parse_entity_type_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "winkle"
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = FeaturestoreOnlineServingServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nautilus",
}
path = FeaturestoreOnlineServingServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = FeaturestoreOnlineServingServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "scallop"
expected = "folders/{folder}".format(folder=folder, )
actual = FeaturestoreOnlineServingServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "abalone",
}
path = FeaturestoreOnlineServingServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = FeaturestoreOnlineServingServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "squid"
expected = "organizations/{organization}".format(organization=organization, )
actual = FeaturestoreOnlineServingServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "clam",
}
path = FeaturestoreOnlineServingServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = FeaturestoreOnlineServingServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "whelk"
expected = "projects/{project}".format(project=project, )
actual = FeaturestoreOnlineServingServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "octopus",
}
path = FeaturestoreOnlineServingServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = FeaturestoreOnlineServingServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "oyster"
location = "nudibranch"
expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
actual = FeaturestoreOnlineServingServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "cuttlefish",
"location": "mussel",
}
path = FeaturestoreOnlineServingServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = FeaturestoreOnlineServingServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(transports.FeaturestoreOnlineServingServiceTransport, '_prep_wrapped_messages') as prep:
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(transports.FeaturestoreOnlineServingServiceTransport, '_prep_wrapped_messages') as prep:
transport_class = FeaturestoreOnlineServingServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = FeaturestoreOnlineServingServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc_asyncio",
)
with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport
)
with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
'grpc',
]
for transport in transports:
client = FeaturestoreOnlineServingServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
| 43.060291 | 275 | 0.720919 |
79474b33e63f653274bd579c21b2b7ebaaefb49c | 1,361 | py | Python | pycasper/decorators.py | dondongwon/CC_NCE_GENEA | c37e187800dbbce8447c0fdf11be1b76e863b4c6 | [
"MIT"
] | 5 | 2021-09-01T18:45:06.000Z | 2021-11-30T15:14:43.000Z | pycasper/decorators.py | dondongwon/CC_NCE_GENEA | c37e187800dbbce8447c0fdf11be1b76e863b4c6 | [
"MIT"
] | null | null | null | pycasper/decorators.py | dondongwon/CC_NCE_GENEA | c37e187800dbbce8447c0fdf11be1b76e863b4c6 | [
"MIT"
] | null | null | null | import time
__all__ = ['calculate_time']
class BaseDecorator():
'''
Build a decorator using before and after execution functions
Must-Implement-Methods::
before_exec(func) - Stuff to do before function execution
after_exec(func, before_values) - Stuff to do after function execution
Methods::
decorator_name = self.build_decorator(before_exec, after_exec)
'''
def __init__(self):
pass
def before_exec(self, func):
raise NotImplementedError('`before_exec` must be implemented')
def after_exec(self, func, before_values):
raise NotImplementedError('`after_exec` must be implemented')
def build_decorator(self):
def base_decorator(func):
def inner(*args, **kwargs):
before_values = self.before_exec(func)
returned_values = func(*args, **kwargs)
self.after_exec(func, before_values)
return returned_values
return inner
return base_decorator
class CalculateTime(BaseDecorator):
def __init__(self):
super(CalculateTime, self).__init__()
def before_exec(self, func=None):
begin = time.time()
return begin
def after_exec(self, func=None, before_values=None):
end = time.time()
print('Execution Time for {}: {:.2f} seconds'.format(func.__name__, end-before_values))
calculate_time = CalculateTime().build_decorator() | 28.957447 | 91 | 0.70169 |
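# Illustrative usage sketch (assumption, not part of the original module): the
# decorator returned by build_decorator() wraps any callable with the instance's
# before_exec/after_exec hooks, so timing a function reduces to:
#   @calculate_time
#   def busy():
#       time.sleep(0.5)
#   busy()  # prints "Execution Time for busy: 0.50 seconds"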
79474b46e30a888b96503516d55c29d9133ee59f | 7,440 | py | Python | clinica/pipelines/t1_volume/t1_volume_cli.py | Chengwei94/clinica | 0e9d837baf9064a626198422b2a70fe120f227f0 | [
"MIT"
] | null | null | null | clinica/pipelines/t1_volume/t1_volume_cli.py | Chengwei94/clinica | 0e9d837baf9064a626198422b2a70fe120f227f0 | [
"MIT"
] | null | null | null | clinica/pipelines/t1_volume/t1_volume_cli.py | Chengwei94/clinica | 0e9d837baf9064a626198422b2a70fe120f227f0 | [
"MIT"
] | null | null | null | # coding: utf8
import clinica.engine as ce
class T1VolumeCLI(ce.CmdParser):
def define_name(self):
"""Define the sub-command name to run this pipeline."""
self._name = "t1-volume"
def define_description(self):
"""Define a description of this pipeline."""
self._description = (
"Volume-based processing of T1-weighted MR images:\n"
"https://aramislab.paris.inria.fr/clinica/docs/public/latest/Pipelines/T1_Volume/"
)
def define_options(self):
"""Define the sub-command arguments."""
from clinica.engine.cmdparser import PIPELINE_CATEGORIES
# Clinica compulsory arguments (e.g. BIDS, CAPS, group_label)
clinica_comp = self._args.add_argument_group(
PIPELINE_CATEGORIES["CLINICA_COMPULSORY"]
)
clinica_comp.add_argument("bids_directory", help="Path to the BIDS directory.")
clinica_comp.add_argument("caps_directory", help="Path to the CAPS directory.")
clinica_comp.add_argument(
"group_label",
help="User-defined identifier for the provided group of subjects.",
)
# Optional arguments (e.g. FWHM)
optional = self._args.add_argument_group(PIPELINE_CATEGORIES["OPTIONAL"])
optional.add_argument(
"-s",
"--smooth",
nargs="+",
type=int,
default=[8],
help="A list of integers specifying the different isomorphic FWHM in millimeters "
"to smooth the image (default: --smooth 8).",
)
# Clinica standard arguments (e.g. --n_procs)
self.add_clinica_standard_arguments()
# Advanced arguments (i.e. tricky parameters)
advanced = self._args.add_argument_group(PIPELINE_CATEGORIES["ADVANCED"])
# t1-volume-tissue-segmentation
advanced.add_argument(
"-tc",
"--tissue_classes",
metavar="",
nargs="+",
type=int,
default=[1, 2, 3],
choices=range(1, 7),
help="Tissue classes (1: gray matter (GM), 2: white matter (WM), "
"3: cerebrospinal fluid (CSF), 4: bone, 5: soft-tissue, 6: background) to save "
"(default: GM, WM and CSF i.e. --tissue_classes 1 2 3).",
)
advanced.add_argument(
"-tpm",
"--tissue_probability_maps",
metavar="TissueProbabilityMap.nii",
default=None,
help="Tissue probability maps to use for segmentation "
"(default: TPM.nii from SPM software).",
)
advanced.add_argument(
"-dswu",
"--dont_save_warped_unmodulated",
action="store_true",
default=False,
help="Do not save warped unmodulated images for tissues specified "
"in --tissue_classes flag.",
)
advanced.add_argument(
"-swm",
"--save_warped_modulated",
action="store_true",
default=False,
help="Save warped modulated images for tissues specified in --tissue_classes flag.",
)
# t1-volume-tissue-segmentation / t1-volume-create-dartel
advanced.add_argument(
"-dt",
"--dartel_tissues",
metavar="",
nargs="+",
type=int,
default=[1, 2, 3],
choices=range(1, 7),
help="Tissues to use for DARTEL template calculation "
"(default: GM, WM and CSF i.e. --dartel_tissues 1 2 3).",
)
# t1-volume-dartel2mni
advanced.add_argument(
"-t",
"--tissues",
metavar="",
nargs="+",
type=int,
default=[1, 2, 3],
choices=range(1, 7),
help="Tissues to create flow fields to DARTEL template "
"(default: GM, WM and CSF i.e. --tissues 1 2 3).",
)
advanced.add_argument(
"-m",
"--modulate",
type=bool,
default=True,
metavar=("True/False"),
help="A boolean. Modulate output images - no modulation preserves concentrations "
"(default: --modulate True).",
)
advanced.add_argument(
"-vs",
"--voxel_size",
metavar=("float"),
nargs=3,
type=float,
help="A list of 3 floats specifying the voxel sizeof the output image "
"(default: --voxel_size 1.5 1.5 1.5).",
)
def run_command(self, args):
"""Run the pipeline with defined args."""
import datetime
import os
from colorama import Fore
from clinica.utils.filemanip import save_participants_sessions
from clinica.utils.participant import get_subject_session_list
from clinica.utils.stream import cprint
from ..t1_volume_create_dartel.t1_volume_create_dartel_cli import (
T1VolumeCreateDartelCLI,
)
from ..t1_volume_dartel2mni.t1_volume_dartel2mni_cli import (
T1VolumeDartel2MNICLI,
)
from ..t1_volume_parcellation.t1_volume_parcellation_cli import (
T1VolumeParcellationCLI,
)
from ..t1_volume_tissue_segmentation.t1_volume_tissue_segmentation_cli import (
T1VolumeTissueSegmentationCLI,
)
cprint(
f"The t1-volume pipeline is divided into 4 parts:\n"
f"\t{Fore.BLUE}t1-volume-tissue-segmentation pipeline{Fore.RESET}: "
f"Tissue segmentation, bias correction and spatial normalization to MNI space\n"
f"\t{Fore.BLUE}t1-volume-create-dartel pipeline{Fore.RESET}: "
f"Inter-subject registration with the creation of a new DARTEL template\n"
f"\t{Fore.BLUE}t1-volume-dartel2mni pipeline{Fore.RESET}: "
f"DARTEL template to MNI\n"
f"\t{Fore.BLUE}t1-volume-parcellation pipeline{Fore.RESET}: "
f"Atlas statistics"
)
if not self.absolute_path(args.subjects_sessions_tsv):
session_ids, participant_ids = get_subject_session_list(
self.absolute_path(args.bids_directory), None, True, False
)
now = datetime.datetime.now().strftime("%H%M%S")
args.subjects_sessions_tsv = now + "_participants.tsv"
save_participants_sessions(
participant_ids, session_ids, os.getcwd(), args.subjects_sessions_tsv
)
cprint(
f"{Fore.BLUE}\nPart 1/4: Running t1-volume-segmentation pipeline{Fore.RESET}"
)
tissue_segmentation_cli = T1VolumeTissueSegmentationCLI()
tissue_segmentation_cli.run_command(args)
cprint(
f"{Fore.BLUE}\nPart 2/4: Running t1-volume-create-dartel pipeline{Fore.RESET}"
)
create_dartel_cli = T1VolumeCreateDartelCLI()
create_dartel_cli.run_command(args)
cprint(
f"{Fore.BLUE}\nPart 3/4: Running t1-volume-dartel2mni pipeline{Fore.RESET}"
)
dartel2mni_cli = T1VolumeDartel2MNICLI()
dartel2mni_cli.run_command(args)
cprint(
f"{Fore.BLUE}\nPart 4/4: Running t1-volume-parcellation pipeline{Fore.RESET}"
)
parcellation_cli = T1VolumeParcellationCLI()
parcellation_cli.run_command(args)
| 38.153846 | 96 | 0.585081 |
79474d0316741883ae7ef61f26cc2b527cd5dd44 | 5,285 | py | Python | test/functional/test_framework/address.py | umkoin/umkoin | ba28fce6f6b22099ba7b900619653f9e342d3cd1 | [
"MIT"
] | 6 | 2018-02-28T22:23:46.000Z | 2020-02-13T13:49:44.000Z | test/functional/test_framework/address.py | umkoin/umkoin | ba28fce6f6b22099ba7b900619653f9e342d3cd1 | [
"MIT"
] | null | null | null | test/functional/test_framework/address.py | umkoin/umkoin | ba28fce6f6b22099ba7b900619653f9e342d3cd1 | [
"MIT"
] | 6 | 2018-02-05T12:51:25.000Z | 2020-04-26T10:42:49.000Z | #!/usr/bin/env python3
# Copyright (c) 2016-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Encode and decode Umkoin addresses.
- base58 P2PKH and P2SH addresses.
- bech32 segwit v0 P2WPKH and P2WSH addresses."""
import enum
import unittest
from .script import hash256, hash160, sha256, CScript, OP_0
from .segwit_addr import encode_segwit_address
from .util import assert_equal
ADDRESS_BCRT1_UNSPENDABLE = 'bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq3xueyj'
ADDRESS_BCRT1_UNSPENDABLE_DESCRIPTOR = 'addr(bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq3xueyj)#juyq9d97'
# Coins sent to this address can be spent with a witness stack of just OP_TRUE
ADDRESS_BCRT1_P2WSH_OP_TRUE = 'bcrt1qft5p2uhsdcdc3l2ua4ap5qqfg4pjaqlp250x7us7a8qqhrxrxfsqseac85'
class AddressType(enum.Enum):
bech32 = 'bech32'
p2sh_segwit = 'p2sh-segwit'
legacy = 'legacy' # P2PKH
chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
def byte_to_base58(b, version):
result = ''
str = b.hex()
str = chr(version).encode('latin-1').hex() + str
checksum = hash256(bytes.fromhex(str)).hex()
str += checksum[:8]
value = int('0x' + str, 0)
while value > 0:
result = chars[value % 58] + result
value //= 58
while (str[:2] == '00'):
result = chars[0] + result
str = str[2:]
return result
def base58_to_byte(s):
"""Converts a base58-encoded string to its data and version.
Throws if the base58 checksum is invalid."""
if not s:
return b''
n = 0
for c in s:
n *= 58
assert c in chars
digit = chars.index(c)
n += digit
h = '%x' % n
if len(h) % 2:
h = '0' + h
res = n.to_bytes((n.bit_length() + 7) // 8, 'big')
pad = 0
for c in s:
if c == chars[0]:
pad += 1
else:
break
res = b'\x00' * pad + res
# Assert if the checksum is invalid
assert_equal(hash256(res[:-4])[:4], res[-4:])
return res[1:-4], int(res[0])
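# Illustrative round trip (sketch, not part of the original helpers): encoding a
# 20-byte payload with the testnet P2PKH version byte (111) and decoding it back
# recovers both the data and the version:
#   addr = byte_to_base58(bytes(20), 111)
#   data, version = base58_to_byte(addr)
#   assert (data, version) == (bytes(20), 111)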
def keyhash_to_p2pkh(hash, main=False):
assert len(hash) == 20
version = 0 if main else 111
return byte_to_base58(hash, version)
def scripthash_to_p2sh(hash, main=False):
assert len(hash) == 20
version = 5 if main else 196
return byte_to_base58(hash, version)
def key_to_p2pkh(key, main=False):
key = check_key(key)
return keyhash_to_p2pkh(hash160(key), main)
def script_to_p2sh(script, main=False):
script = check_script(script)
return scripthash_to_p2sh(hash160(script), main)
def key_to_p2sh_p2wpkh(key, main=False):
key = check_key(key)
p2shscript = CScript([OP_0, hash160(key)])
return script_to_p2sh(p2shscript, main)
def program_to_witness(version, program, main=False):
if (type(program) is str):
program = bytes.fromhex(program)
assert 0 <= version <= 16
assert 2 <= len(program) <= 40
assert version > 0 or len(program) in [20, 32]
return encode_segwit_address("bc" if main else "bcrt", version, program)
def script_to_p2wsh(script, main=False):
script = check_script(script)
return program_to_witness(0, sha256(script), main)
def key_to_p2wpkh(key, main=False):
key = check_key(key)
return program_to_witness(0, hash160(key), main)
def script_to_p2sh_p2wsh(script, main=False):
script = check_script(script)
p2shscript = CScript([OP_0, sha256(script)])
return script_to_p2sh(p2shscript, main)
def check_key(key):
if (type(key) is str):
key = bytes.fromhex(key) # Assuming this is hex string
if (type(key) is bytes and (len(key) == 33 or len(key) == 65)):
return key
assert False
def check_script(script):
if (type(script) is str):
script = bytes.fromhex(script) # Assuming this is hex string
if (type(script) is bytes or type(script) is CScript):
return script
assert False
class TestFrameworkScript(unittest.TestCase):
def test_base58encodedecode(self):
def check_base58(data, version):
self.assertEqual(base58_to_byte(byte_to_base58(data, version)), (data, version))
check_base58(bytes.fromhex('1f8ea1702a7bd4941bca0941b852c4bbfedb2e05'), 111)
check_base58(bytes.fromhex('3a0b05f4d7f66c3ba7009f453530296c845cc9cf'), 111)
check_base58(bytes.fromhex('41c1eaf111802559bad61b60d62b1f897c63928a'), 111)
check_base58(bytes.fromhex('0041c1eaf111802559bad61b60d62b1f897c63928a'), 111)
check_base58(bytes.fromhex('000041c1eaf111802559bad61b60d62b1f897c63928a'), 111)
check_base58(bytes.fromhex('00000041c1eaf111802559bad61b60d62b1f897c63928a'), 111)
check_base58(bytes.fromhex('1f8ea1702a7bd4941bca0941b852c4bbfedb2e05'), 0)
check_base58(bytes.fromhex('3a0b05f4d7f66c3ba7009f453530296c845cc9cf'), 0)
check_base58(bytes.fromhex('41c1eaf111802559bad61b60d62b1f897c63928a'), 0)
check_base58(bytes.fromhex('0041c1eaf111802559bad61b60d62b1f897c63928a'), 0)
check_base58(bytes.fromhex('000041c1eaf111802559bad61b60d62b1f897c63928a'), 0)
check_base58(bytes.fromhex('00000041c1eaf111802559bad61b60d62b1f897c63928a'), 0)
| 34.318182 | 120 | 0.698392 |
79474d385abbc13b2d94514a20747da0bd9161f9 | 1,216 | py | Python | nvp/core/process_utils.py | roche-emmanuel/nervproj | f784e88957868a17a40f499bef75cc226cf94e69 | [
"MIT"
] | null | null | null | nvp/core/process_utils.py | roche-emmanuel/nervproj | f784e88957868a17a40f499bef75cc226cf94e69 | [
"MIT"
] | null | null | null | nvp/core/process_utils.py | roche-emmanuel/nervproj | f784e88957868a17a40f499bef75cc226cf94e69 | [
"MIT"
] | null | null | null | """ProcessUtils utility component"""
import logging
import os
import psutil
from nvp.nvp_component import NVPComponent
from nvp.nvp_context import NVPContext
logger = logging.getLogger(__name__)
def create_component(ctx: NVPContext):
"""Create an instance of the component"""
return ProcessUtils(ctx)
class ProcessUtils(NVPComponent):
"""ProcessUtils component used to send automatic messages ono ProcessUtils server"""
def __init__(self, ctx: NVPContext):
"""Script runner constructor"""
NVPComponent.__init__(self, ctx)
def get_cpu_usage(self):
"""Retrieve the CPU usage percent in this process"""
return psutil.Process().cpu_percent()/os.cpu_count()
def get_ram_usage(self):
"""Retrieve process RAM usage in MB"""
return psutil.Process().memory_info().rss / (1024 * 1024)
def get_system_cpu_load_15mins(self):
"""Retrieve the CPU load percent over the last 15 mins"""
_, _, load15 = psutil.getloadavg()
cpu_usage = (load15/os.cpu_count()) * 100.0
return cpu_usage
def get_system_ram_usage(self):
"""Retrieve System RAM usage in percent"""
return psutil.virtual_memory()[2]
| 28.952381 | 88 | 0.689967 |
79474d5aa2b130445eec2966caea26fff9a4c29c | 3,493 | py | Python | vendor-local/lib/python/easy_thumbnails/migrations/0012_build_storage_hashes.py | Koenkk/popcorn_maker | 0978b9f98dacd4e8eb753404b24eb584f410aa11 | [
"BSD-3-Clause"
] | 24 | 2016-08-06T18:10:54.000Z | 2022-03-04T11:47:39.000Z | vendor-local/lib/python/easy_thumbnails/migrations/0012_build_storage_hashes.py | Koenkk/popcorn_maker | 0978b9f98dacd4e8eb753404b24eb584f410aa11 | [
"BSD-3-Clause"
] | 1 | 2017-03-28T02:36:50.000Z | 2017-03-28T07:18:57.000Z | vendor-local/lib/python/easy_thumbnails/migrations/0012_build_storage_hashes.py | Koenkk/popcorn_maker | 0978b9f98dacd4e8eb753404b24eb584f410aa11 | [
"BSD-3-Clause"
] | 16 | 2015-02-18T21:43:31.000Z | 2021-11-09T22:50:03.000Z | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django.core.files.storage import default_storage
from django.utils.hashcompat import md5_constructor
import pickle
class Migration(DataMigration):
"""
Migrate storage hashes.
"""
def get_storage_hash(self, storage):
"""
Return a hex string hash for a storage object (or string containing a
pickle of a storage object).
"""
try:
# Make sure that pickle is getting a string, since it can choke
# with unicode.
            storage_obj = pickle.loads(str(storage.pickle))
except:
# We need to return some storage, and if there's an exception then
# it is most likely the default_storage (since that fails with a
# recursion error due to LazyObject "awesomeness").
storage_obj = default_storage
storage_cls = storage_obj.__class__
name = '%s.%s' % (storage_cls.__module__, storage_cls.__name__)
return md5_constructor(name).hexdigest()
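    # Illustrative note (assumption, not part of the original migration): with the
    # stock file-system backend the pickled storage unpickles to an instance of
    # django.core.files.storage.FileSystemStorage, so the stored hash is simply
    # md5 of the dotted class path "django.core.files.storage.FileSystemStorage".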
def forwards(self, orm):
"Write your forwards methods here."
for storage in orm.Storage.objects.all():
storage_hash = self.get_storage_hash(storage)
orm.Source.objects.filter(storage=storage).update(
storage_hash=storage_hash)
orm.Thumbnail.objects.filter(storage=storage).update(
storage_hash=storage_hash)
def backwards(self, orm):
"Write your backwards methods here."
models = {
'easy_thumbnails.source': {
'Meta': {'object_name': 'Source'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 9, 8, 0, 32, 41, 855399)'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'storage': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['easy_thumbnails.Storage']"}),
'storage_hash': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'easy_thumbnails.storage': {
'Meta': {'object_name': 'Storage'},
'hash': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pickle': ('django.db.models.fields.TextField', [], {})
},
'easy_thumbnails.thumbnail': {
'Meta': {'object_name': 'Thumbnail'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 9, 8, 0, 32, 41, 855399)'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'thumbnails'", 'to': "orm['easy_thumbnails.Source']"}),
'storage': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['easy_thumbnails.Storage']"}),
'storage_hash': ('django.db.models.fields.CharField', [], {'max_length': '40'})
}
}
complete_apps = ['easy_thumbnails']
| 46.573333 | 146 | 0.588892 |
7947519b2f14bb879a21c50a4cfd97f2044d4763 | 798 | py | Python | aim_mt_2d/architectures/controller.py | sisl/neat | 42758d910f453686366eddfd1aed440e34c94828 | [
"MIT"
] | 183 | 2021-08-18T13:22:37.000Z | 2022-03-31T08:40:48.000Z | aim_mt_2d/architectures/controller.py | sisl/neat | 42758d910f453686366eddfd1aed440e34c94828 | [
"MIT"
] | 10 | 2021-09-24T15:30:06.000Z | 2022-03-25T11:19:23.000Z | aim_mt_2d/architectures/controller.py | sisl/neat | 42758d910f453686366eddfd1aed440e34c94828 | [
"MIT"
] | 21 | 2021-09-11T13:32:54.000Z | 2022-03-23T16:55:53.000Z | from collections import deque
import numpy as np
import torch
from torch import nn
class PIDController(object):
def __init__(self, K_P=1.0, K_I=0.0, K_D=0.0, n=20):
self._K_P = K_P
self._K_I = K_I
self._K_D = K_D
self._window = deque([0 for _ in range(n)], maxlen=n)
self._max = 0.0
self._min = 0.0
def step(self, error):
self._window.append(error)
self._max = max(self._max, abs(error))
self._min = -abs(self._max)
if len(self._window) >= 2:
integral = np.mean(self._window)
derivative = (self._window[-1] - self._window[-2])
else:
integral = 0.0
derivative = 0.0
return self._K_P * error + self._K_I * integral + self._K_D * derivative
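# Illustrative usage sketch (assumption, not part of the original module): the
# gains and the error signal below are placeholders, not values from this repository.
#   turn_controller = PIDController(K_P=1.0, K_I=0.5, K_D=0.2, n=20)
#   steer = turn_controller.step(heading_error)  # one PID update per control tick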
| 25.741935 | 80 | 0.568922 |
794751f645f37119c5beecdd41d0e8222332e297 | 16,212 | py | Python | autotest/test_gwf_sfr_badfactor.py | scharlton2/modflow6 | 83ac72ee3b6f580aaffef6352cf15c1697d3ce66 | [
"CC0-1.0"
] | 3 | 2019-07-10T21:16:57.000Z | 2021-10-08T00:56:20.000Z | autotest/test_gwf_sfr_badfactor.py | scharlton2/modflow6 | 83ac72ee3b6f580aaffef6352cf15c1697d3ce66 | [
"CC0-1.0"
] | null | null | null | autotest/test_gwf_sfr_badfactor.py | scharlton2/modflow6 | 83ac72ee3b6f580aaffef6352cf15c1697d3ce66 | [
"CC0-1.0"
] | 3 | 2019-11-28T16:26:50.000Z | 2020-02-05T11:08:37.000Z | import os
import pytest
import sys
import numpy as np
import shutil
import subprocess
try:
import flopy
except:
msg = "Error. FloPy package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install flopy"
raise Exception(msg)
from framework import testing_framework
from simulation import Simulation
import targets
mf6_exe = os.path.abspath(targets.target_dict["mf6"])
paktest = "sfr"
testname = "ts_sfr01"
testdir = os.path.join("temp", testname)
os.makedirs(testdir, exist_ok=True)
everything_was_successful = True
def build_model(timeseries=False):
# static model data
# temporal discretization
nper = 1
tdis_rc = []
for idx in range(nper):
tdis_rc.append((1.0, 1, 1.0))
ts_times = np.arange(0.0, 2.0, 1.0, dtype=float)
auxnames = ["temp", "conc"]
temp, conc = 32.5, 0.1
# spatial discretization data
nlay, nrow, ncol = 3, 10, 10
delr, delc = 100.0, 100.0
top = 0.0
botm = [-10, -20, -30]
strt = 0.0
# calculate hk
hk = 1.0e-4
# solver options
nouter, ninner = 600, 100
hclose, rclose, relax = 1e-6, 0.1, 1.0
newtonoptions = "NEWTON"
imsla = "BICGSTAB"
# build MODFLOW 6 files
name = testname
ws = testdir
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name=mf6_exe, sim_ws=ws
)
# create tdis package
tdis = flopy.mf6.ModflowTdis(
sim, time_units="DAYS", nper=nper, perioddata=tdis_rc
)
# set ims csv files
csv0 = "{}.outer.ims.csv".format(name)
csv1 = "{}.inner.ims.csv".format(name)
# create iterative model solution and register the gwf model with it
ims = flopy.mf6.ModflowIms(
sim,
print_option="ALL",
csv_outer_output_filerecord=csv0,
csv_inner_output_filerecord=csv1,
outer_dvclose=hclose,
outer_maximum=nouter,
under_relaxation="NONE",
inner_maximum=ninner,
inner_dvclose=hclose,
rcloserecord=rclose,
linear_acceleration=imsla,
scaling_method="NONE",
reordering_method="NONE",
relaxation_factor=relax,
)
# create gwf model
gwf = flopy.mf6.ModflowGwf(
sim,
modelname=name,
newtonoptions=newtonoptions,
save_flows=True,
print_flows=True,
)
dis = flopy.mf6.ModflowGwfdis(
gwf,
nlay=nlay,
nrow=nrow,
ncol=ncol,
delr=delr,
delc=delc,
top=top,
botm=botm,
)
# initial conditions
ic = flopy.mf6.ModflowGwfic(gwf, strt=strt)
# node property flow
npf = flopy.mf6.ModflowGwfnpf(gwf, icelltype=0, k=hk)
# chd files
# chd data
spd = [
[(0, 0, 0), 1.0],
[(0, nrow - 1, ncol - 1), 0.0],
]
chd = flopy.mf6.modflow.ModflowGwfchd(
gwf, stress_period_data=spd, pname="chd-1"
)
# drn file
drn6 = [
[(0, 1, 2), -1.0, 1.0],
[(0, 2, 3), -1.0, 1.0],
]
drn = flopy.mf6.modflow.ModflowGwfdrn(
gwf, mover=True, stress_period_data=drn6, pname="drn-1"
)
# sfr file
packagedata = [
[
0,
(1 - 1, 4 - 1, 1 - 1),
3.628e001,
1.0,
1.0e-003,
0.0,
1.0,
1.0e-4,
1.0e-1,
2,
0.0,
1,
temp,
conc,
],
[
1,
(1 - 1, 4 - 1, 2 - 1),
1.061e002,
1.0,
1.0e-003,
0.0,
1.0,
1.0e-4,
1.0e-1,
3,
1.0,
1,
temp,
conc,
],
[
2,
(1 - 1, 4 - 1, 3 - 1),
6.333e001,
1.0,
1.0e-003,
0.0,
1.0,
1.0e-4,
1.0e-1,
4,
1.0,
2,
temp,
conc,
],
[
3,
(1 - 1, 5 - 1, 3 - 1),
4.279e001,
1.0,
1.0e-003,
0.0,
1.0,
1.0e-4,
1.0e-1,
3,
1.0,
1,
temp,
conc,
],
[
4,
(1 - 1, 5 - 1, 4 - 1),
6.532e001,
1.0,
1.0e-003,
0.0,
1.0,
1.0e-4,
1.0e-1,
1,
1.0,
0,
temp,
conc,
],
[
5,
(1 - 1, 4 - 1, 1 - 1),
10.0,
1.0,
1.0e-003,
0.0,
1.0,
0.0,
1.0e-1,
1,
0.0,
0,
temp,
conc,
],
[
6,
(1 - 1, 4 - 1, 2 - 1),
10.0,
1.0,
1.0e-003,
0.0,
1.0,
0.0,
1.0e-1,
1,
0.0,
0,
temp,
conc,
],
[
7,
(1 - 1, 4 - 1, 3 - 1),
10.0,
1.0,
1.0e-003,
0.0,
1.0,
0.0,
1.0e-1,
1,
0.0,
0,
temp,
conc,
],
[
8,
(1 - 1, 4 - 1, 3 - 1),
10.0,
1.0,
1.0e-003,
0.0,
1.0,
0.0,
1.0e-1,
1,
0.0,
0,
temp,
conc,
],
[
9,
(1 - 1, 5 - 1, 4 - 1),
10.0,
1.0,
1.0e-003,
0.0,
1.0,
0.0,
1.0e-1,
1,
0.0,
0,
temp,
conc,
],
]
connectiondata = [
[0, -1, -5],
[1, 0, -2, -6],
[2, -3, -7, -8, 1],
[3, -4, -9, 2],
[4, 3],
[5, 0],
[6, 1],
[7, 2],
[8, 2],
[9, 3],
]
cprior1 = "upto"
cprior2 = "fraction"
divdata = [
[0, 0, 5, cprior1],
[1, 0, 6, cprior1],
[2, 1, 7, cprior1],
[2, 0, 8, cprior1],
[3, 0, 9, cprior2],
]
inflow, divflow, divflow2, upstream_fraction = 1.0, 0.05, 0.04, 0.0
    divflow3 = 2.0  # deliberately invalid: with cprior "fraction" the diversion factor must lie between 0 and 1
ts_names = ["inflow", "divflow", "ustrf"] + auxnames
perioddata = [
[0, "status", "active"],
[1, "status", "active"],
[2, "status", "active"],
[3, "status", "active"],
[4, "status", "active"],
[0, "diversion", 0, divflow],
[1, "diversion", 0, divflow],
[2, "diversion", 0, divflow2],
[3, "diversion", 0, divflow3],
]
if timeseries:
perioddata.append([0, "inflow", "inflow"])
perioddata.append([2, "diversion", 1, "divflow"])
perioddata.append([0, "AUXILIARY", "conc", "conc"])
perioddata.append([2, "AUXILIARY", "temp", "temp"])
perioddata.append([5, "upstream_fraction", "ustrf"])
perioddata.append([7, "upstream_fraction", "ustrf"])
perioddata.append([9, "upstream_fraction", "ustrf"])
ts_methods = ["linearend"] * len(ts_names)
ts_data = []
for t in ts_times:
ts_data.append((t, inflow, divflow, upstream_fraction, temp, conc))
else:
perioddata.append([0, "inflow", inflow])
perioddata.append([2, "diversion", 1, divflow])
budpth = "{}.{}.cbc".format(name, paktest)
cnvgpth = "{}.sfr.cnvg.csv".format(name)
sfr = flopy.mf6.ModflowGwfsfr(
gwf,
print_stage=True,
maximum_picard_iterations=1,
auxiliary=auxnames,
print_input=True,
budget_filerecord=budpth,
mover=True,
nreaches=len(packagedata),
maximum_depth_change=1.0e-5,
package_convergence_filerecord=cnvgpth,
packagedata=packagedata,
connectiondata=connectiondata,
diversions=divdata,
perioddata=perioddata,
pname="sfr-1",
)
if timeseries:
fname = "{}.sfr.ts".format(name)
sfr.ts.initialize(
filename=fname,
timeseries=ts_data,
time_series_namerecord=ts_names,
interpolation_methodrecord=ts_methods,
)
packagedata = [
[0, 1.0, -20.0, 0.0, "SPECIFIED", 2],
]
nmawwells = len(packagedata)
connectiondata = [
[1 - 1, 1 - 1, (1 - 1, 5 - 1, 8 - 1), 0.0, -20, 1.0, 1.1],
[1 - 1, 2 - 1, (2 - 1, 5 - 1, 8 - 1), 0.0, -20, 1.0, 1.1],
]
perioddata = [[0, "FLOWING_WELL", 0.0, 0.0, 0.0], [0, "RATE", 1.0e-3]]
maw = flopy.mf6.ModflowGwfmaw(
gwf,
print_head=True,
mover=True,
nmawwells=nmawwells,
packagedata=packagedata,
connectiondata=connectiondata,
perioddata=perioddata,
pname="maw-1",
)
packagedata = [(0, 1.0, 11), (1, 0.5, 11)]
outlets = [(0, 0, 1, "manning", 0.001, 0.0, 0.1, 0.001)]
nlakes = len(packagedata)
noutlets = len(outlets)
connectiondata = [
(0, 0, (0, 0, 5), "horizontal", 1.0e-05, -5.0, 0.0, 100.0, 100.0),
(0, 1, (0, 1, 4), "horizontal", 1.0e-05, -5.0, 0.0, 100.0, 100.0),
(0, 2, (1, 1, 5), "vertical", 1.0e-05, -5.0, 0.0, 1.0, 0.0),
(0, 3, (0, 2, 4), "horizontal", 1.0e-05, -5.0, 0.0, 100.0, 100.0),
(0, 4, (0, 3, 5), "horizontal", 1.0e-05, -5.0, 0.0, 100.0, 100.0),
(0, 5, (0, 2, 6), "horizontal", 1.0e-05, -5.0, 0.0, 100.0, 100.0),
(0, 6, (1, 2, 5), "vertical", 1.0e-05, -5.0, 0.0, 1.0, 0.0),
(0, 7, (0, 0, 6), "horizontal", 1.0e-05, -5.0, 0.0, 100.0, 100.0),
(0, 8, (0, 2, 6), "horizontal", 1.0e-05, -5.0, 0.0, 100.0, 100.0),
(0, 9, (0, 1, 7), "horizontal", 1.0e-05, -5.0, 0.0, 100.0, 100.0),
(0, 10, (1, 1, 6), "vertical", 1.0e-05, -5.0, 0.0, 1.0, 0.0),
(1, 0, (0, 0, 8), "horizontal", 1.0e-05, -1.0, 0.0, 100.0, 100.0),
(1, 1, (0, 1, 7), "horizontal", 1.0e-05, -1.0, 0.0, 100.0, 100.0),
(1, 2, (0, 1, 9), "horizontal", 1.0e-05, -1.0, 0.0, 100.0, 100.0),
(1, 3, (1, 1, 8), "vertical", 1.0e-05, -1.0, 0.0, 0.0, 0.0),
(1, 4, (0, 2, 7), "horizontal", 1.0e-05, -1.0, 0.0, 100.0, 100.0),
(1, 5, (0, 2, 9), "horizontal", 1.0e-05, -1.0, 0.0, 100.0, 100.0),
(1, 6, (1, 2, 8), "vertical", 1.0e-05, -1.0, 0.0, 0.0, 0.0),
(1, 7, (0, 3, 7), "horizontal", 1.0e-05, -1.0, 0.0, 100.0, 100.0),
(1, 8, (0, 4, 8), "horizontal", 1.0e-05, -1.0, 0.0, 100.0, 100.0),
(1, 9, (0, 3, 9), "horizontal", 1.0e-05, -1.0, 0.0, 100.0, 100.0),
(1, 10, (1, 3, 8), "vertical", 1.0e-05, -1.0, 0.0, 0.0, 0.0),
]
perioddata = [
(1, "status", "active"),
(1, "rainfall", "0.0"),
(1, "evaporation", "0.000000000000e+000"),
(1, "runoff", "0.000000000000e+000"),
(1, "withdrawal", "0.000000000000e+000"),
(0, "rate", "1.000000000000e+000"),
(0, "invert", "1.000000000000e-003"),
(0, "width", "0.000000000000e+000"),
(0, "slope", "1.000000000000e-003"),
(0, "rough", "1.000000000000e-001"),
]
cnvgpth = "{}.lak.cnvg.csv".format(name)
lak = flopy.mf6.ModflowGwflak(
gwf,
mover=True,
nlakes=nlakes,
noutlets=noutlets,
print_stage=True,
print_flows=True,
package_convergence_filerecord=cnvgpth,
packagedata=packagedata,
connectiondata=connectiondata,
outlets=outlets,
perioddata=perioddata,
pname="lak-1",
)
packagedata = [
(0, (0, 5, 1), 1, -1, 1.0, 1.0e-05, 0.2, 0.4, 0.3, 3.5),
(1, (0, 5, 2), 1, -1, 1.0, 1.0e-05, 0.2, 0.4, 0.3, 3.5),
(2, (0, 5, 3), 1, -1, 1.0, 1.0e-05, 0.2, 0.4, 0.3, 3.5),
(3, (0, 6, 1), 1, -1, 1.0, 1.0e-05, 0.2, 0.4, 0.3, 3.5),
(4, (0, 6, 2), 1, -1, 1.0, 1.0e-05, 0.2, 0.4, 0.3, 3.5),
(5, (0, 6, 3), 1, -1, 1.0, 1.0e-05, 0.2, 0.4, 0.3, 3.5),
(6, (0, 7, 1), 1, -1, 1.0, 1.0e-05, 0.2, 0.4, 0.3, 3.5),
(7, (0, 7, 2), 1, -1, 1.0, 1.0e-05, 0.2, 0.4, 0.3, 3.5),
(8, (0, 7, 3), 1, -1, 1.0, 1.0e-05, 0.2, 0.4, 0.3, 3.5),
]
perioddata = [
[0, 1.0e-8, 0, 0, 0, 0, 0, 0],
[1, 1.0e-8, 0, 0, 0, 0, 0, 0],
[2, 1.0e-8, 0, 0, 0, 0, 0, 0],
[3, 1.0e-8, 0, 0, 0, 0, 0, 0],
[4, 1.0e-8, 0, 0, 0, 0, 0, 0],
[5, 1.0e-8, 0, 0, 0, 0, 0, 0],
[6, 1.0e-8, 0, 0, 0, 0, 0, 0],
[7, 1.0e-8, 0, 0, 0, 0, 0, 0],
[8, 1.0e-8, 0, 0, 0, 0, 0, 0],
]
cnvgpth = "{}.uzf.cnvg.csv".format(name)
uzf = flopy.mf6.ModflowGwfuzf(
gwf,
mover=True,
package_convergence_filerecord=cnvgpth,
nuzfcells=len(packagedata),
ntrailwaves=7,
nwavesets=40,
packagedata=packagedata,
perioddata=perioddata,
pname="uzf-1",
)
packages = [("drn-1",), ("lak-1",), ("maw-1",), ("sfr-1",), ("uzf-1",)]
perioddata = [
("drn-1", 0, "lak-1", 1, "excess", 1.0),
("drn-1", 0, "maw-1", 0, "threshold", 2.0),
("drn-1", 0, "sfr-1", 2, "upto", 3.0),
("drn-1", 1, "lak-1", 1, "excess", 1.0),
("drn-1", 1, "maw-1", 0, "threshold", 2.0),
("drn-1", 1, "sfr-1", 2, "upto", 3.0),
("lak-1", 0, "sfr-1", 0, "factor", 1.0),
("uzf-1", 0, "sfr-1", 0, "factor", 1.0),
("uzf-1", 1, "sfr-1", 0, "factor", 1.0),
("uzf-1", 2, "sfr-1", 0, "factor", 1.0),
("uzf-1", 3, "sfr-1", 0, "factor", 1.0),
("uzf-1", 4, "sfr-1", 0, "factor", 1.0),
("uzf-1", 5, "sfr-1", 0, "factor", 1.0),
("uzf-1", 6, "sfr-1", 0, "factor", 1.0),
("uzf-1", 7, "sfr-1", 0, "factor", 1.0),
("uzf-1", 8, "sfr-1", 0, "factor", 1.0),
("sfr-1", 2, "sfr-1", 3, "factor", 0.5),
("sfr-1", 6, "sfr-1", 4, "factor", 0.5),
("sfr-1", 8, "sfr-1", 4, "factor", 0.5),
]
mvr = flopy.mf6.ModflowGwfmvr(
gwf,
maxmvr=len(perioddata),
budget_filerecord="{}.mvr.bud".format(name),
maxpackages=len(packages),
print_flows=True,
packages=packages,
perioddata=perioddata,
)
# output control
oc = flopy.mf6.ModflowGwfoc(
gwf,
budget_filerecord="{}.cbc".format(name),
head_filerecord="{}.hds".format(name),
saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
printrecord=[("BUDGET", "LAST"), ("HEAD", "LAST")],
)
return sim
# - No need to change any code below
def test_mf6model():
# build and run the test model
sim = build_model()
sim.write_simulation()
sim.run_simulation()
# ensure that the error msg is contained in the mfsim.lst file
    with open(os.path.join(testdir, "mfsim.lst"), "r") as f:
        lines = f.readlines()
error_count = 0
expected_msg = False
for line in lines:
if "cprior" and "divflow not within" in line:
expected_msg = True
error_count += 1
assert error_count == 1, (
"error count = " + str(error_count) + "but should equal 1"
)
print("Finished running surfdep check")
return
def main():
# build and run the test model
sim = build_model()
sim.write_simulation()
sim.run_simulation()
# ensure that the error msg is contained in the mfsim.lst file
    with open(os.path.join(testdir, "mfsim.lst"), "r") as f:
        lines = f.readlines()
error_count = 0
expected_msg = False
for line in lines:
if "cprior" and "divflow not within" in line:
expected_msg = True
error_count += 1
assert error_count == 1, (
"error count = " + str(error_count) + "but should equal 1"
)
print("Finished running surfdep check")
return
if __name__ == "__main__":
# print message
print("standalone run of {}".format(os.path.basename(__file__)))
# run main routine
main()
| 27.524618 | 79 | 0.449914 |
79475227dfe181c428665f6d5e23a6091307ed94 | 2,176 | py | Python | NEW GUI.py | Hassan0072/CP_HS99 | 365ecd119e19fcf2c69ccaef42b278a1a9256741 | [
"MIT"
] | null | null | null | NEW GUI.py | Hassan0072/CP_HS99 | 365ecd119e19fcf2c69ccaef42b278a1a9256741 | [
"MIT"
] | 3 | 2019-05-05T16:30:58.000Z | 2019-05-11T00:23:02.000Z | NEW GUI.py | Hassan0072/CP_HS99 | 365ecd119e19fcf2c69ccaef42b278a1a9256741 | [
"MIT"
] | 1 | 2019-04-21T18:46:01.000Z | 2019-04-21T18:46:01.000Z | from tkinter import *
from tkinter import messagebox
from socket import *
import _thread
root = Tk()
root.title("Remote File Sharing System")
root.geometry("1030x600")
root.minsize(width=500 , height=500)
topFrame = Frame(root)
bottomFrame = Frame(root)
theLabel = Label(root, text="Remote File Sharing System (User Interface)",bg="black",fg="white",font="Times 22",width=60)
theLabel.grid(row=0, column=0, padx=8, pady=8, sticky="NSNESWSE")
topFrame.grid(row=50,column=50)
bottomFrame.grid(row=10,column=10)
root.configure(width=600,height=800)
import socket
s= socket.socket()
host = socket.gethostname()
port= 8080
s.bind((host,port))
s.listen(1)
label = Label(root, text="Server Software ", font=",16",bg="blue",fg="White",width=10)
label.grid(row=1, column=0, padx=8, pady=8, sticky="NSNESWSE")
l_host=Label(root,text="Enter Host NAME")
l_host.grid(row=2, column=0, padx=3, pady=2, sticky="NSNESWSE")
e_host=Entry(root)
e_host.place(x=550,y=100)
e_host.insert(END,'127.0.0.1')
l_port=Label(root,text="Enter Port")
l_port.grid(row=3, column=0, padx=8, pady=8, sticky="NSNESWSE")
e_port=Entry(root)
e_port.place(x=550,y=125)
e_port.insert(END,12121)
message_label = Label(root, text="Client Message", font=("Arial", 12), width=100)
message_label.grid(row=4 , column=0, padx=8, pady=8,sticky="NSEW")
scrollbar_y = Scrollbar(root)
scrollbar_y.grid(row=5, column=3,rowspan=6)
show_1=Text(root,height=8, width=100, yscrollcommand=scrollbar_y.set,
bg="Grey",fg="White")
show_1.grid(row=5, column=0,rowspan=3,columnspan=2,sticky="NSEW")
b_connect=Button(root,text=" Connect")
b_connect.grid(row=14,column=0,padx=10,pady=10,sticky="nsew")
b_disconnect=Button(root,text=" disconnect")
b_disconnect.grid(row=14,column=1,padx=10,pady=10,sticky="nsew")
def connect():
# CONNECT COM PORT
print(e_host.get())
e_host_v=e_host.get()
e_port_v=int(e_port.get())
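    # note: my_server is assumed to be defined elsewhere in this project; it is neither
    # imported nor defined in this file, so the thread started below relies on that.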
_thread.start_new_thread(my_server,(show_1,e_host_v,e_port_v))
#start_new_thread(my_server,(show_1,e_host_v,e_port_v))
global secs
secs = 0
#runner() # start repeated checking
root.mainloop() | 32 | 121 | 0.702206 |
79475248015ea6699066c9acefca9174448c1bb2 | 400 | py | Python | sols_python/1259.py | souzajackson/Beecrowd | c7323e51cd5132c523a1812be5ad5de1a152a63f | [
"MIT"
] | null | null | null | sols_python/1259.py | souzajackson/Beecrowd | c7323e51cd5132c523a1812be5ad5de1a152a63f | [
"MIT"
] | null | null | null | sols_python/1259.py | souzajackson/Beecrowd | c7323e51cd5132c523a1812be5ad5de1a152a63f | [
"MIT"
] | null | null | null | entrada1 = int(input())
valores = list()
pares = list()
impares = list()
for c in range(entrada1):
entrada = int(input())
valores.append(entrada)
for elemento in valores:
if elemento % 2 == 0:
pares.append(elemento)
else:
impares.append(elemento)
pares.sort()
impares.sort(reverse = True)
for numero in pares:
print(numero)
for numero in impares:
print(numero) | 22.222222 | 32 | 0.66 |
794752801622e84270802dd59d03a6cd2eeb9c3c | 393 | py | Python | setup.py | depaolim/pysrvany | cdb1e6786303192156584545149bb5a8226973c3 | [
"MIT"
] | null | null | null | setup.py | depaolim/pysrvany | cdb1e6786303192156584545149bb5a8226973c3 | [
"MIT"
] | null | null | null | setup.py | depaolim/pysrvany | cdb1e6786303192156584545149bb5a8226973c3 | [
"MIT"
] | null | null | null | from setuptools import setup
setup(
name='pysrvany',
version='0.0.1',
description='Run Windows Executables as Services',
license="MIT",
author='Marco De Paoli',
author_email='[email protected]',
url="https://github.com/depaolim/pysrvany",
packages=['pysrvany'],
install_requires=[], # external packages as dependencies
scripts=['pysrvany_cli.py']
)
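# Illustrative usage note (not part of the original file): with this script in the project
# root, a local install would typically be done with `pip install .`, which also installs
# the pysrvany_cli.py script listed above.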
| 24.5625 | 61 | 0.676845 |
794752ad248ac1e49d852f894df16c035754b838 | 294 | py | Python | towers/monkey_village.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | towers/monkey_village.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | towers/monkey_village.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | from tower import Tower
from config import keybinds
class MonkeyVillage(Tower):
name = 'monkey_village'
range = 215
width = 119
height = 103
size = 'xl'
keybind = keybinds[name]
aquatic = False
def __init__(self, **kwargs):
super().__init__(**kwargs)
| 18.375 | 34 | 0.636054 |
794753a918f4eca3894e9ae52a9b382999b3b433 | 101,596 | py | Python | functions_NoiseStudy.py | silviaelisabeth/Noise-vs-Resolution | cc317f52a15263e4abb5cd94c8c524e18a6dfa85 | [
"Apache-2.0"
] | 1 | 2021-07-07T08:24:21.000Z | 2021-07-07T08:24:21.000Z | functions_NoiseStudy.py | silviaelisabeth/Noise-vs-Resolution | cc317f52a15263e4abb5cd94c8c524e18a6dfa85 | [
"Apache-2.0"
] | null | null | null | functions_NoiseStudy.py | silviaelisabeth/Noise-vs-Resolution | cc317f52a15263e4abb5cd94c8c524e18a6dfa85 | [
"Apache-2.0"
] | null | null | null | __author__ = 'Silvia E Zieger'
__project__ = 'noise vs resolution'
"""Copyright 2020. All rights reserved.
This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable
for any damages arising from the use of this software.
Permission is granted to anyone to use this software within the scope of evaluating mutli-analyte sensing. No permission
is granted to use the software for commercial applications, and alter it or redistribute it.
This notice may not be removed or altered from any distribution.
"""
import matplotlib
import matplotlib.pylab as plt
from matplotlib.path import Path
import matplotlib.patches as patches
from mpl_toolkits.mplot3d import Axes3D
from shapely.geometry import LineString
import cv2
import math
import multiprocessing as mp
import seaborn as sns
import pandas as pd
import numpy as np
import random
from lmfit import Model
import scipy.signal
from scipy.interpolate import interp1d
from scipy.signal import savgol_filter
from mcerp import *
from uncertainties import *
from uncertainties import unumpy
import h5py
import os
from glob import glob
from PIL import Image
import datetime
# global variables
sns.set(style="darkgrid")
sns.set_context('paper')
col = ['#14274e', '#f6830f', '#bb2205']
mark = ['o', 'd']
fs = 13
depth_lim = dict({'optode1': (-5, 4), 'optode2': (-5, 4)})
# =====================================================================================
def prep_plotting_avSD(error, dfoptode, uncer_op1, uncer_op2):
if error == 'SD' or error == 'sd':
df_error = [pd.DataFrame([[i.s for i in dfoptode[en][se]] for se in dfoptode[en].columns],
columns=dfoptode[en].index, index=dfoptode[en].columns).T
for en in range(len(dfoptode))]
else:
df_error = [uncer_op1['sem'], uncer_op2['sem']]
return df_error
def prepPlot_optodeSet(o, s, error, dfoptode, uncer_op1, uncer_op2):
if '1' in o:
dfop, optode_sem, interpol = dfoptode[0], uncer_op1['sem'], uncer_op1['SD_interpol'][s]
optode_sd = pd.DataFrame([[i.s for i in dfop[se]] for se in dfop.columns], index=dfop.columns,
columns=dfop.index).T
else:
dfop, optode_sem, interpol = dfoptode[1], uncer_op2['sem'], uncer_op2['SD_interpol'][s]
optode_sd = pd.DataFrame([[i.s for i in dfop[se]] for se in dfop.columns], index=dfop.columns,
columns=dfop.index).T
if error == 'SD':
dferr = optode_sd
else:
dferr = optode_sem
return dfop, interpol, dferr
def prepPlot_SVerrprop(error, dop1_value, dop2_value, op1_normSEM, op2_normSEM):
if error == 'SD' or error == 'sd':
derror1 = dict(map(lambda s: (s, dop1_value[s][['O2 SD', 'iratio SD']]), dop1_value.keys()))
derror2 = dict(map(lambda s: (s, dop2_value[s][['O2 SD', 'iratio SD']]), dop2_value.keys()))
else:
derror1, derror2 = op1_normSEM, op2_normSEM
for s in derror1.keys():
derror1[s].columns, derror2[s].columns = ['O2', 'iratio'], ['O2', 'iratio']
derror = [derror1, derror2]
return derror
def prepPlot_SVerrprop_ex(o, s, error, dop1_value=None, dop1_param=None, op1_normSEM=None, f1inter_mc=None,
dop2_value=None, dop2_param=None, op2_normSEM=None, f2inter_mc=None):
if '1' in o:
ls_df = [dop1_value, dop1_param, op1_normSEM, f1inter_mc]
        if any(i is None for i in ls_df):
            raise ValueError('To plot the example, provide all relevant data! Please check dop_value, dop_param, '
                             'op_normSEM, and finter_mc')
dfop, dop_para, df_SEM, finter_mc = dop1_value[s], dop1_param[s], op1_normSEM[s], f1inter_mc[s]
else:
ls_df = [dop2_value, dop2_param, op2_normSEM, f2inter_mc]
        if any(i is None for i in ls_df):
            raise ValueError('To plot the example, provide all relevant data! Please check dop_value, dop_param, '
                             'op_normSEM, and finter_mc')
dfop, dop_para, df_SEM, finter_mc = dop2_value[s], dop2_param[s], op2_normSEM[s], f2inter_mc[s]
if error == 'SD' or error == 'sd':
dferr = dfop[['O2 SD', 'iratio SD']]
else:
dferr = pd.concat([df_SEM['O2'], pd.DataFrame([i.s for i in df_SEM['iratio']], index=df_SEM.index)], axis=1)
dferr.columns = ['O2', 'iratio']
return dfop, dop_para, df_SEM, dferr, finter_mc
def prepMS_plot(index_lp, dic_micro, offset):
# microsensor preparation
df_micro = dic_micro['run1'].set_index('Intensity (mV)')
df_micro['Depth (mm)'] = df_micro['Depth (µm)'] / 1000 # depth in mm
# microsensor extension to same depth as selected for the optode
df_ms = pd.DataFrame([df_micro['Depth (mm)'].index, df_micro['Depth (mm)']], index=['Intensity', 'Depth (mm)']).T
xnew = np.linspace(1, len(df_ms.index), num=int(len(df_ms.index)))
df_ms.index = xnew
df_ms.loc[0, :] = [df_ms['Intensity'].loc[:3].to_numpy().mean(), index_lp[0] * 1.05]
df_ms = df_ms.sort_index()
df_ms.loc[xnew[-1] + 1, :] = [df_ms['Intensity'].loc[df_ms.shape[0] - 3:].to_numpy().mean(), index_lp[-1] * 1.05]
df_ms = df_ms.sort_index()
df_ms['Depth (mm)'] = [i - offset for i in df_ms['Depth (mm)']]
return df_ms
def sgolay2d(z, window_size, order, derivative=None):
# number of terms in the polynomial expression
n_terms = (order + 1) * (order + 2) / 2.0
if window_size % 2 == 0:
raise ValueError('window_size must be odd')
if window_size**2 < n_terms:
raise ValueError('order is too high for the window size')
half_size = window_size // 2
# exponents of the polynomial: p(x,y) = a0 + a1*x + a2*y + a3*x^2 + a4*y^2 + a5*x*y + ...
# this line gives a list of two item tuple. Each tuple contains the exponents of the k-th term. First element of
# tuple is for x second element for y.
exps = [(k-n, n) for k in range(order+1) for n in range(k+1)]
# coordinates of points
ind = np.arange(-half_size, half_size+1, dtype=np.float64)
dx = np.repeat(ind, window_size)
dy = np.tile(ind, [window_size, 1]).reshape(window_size**2, )
# build matrix of system of equation
A = np.empty((window_size**2, len(exps)))
for i, exp in enumerate(exps):
A[:, i] = (dx**exp[0]) * (dy**exp[1])
# pad input array with appropriate values at the four borders
new_shape = z.shape[0] + 2*half_size, z.shape[1] + 2*half_size
Z = np.zeros((new_shape))
# top band
band = z[0, :]
Z[:half_size, half_size:-half_size] = band - np.abs( np.flipud(z[1:half_size+1, :]) - band)
# bottom band
band = z[-1, :]
Z[-half_size:, half_size:-half_size] = band + np.abs(np.flipud(z[-half_size-1:-1, :]) -band)
# left band
band = np.tile(z[:, 0].reshape(-1, 1), [1, half_size])
Z[half_size:-half_size, :half_size] = band - np.abs(np.fliplr(z[:, 1:half_size+1]) - band)
# right band
band = np.tile(z[:, -1].reshape(-1, 1), [1, half_size])
Z[half_size:-half_size, -half_size:] = band + np.abs(np.fliplr(z[:, -half_size-1:-1]) - band)
# central band
Z[half_size:-half_size, half_size:-half_size] = z
# top left corner
band = z[0, 0]
Z[:half_size, :half_size] = band - np.abs(np.flipud(np.fliplr(z[1:half_size+1, 1:half_size+1])) - band)
# bottom right corner
band = z[-1, -1]
Z[-half_size:, -half_size:] = band + np.abs(np.flipud(np.fliplr(z[-half_size-1:-1, -half_size-1:-1])) - band)
# top right corner
band = Z[half_size, -half_size:]
Z[:half_size, -half_size:] = band - np.abs(np.flipud(Z[half_size+1:2*half_size+1, -half_size:]) - band)
# bottom left corner
band = Z[-half_size:, half_size].reshape(-1, 1)
Z[-half_size:, :half_size] = band - np.abs(np.fliplr(Z[-half_size:, half_size+1:2*half_size+1]) - band)
# solve system and convolve
    if derivative is None:
m = np.linalg.pinv(A)[0].reshape((window_size, -1))
return scipy.signal.fftconvolve(Z, m, mode='valid')
elif derivative == 'col':
c = np.linalg.pinv(A)[1].reshape((window_size, -1))
return scipy.signal.fftconvolve(Z, -c, mode='valid')
elif derivative == 'row':
r = np.linalg.pinv(A)[2].reshape((window_size, -1))
return scipy.signal.fftconvolve(Z, -r, mode='valid')
elif derivative == 'both':
c = np.linalg.pinv(A)[1].reshape((window_size, -1))
r = np.linalg.pinv(A)[2].reshape((window_size, -1))
return scipy.signal.fftconvolve(Z, -r, mode='valid'), scipy.signal.fftconvolve(Z, -c, mode='valid')
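def _sgolay2d_example():
    """Illustrative sketch added for clarity (not part of the original analysis): smooth a
    synthetic noisy 2D array with the sgolay2d filter above; the demo array and the choice of
    window_size/order are arbitrary."""
    z_demo = np.random.rand(60, 80)
    z_smooth = sgolay2d(z_demo, window_size=11, order=2)
    dz_row, dz_col = sgolay2d(z_demo, window_size=11, order=2, derivative='both')
    return z_smooth, dz_row, dz_col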
# ------------------------------------------------------------------------
def plot_optode_avSD_v1(conc, dfoptode, error, col, mark, fs, RoI_op):
fig2, ax2 = plt.subplots(figsize=(8, 8), nrows=3, ncols=len(RoI_op), sharex=True, sharey=True, frameon=False)
if len(RoI_op) == 1:
ax2[0].set_title('Optode with different settings', fontsize=fs * 0.9)
else:
for n in range(len(RoI_op)):
ax2[0][n].set_title('Optode-' + str(n+1), fontsize=fs*0.9)
# plotting part
ls_handels = list()
if len(RoI_op) == 1:
for en in range(len(dfoptode[0].columns)):
l = ax2[en].errorbar(conc, [i.n for i in dfoptode[0][en]], error[0][en].values, linestyle='None',
marker=mark[0], fillstyle='none', color=col[en], ms=6, capsize=6, label=en)
ls_handels.append(l)
else:
for o in range(len(RoI_op)):
for en, s in enumerate(dfoptode[o].columns):
l = ax2[en][o].errorbar(conc, [i.n for i in dfoptode[o][s]], error[o][s].values, linestyle='None',
marker=mark[0], fillstyle='none', color=col[en], ms=6, capsize=6,
label=s.split('t')[0] + 'tting ' + s.split('t')[-1])
if o == 1:
ls_handels.append(l)
# legend and axis layout / labelling
if len(RoI_op) == 1:
ax2[1].legend(handles=ls_handels, loc="upper left", bbox_to_anchor=[1, 0.9], shadow=True, fancybox=True)
else:
ax2[1][len(RoI_op)-1].legend(handles=ls_handels, loc="upper left", bbox_to_anchor=[1, 0.9], shadow=True,
fancybox=True)
if len(RoI_op) == 1:
ax2[0].tick_params(axis='both', which='both', direction='out', labelsize=fs * 0.8) # top row
ax2[1].tick_params(axis='both', which='both', direction='out', labelsize=fs * 0.8) # middle row
ax2[2].tick_params(axis='both', which='both', direction='out', labelsize=fs * 0.8) # bottom row
else:
for o in range(len(RoI_op)):
ax2[0][o].tick_params(axis='both', which='both', direction='out', labelsize=fs * 0.8) # top row
ax2[1][o].tick_params(axis='both', which='both', direction='out', labelsize=fs * 0.8) # middle row
ax2[2][o].tick_params(axis='both', which='both', direction='out', labelsize=fs * 0.8) # bottom row
# x,y label position
fig2.text(0.5, 0.075, 'O$_2$ concentration [%air]', va='center', ha='center', fontsize=fs * 1.2)
fig2.text(0.025, 0.55, 'Ratio $R/G$', va='center', ha='center', rotation='vertical', fontsize=fs * 1.2)
plt.subplots_adjust(left=0.1, bottom=0.15, right=0.85, top=0.95)
plt.show()
return fig2, ax2
def plot_optode_set(o, s, conc, xinter, dfop, interpol, optode_sem, fs=11):
fig2, ax2 = plt.subplots(figsize=(5, 3), frameon=False)
ax2.set_title(o, fontsize=fs*0.9)
# plotting part
ax2.errorbar(conc, [i.n for i in dfop[s]], optode_sem[s].values, linestyle='None', marker=mark[int(s[-1])-1],
fillstyle='none', color=col[int(s[-1])-1], ms=6, capsize=5, label=s)
ax2.fill_between(x=xinter, y1=interpol[0](xinter), y2=interpol[1](xinter), color=col[int(s[-1])-1], alpha=0.2, lw=0)
# legend and axis layout / labelling
ax2.legend(loc="upper left", bbox_to_anchor=[1, 0.9], shadow=True, fancybox=True)
ax2.tick_params(axis='both', which='both', direction='out', labelsize=fs*0.8)
# x,y label position
ax2.set_xlabel('O$_2$ concentration [%air]', va='center', ha='center', fontsize=fs*0.9)
ax2.set_ylabel('Ratio $R/G$', va='center', ha='center', rotation='vertical', fontsize=fs*0.9)
plt.tight_layout()
plt.show()
return fig2, ax2
def plot_SVerrorprop(dop1_value, dop1_param, derror, f1inter_mc, RoI1_av, RoI2_av=None, dop2_value=None,
dop2_param=None, f2inter_mc=None, fs=11.):
n = 1
if RoI2_av:
n += 1
ls = [dop2_value, dop2_param, f2inter_mc]
        if any(i is None for i in ls):
raise ValueError('To plot both optodes, all data are required! Please check dop_value, dop_param, '
'and finter_mc')
# -----------------------------------------------------------------------------------------
fig2, ax2 = plt.subplots(figsize=(8, 8), nrows=3, ncols=n, sharex=True, sharey=True, frameon=False)
if n == 1:
ax2[0].set_title('Optode 1', fontsize=fs*0.9)
else:
ax2[0][0].set_title('Optode 1', fontsize=fs*0.9), ax2[0][1].set_title('Optode 2', fontsize=fs*0.9)
num = int(100/0.5 + 1)
xnew = np.linspace(0, 100, num=num)
ls_handels = list()
if RoI1_av:
for en, s in enumerate(dop1_value.keys()):
name = s.split('t')[0] + 'tting ' + s.split('t')[-1]
O2new = np.linspace(dop1_value[s]['O2 mean'].loc[0], dop1_value[s]['O2 mean'].loc[100], num=num)
ynew = _simplifiedSV(xnew, k=dop1_param[s]['k'].mean, f=dop1_param[s]['f'].mean)
ydata = f1inter_mc[s]
# dashed line for bestFit
if n == 1:
ax2[en].plot(xnew, ynew, ls='-.', lw=1., color=col[en], label='bestFit')
l = ax2[en].errorbar(dop1_value[s]['O2 mean'], dop1_value[s]['iratio mean'].values, capsize=6, ms=6,
xerr=derror[0][s]['O2'].values, linestyle='None', marker=mark[0], color=col[en],
yerr=derror[0][s]['iratio'].values, fillstyle='none', label=name)
ax2[en].fill_between(x=O2new, y1=ydata[0](O2new), y2=ydata[1](O2new), color=col[en], alpha=0.2, lw=0)
ls_handels.append(l)
else:
ax2[en][0].plot(xnew, ynew, ls='-.', lw=1., color=col[en], label='bestFit')
l = ax2[en][0].errorbar(dop1_value[s]['O2 mean'], dop1_value[s]['iratio mean'].values, capsize=6, ms=6,
xerr=derror[0][s]['O2'].values, linestyle='None', marker=mark[0], color=col[en],
fillstyle='none', label=name)
ax2[en][0].fill_between(x=O2new, y1=ydata[0](O2new), y2=ydata[1](O2new), color=col[en], lw=0, alpha=0.2)
ls_handels.append(l)
ls_handels = list()
if RoI2_av:
for en, s in enumerate(dop2_value.keys()):
name = s.split('t')[0] + 'tting ' + s.split('t')[-1]
O2new = np.linspace(dop2_value[s]['O2 mean'].loc[0], dop2_value[s]['O2 mean'].loc[100], num=num)
ynew = _simplifiedSV(xnew, k=dop2_param[s]['k'].mean, f=dop2_param[s]['f'].mean)
ydata = f2inter_mc[s]
# dashed line for bestFit
if n == 1:
ax2[en].plot(xnew, ynew, ls='-.', lw=1., color=col[en], label='bestFit')
l = ax2[en].errorbar(dop2_value[s]['O2 mean'], dop2_value[s]['iratio mean'].values, capsize=6, ms=6,
xerr=derror[1][s]['O2'].values, linestyle='None', marker=mark[0], color=col[en],
fillstyle='none', label=name)
ax2[en].fill_between(x=O2new, y1=ydata[0](O2new), color=col[en], alpha=0.2, lw=0, y2=ydata[1](O2new))
ls_handels.append(l)
else:
ax2[en][1].plot(xnew, ynew, ls='-.', lw=1., color=col[en], label='bestFit')
l = ax2[en][1].errorbar(dop2_value[s]['O2 mean'], dop2_value[s]['iratio mean'].values, capsize=6, ms=6,
xerr=derror[1][s]['O2'].values, linestyle='None', marker=mark[0], color=col[en],
fillstyle='none', label=name)
ax2[en][1].fill_between(x=O2new, y1=ydata[0](O2new), color=col[en], lw=0, alpha=0.2, y2=ydata[1](O2new))
ls_handels.append(l)
# legend and axis layout / labelling
if n == 1:
ax2[1].legend(handles=ls_handels, loc="upper left", bbox_to_anchor=[1, 0.9], shadow=True, fancybox=True)
else:
ax2[1][1].legend(handles=ls_handels, loc="upper left", bbox_to_anchor=[1, 0.9], shadow=True, fancybox=True)
# x,y label position
fig2.text(0.5, 0.018, 'O$_2$ concentration [%air]', va='center', ha='center', fontsize=fs*1.2)
fig2.text(0.025, 0.55, 'Ratio $R/G$', va='center', ha='center', rotation='vertical', fontsize=fs*1.2)
plt.subplots_adjust(left=0.1, bottom=0.1, right=0.85, top=0.95)
plt.show()
return fig2, ax2
def plot_optode_set_SV(o, s, en, dfop, dop_para, dferr, finter_mc, fs=11):
fig2, ax2 = plt.subplots(figsize=(5, 3), frameon=False)
title = o + ' - ' + s
ax2.set_title(title, loc='left', fontsize=fs * 0.9)
xnew = np.linspace(0, 100, num=int(100 / 0.5 + 1))
O2new = np.linspace(dfop['O2 mean'].loc[0], dfop['O2 mean'].loc[100], num=int(100 / 0.5 + 1))
ynew = _simplifiedSV(xnew, k=dop_para['k'].mean, f=dop_para['f'].mean)
ax2.plot(xnew, ynew, ls='-.', lw=1., color=col[en - 1], label='bestFit')
ax2.errorbar(dfop['O2 mean'], dfop['iratio mean'].values, capsize=6, xerr=dferr['O2'].values,color=col[en - 1],
linestyle='None', marker=mark[0], fillstyle='none', ms=6, label=s)
ax2.fill_between(x=O2new, y1=finter_mc[0](O2new), y2=finter_mc[1](O2new), color=col[en - 1], alpha=0.2, lw=0)
# x,y label position
fig2.text(0.5, 0.04, 'O$_2$ concentration [%air]', va='center', ha='center', fontsize=fs)
fig2.text(0.025, 0.55, 'Ratio $R/G$', va='center', ha='center', rotation='vertical', fontsize=fs)
plt.subplots_adjust(left=0.1, bottom=0.2, right=0.95, top=0.9)
plt.show()
return fig2, ax2
def plot_wholeImage3D(dO2_mean, unit, pad=2):
xx, yy = np.meshgrid(dO2_mean.index.to_numpy(), dO2_mean.columns.to_numpy())
# 3D image of full area
fig = plt.figure(figsize=(10, 8))
ax = fig.gca(projection='3d')
surf = ax.plot_surface(xx, yy, dO2_mean.T.fillna(limit=5, method='ffill'), cmap='magma_r', linewidth=0, vmin=0,
vmax=100, antialiased=False, rstride=5, cstride=10)
cbar = fig.colorbar(surf, aspect=20, shrink=0.8)
ax.view_init(16, 45)
ax.tick_params(axis='x', labelsize=fs*0.9)
ax.tick_params(axis='y', labelsize=fs*0.9)
ax.tick_params(axis='z', labelsize=fs*0.9)
cbar.ax.tick_params(labelsize=fs*0.8)
ax.set_xlabel('Image height [{}]'.format(unit), fontsize=fs, labelpad=pad)
ax.set_ylabel('Image width [{}]'.format(unit), fontsize=fs, labelpad=pad)
ax.set_zlabel('$O_2$ concentration [%air]', fontsize=fs, labelpad=pad)
plt.tight_layout()
plt.draw()
return fig, ax
def plot_optode2D(o, s, px2mm, surface, dO2_av, depth_range, width_range, figsize=(6, 2), unit='mm', fs=11, vmin=None,
vmax=None):
# prepare optode for plotting; baseline correction and cropping the image to the depth and width of interest
df_data = optodePrep2D(o=o, s=s, px2mm=px2mm, baseline=surface, dO2_av=dO2_av, depth_range=depth_range,
width_range=width_range)
# resetting the axis ticks with extent
extent = [df_data.columns[0], df_data.columns[-1], # x-axis, e.g. columns
df_data.index[0], df_data.index[-1]] # y-axis, e.g. index
# plotting
fig, ax = plt.subplots(figsize=figsize)
sur = ax.imshow(df_data, extent=extent, cmap='magma_r', vmin=vmin, vmax=vmax)
if vmin is None:
vmin = int(df_data.min().min())
if vmax is None:
vmax = int(df_data.max().max())
plt.colorbar(sur, shrink=0.75, fraction=0.1, aspect=10, ticks=np.linspace(vmin, vmax, num=5))
ax.set_xlabel('Image width [{}]'.format(unit), fontsize=fs)
ax.set_ylabel('Image height [{}]'.format(unit), fontsize=fs)
plt.tight_layout()
return fig, ax
def plotLP(dO2_lp, df_ms, header_ms, depth, kshape, depth_lp, s, arg, dO2_optode=None):
# additional information
col_ = int(s[-1])-1
# figure creation
fig_lp = plt.figure(figsize=(arg['figsize']), dpi=100)
with plt.style.context('seaborn-darkgrid'):
ax1 = fig_lp.add_subplot(131)
ax2 = fig_lp.add_subplot(132, sharex=ax1, sharey=ax1)
ax3 = fig_lp.add_subplot(133, sharex=ax1, sharey=ax1)
if dO2_optode:
with plt.style.context('classic'):
ax11 = fig_lp.add_axes([0.13, 0.2, 0.2, 0.2])
ax21 = fig_lp.add_axes([0.44, 0.2, 0.2, 0.2])
if len(dO2_lp[kshape]['square'].keys()) != 0:
ax31 = fig_lp.add_axes([0.75, 0.2, 0.2, 0.2])
ax1.set_title('(A) Horizontal blur', fontsize=fs, loc='left')
ax2.set_title('(B) Vertical blur', fontsize=fs, loc='left')
if len(dO2_lp[kshape]['square'].keys()) != 0:
ax3.set_title('(C) Square blur', fontsize=fs, loc='left')
# plot line profile
# horizontal
df_h = dO2_lp[kshape]['horizontal'][arg['lw']].fillna(limit=5, method='ffill').loc[depth_lp[0]: depth_lp[1]]
ax1.plot(df_h['mean'].values, df_h.index, lw=arg['curve lw'], color=col[col_])
ax1.fill_betweenx(df_h.index, df_h['mean'].values - df_h['SD'].values, df_h['mean'].values + df_h['SD'].values,
facecolor=col[col_], alpha=0.25)
# vertical
df_v = dO2_lp[kshape]['vertical'][arg['lw']].fillna(limit=5, method='ffill').loc[depth_lp[0]: depth_lp[1]]
ax2.plot(df_v['mean'].values, df_v.index, lw=arg['curve lw'], color=col[col_])
ax2.fill_betweenx(df_v.index, df_v['mean'].values - df_v['SD'].values, df_v['mean'].values + df_v['SD'].values,
facecolor=col[col_], alpha=0.25)
# squared
if len(dO2_lp[kshape]['square'].keys()) == 0:
ax3.axis('off')
else:
df_s = dO2_lp[kshape]['square'][arg['lw']].fillna(limit=5, method='ffill').loc[depth_lp[0]: depth_lp[1]]
ax3.plot(df_s['mean'].values, df_s.index, lw=arg['curve lw'], color=col[col_])
ax3.fill_betweenx(df_s.index, df_s['mean'].values - df_s['SD'].values, df_s['mean'].values + df_s['SD'].values,
facecolor=col[col_], alpha=0.25)
# ..........................................
# 2D imshow
if dO2_optode:
opt_h = dO2_optode[kshape]['horizontal']
extent = [opt_h.columns[0], opt_h.columns[-1], # x-axis, e.g. columns
opt_h.index[-1], opt_h.index[0]] # y-axis, e.g. index
op1 = ax11.imshow(opt_h, extent=extent, aspect=arg['aspect'], cmap=arg['cmap'], vmin=arg['vmin op'],
vmax=arg['vmax op'])
op2 = ax21.imshow(dO2_optode[kshape]['vertical'], extent=extent, aspect=arg['aspect'], cmap=arg['cmap'],
vmin=arg['vmin op'], vmax=arg['vmax op'])
if len(dO2_lp[kshape]['square'].keys()) != 0:
op3 = ax31.imshow(dO2_optode[kshape]['square'], extent=extent, aspect=arg['aspect'], cmap=arg['cmap'],
vmin=arg['vmin op'], vmax=arg['vmax op'])
# color bar
fig_lp.colorbar(op1, aspect=10, shrink=0.8, ax=ax11)
fig_lp.colorbar(op2, aspect=10, shrink=0.8, ax=ax21)
if len(dO2_lp[kshape]['square'].keys()) != 0:
fig_lp.colorbar(op3, aspect=10, shrink=0.8, ax=ax31)
# ..........................................
# microsensor
ax1.plot(df_ms[header_ms[1]].to_numpy(), depth, lw=arg['curve lw'], color='black', label='microsensor')
ax2.plot(df_ms[header_ms[1]].to_numpy(), depth, lw=arg['curve lw'], color='black', label='microsensor')
if len(dO2_lp[kshape]['square'].keys()) != 0:
ax3.plot(df_ms[header_ms[1]].to_numpy(), depth, lw=arg['curve lw'], color='black', label='microsensor')
# ..........................................
# adjust axes
ax1.set_xlim(arg['vmin'], arg['vmax'])
ax1.set_ylim(df_h.index[-1] * 1.05, df_h.index[0] * 1.05)
ax1.tick_params(labelsize=arg['fontsize']*0.9)
ax2.tick_params(labelsize=arg['fontsize']*0.9)
if len(dO2_lp[kshape]['square'].keys()) != 0:
ax3.tick_params(labelsize=arg['fontsize']*0.9)
if dO2_optode:
ax11.tick_params(labelsize=arg['fontsize']*0.7)
ax21.tick_params(labelsize=arg['fontsize']*0.7)
if len(dO2_lp[kshape]['square'].keys()) != 0:
ax31.tick_params(labelsize=arg['fontsize']*0.7)
ax11.set_xlabel('Width [mm]', fontsize=arg['fontsize']*0.7)
ax11.set_ylabel('Height [mm]', fontsize=arg['fontsize']*0.7)
ax21.set_xlabel('Width [mm]', fontsize=arg['fontsize']*0.7)
ax21.set_ylabel('Height [mm]', fontsize=arg['fontsize']*0.7)
if len(dO2_lp[kshape]['square'].keys()) != 0:
ax31.set_xlabel('Width [mm]', fontsize=arg['fontsize']*0.7)
ax31.set_ylabel('Height [mm]', fontsize=arg['fontsize']*0.7)
fig_lp.text(0.4, 0.02, '$O_2$ concentration [%air]', fontsize=arg['fontsize'])
fig_lp.text(0.01, 0.48, 'Depth [mm]', fontsize=arg['fontsize'], rotation='vertical')
fig_lp.subplots_adjust(bottom=0.12, right=0.95, top=0.95, left=0.05, wspace=0.2, hspace=0.2)
return fig_lp
def plot_penetrationDepth(depth, ls_kernel, arg):
if isinstance(ls_kernel[0], tuple):
kernel_s = [k[1] for k in ls_kernel]
else:
kernel_s = ls_kernel
# .....................
fig, ax = plt.subplots(figsize=(5, 3.5))
for en, c in enumerate(depth.columns):
ax.plot(kernel_s, depth[c], lw=1., ls='-.', marker=arg['marker'][en], ms=7,
color=arg['colors'][en], fillstyle='none', label=c.split('-')[0] + ' blur')
ax.legend(loc=0, frameon=True, fancybox=True, fontsize=fs * 0.8)
ax.tick_params(axis='both', labelsize=fs * 0.8)
ax.set_xlabel('kernel size', fontsize=fs)
ax.set_ylabel('$O_2$ penetration depth [mm]', fontsize=fs)
plt.tight_layout()
return fig
# =====================================================================================
def crop_optode(dratio, RoI1, RoI2):
# optode 1
    if RoI1 is None:
optode1 = None
else:
optode1 = dict()
for en, c in enumerate(dratio.keys()):
ls_av = list()
for av in range(len(RoI1)):
height = RoI1[av][1][1] - RoI1[av][0][1]
im_ratio = dratio[c][0][RoI1[av][0][1]:RoI1[av][1][1] + 1]
ls_av.append(np.stack([im_ratio[n][RoI1[av][0][0]:RoI1[av][2][1] + 1] for n in np.arange(height + 1)],
axis=0))
optode1[c] = ls_av
# -------------------------------------------------------------------------
# optode 2
    if RoI2 is None:
optode2 = None
else:
optode2 = dict()
for en, c in enumerate(dratio.keys()):
ls_av = list()
for av in range(len(RoI2)):
height2 = RoI2[av][1][1] - RoI2[av][0][1]
im_ratio2 = dratio[c][1][RoI2[av][0][1]:RoI2[av][1][1] + 1]
ls_av.append(np.stack([im_ratio2[n][RoI2[av][0][0]:RoI2[av][2][1] + 1] for n in np.arange(height2 + 1)],
axis=0))
optode2[c] = ls_av
return optode1, optode2
def image_resolution(px, dist_mm, inch=None):
px2mm = px / dist_mm * 1
if inch:
dpi = px / inch
else:
dpi = None
return px2mm, dpi
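# Worked example with illustrative numbers: a ruler spanning 24 mm imaged over 1200 px gives
# px2mm = 1200 / 24 = 50 px per mm; if those 1200 px also correspond to 4 inch, dpi = 1200 / 4 = 300.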
def px2mm_conversion(df, px2mm, surface):
ind_new = df.index.to_numpy() / px2mm - surface
col_new = df.columns.to_numpy() / px2mm
df.index, df.columns = ind_new, col_new
return df
def round_decimals_up(number, decimals=2):
"""
Returns a value rounded up to a specific number of decimal places.
"""
if not isinstance(decimals, int):
raise TypeError("decimal places must be an integer")
elif decimals < 0:
raise ValueError("decimal places has to be 0 or more")
elif decimals == 0:
return math.ceil(number)
factor = 10 ** decimals
return math.ceil(number * factor) / factor
def round_decimals_down(number, decimals=2):
"""
Returns a value rounded down to a specific number of decimal places.
"""
if not isinstance(decimals, int):
raise TypeError("decimal places must be an integer")
elif decimals < 0:
raise ValueError("decimal places has to be 0 or more")
elif decimals == 0:
return math.floor(number)
factor = 10 ** decimals
return math.floor(number * factor) / factor
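# Quick check of the two rounding helpers above (values chosen purely for illustration):
# round_decimals_up(3.14159, 2) returns 3.15, round_decimals_down(3.14159, 2) returns 3.14.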
# =====================================================================================
def averaging_areas(doptode_set):
l = dict(map(lambda c: (c, pd.DataFrame([(np.mean(doptode_set[c][av]), np.std(doptode_set[c][av]))
for av in range(len(doptode_set[c]))], columns=['mean', 'SD_area'])),
doptode_set.keys()))
dfop_set = pd.concat(l, axis=0).sort_index(axis=0)
return dfop_set
def averaging_deriv(ls_x):
der = 1/len(ls_x) * ls_x
return der
def optode_normalization(dfop):
optode_norm = dict(map(lambda s: (s, dfop[s].loc[0] / dfop[s]), dfop.keys()))
# update calibration point zero
for s in optode_norm.keys():
sd = ((dfop[s][0].std_dev / dfop[s][0].n)**2)*2
optode_norm[s].loc[0] = ufloat(dfop[s][0].n / dfop[s][0].n, np.sqrt(sd) * dfop[s][0].n / dfop[s][0].n)
return optode_norm
def interpolation_SD(conc, dfop, s, method='cubic'):
f_min = interp1d(conc, [i.n-i.s for i in dfop[s]], kind=method)
f_max = interp1d(conc, [i.n+i.s for i in dfop[s]], kind=method)
return f_min, f_max
def interpolation_SDmc(df, s, method='cubic'):
I_min = interp1d(df[s]['O2 mean'].values, (df[s]['iratio mean'] - df[s]['iratio SD']).values, kind=method)
I_max = interp1d(df[s]['O2 mean'].values, (df[s]['iratio mean'] + df[s]['iratio SD']).values, kind=method)
return I_min, I_max
def channel_division(dconc, dch_num, dch_denom, s):
dratio = dict()
for c in dconc[s]:
dratio[c] = [dch_num[s][str(c)+'%'][n] / dch_denom[s][str(c)+'%'][n] for n in range(len(dch_num[s][str(c)+'%']))]
return dratio
def ratiometric_intensity(path, crop_op, channel, RoI1=None, RoI2=None):
# RoI are areas defined anti-clockwise starting from the top left corner with P(col / ind)
if crop_op:
pass
else:
RoI_op_ = RoI1 + RoI2
crop_op = [[(RoI_op_[o][p][0] + 5, RoI_op_[o][p][1] + 5) for p in range(len(RoI_op_[o]))]
for o in range(len(RoI_op_))]
# -------------------------------
# RoI for different settings
height = list(map(lambda n: (crop_op[n][1][1] - crop_op[n][0][1]), range(len(crop_op))))
# load all calibration images - collect information
dict_red, dict_green, dict_conc = load_calibration_info(path=path, RoI1=crop_op, height=height, channel=channel,
server=False)
# calculating the ratio R/G of the whole optode
dratio = dict(map(lambda k: (k, channel_division(dconc=dict_conc, dch_num=dict_red, dch_denom=dict_green, s=k)),
dict_red.keys()))
# combine cropped info
optode1 = dict(map(lambda s: (s, crop_optode(dratio[s], RoI1=RoI1, RoI2=RoI2)[0]), dratio.keys()))
optode2 = dict(map(lambda s: (s, crop_optode(dratio[s], RoI1=RoI1, RoI2=RoI2)[1]), dratio.keys()))
# determine number of pixels within the defined RoI = sample size
if RoI1:
npx1 = (RoI1[0][1][1] - RoI1[0][0][1]) * (RoI1[0][2][0] - RoI1[0][0][0])
else:
npx1 = 0
if RoI2:
npx2 = (RoI2[0][1][1] - RoI2[0][0][1]) * (RoI2[0][2][0] - RoI2[0][0][0])
else:
npx2 = 0
# ----------------------------------------------------------
# signal averaged within RoI used as start/input signal for uncertainty propagation averaging each RoI for all
# optodes and settings
if RoI1: # optode 1
dfop1_set1 = averaging_areas(doptode_set=optode1['set1'])
dfop1_set2 = averaging_areas(doptode_set=optode1['set2'])
dfop1_set3 = averaging_areas(doptode_set=optode1['set3'])
conc = dfop1_set1.index.levels[0].to_numpy()
dfop1 = dict({'set1': [ufloat(dfop1_set1.loc[i, 'mean'], dfop1_set1.loc[i, 'SD_area']) for i in dfop1_set1.index],
'set2': [ufloat(dfop1_set2.loc[i, 'mean'], dfop1_set2.loc[i, 'SD_area']) for i in dfop1_set2.index],
'set3': [ufloat(dfop1_set3.loc[i, 'mean'], dfop1_set3.loc[i, 'SD_area']) for i in dfop1_set3.index]})
dfop1 = pd.DataFrame(dfop1, index=conc)
else:
dfop1 = None
if RoI2: # optode 2
dfop2_set1 = averaging_areas(doptode_set=optode2['set1'])
dfop2_set2 = averaging_areas(doptode_set=optode2['set2'])
dfop2_set3 = averaging_areas(doptode_set=optode2['set3'])
conc = dfop2_set1.index.levels[0].to_numpy()
dfop2 = dict({'set1': [ufloat(dfop2_set1.loc[i, 'mean'], dfop2_set1.loc[i, 'SD_area']) for i in dfop2_set1.index],
'set2': [ufloat(dfop2_set2.loc[i, 'mean'], dfop2_set2.loc[i, 'SD_area']) for i in dfop2_set2.index],
'set3': [ufloat(dfop2_set3.loc[i, 'mean'], dfop2_set3.loc[i, 'SD_area']) for i in dfop2_set3.index]})
dfop2 = pd.DataFrame(dfop2, index=conc)
else:
dfop2 = None
# prepare for output
dfoptode = [dfop1, dfop2]
para = dict({'sample size': (npx1, npx2), 'concentration': conc, 'ch1': dict_red, 'ch2': dict_green})
return dfoptode, para
def reduce_dict(name, dint1, dint2=None, nopt=1, option='ratio'):
if option == 'ratio':
dop1 = dict(map(lambda s: (s, np.divide(dint1[s][name][0], dint2[s][name][0])), dint1.keys()))
if nopt > 1:
dop2 = dict(map(lambda s: (s, np.divide(dint1[s][name][1], dint2[s][name][1])), dint1.keys()))
else:
dop2 = None
else:
dop1 = dict(map(lambda s: (s, dint1[s][name][0]), dint1.keys()))
if nopt > 1:
dop2 = dict(map(lambda s: (s, dint1[s][name][1]), dint1.keys()))
else:
dop2 = None
dint = dict({'optode1': dop1, 'optode2': dop2})
return dint
def splitImage(path, RoI_op):
# RoI for different settings
height = dict(map(lambda o: (o, RoI_op[o][1][1] - RoI_op[o][0][1]), range(len(RoI_op))))
dict_red, dict_green = load_files(path, RoI_op, height)
# split into smaller dictionaries
name = list(dict_red['set1'].keys())[0]
dint_red = reduce_dict(name=name, dint1=dict_red, dint2=None, nopt=len(RoI_op), option='single')
dint_green = reduce_dict(name=name, dint1=dict_green, dint2=None, nopt=len(RoI_op), option='single')
dint_ratio = reduce_dict(name=name, dint1=dict_red, dint2=dict_green, nopt=len(RoI_op), option='ratio')
return dint_red, dint_green, dint_ratio
def split2statics(dO2):
# mean value
dic_av = dict(map(lambda o:
(o, dict(map(lambda s:
(s, pd.DataFrame(list(map(lambda j: [i.n if i is not np.nan else i
for i in dO2[o][s][j]], dO2[o][s].columns)),
columns=dO2[o][s].index, index=dO2[o][s].columns).T),
dO2[o].keys()))), dO2.keys()))
# standard error
dic_sd = dict(map(lambda o:
(o, dict(map(lambda s:
(s, pd.DataFrame(list(map(lambda j: [i.s if i is not np.nan else i
for i in dO2[o][s][j]], dO2[o][s].columns)),
columns=dO2[o][s].index, index=dO2[o][s].columns).T),
dO2[o].keys()))), dO2.keys()))
return dic_av, dic_sd
def line_profile_v1(df, lp, lw):
if df.empty is True:
df_lp = None
else:
# find closest value in df.columns
        diff_min, diff_max = (lp - lw / 2) - df.columns, (lp + lw / 2) - df.columns
        pos_min, pos_max = None, None  # fallbacks so the checks below never hit an unbound name
for en, i in enumerate(diff_min):
if i == min(np.abs(diff_min)):
pos_min = (en, df.columns[en])
for en, i in enumerate(diff_max):
if i == min(np.abs(diff_max)):
pos_max = (en, df.columns[en])
if pos_min:
pass
else:
pos_min = (None, None)
if pos_max:
pass
else:
pos_max = (None, None)
if pos_min == pos_max:
df_lp = pd.DataFrame(df[pos_min[1]])
else:
df_lp = df.loc[:, pos_min[1]:pos_max[1]]
return df_lp
def optodePrep2D(o, s, dO2_av, px2mm, baseline, depth_range=None, width_range=None):
# image preparation and cropping image depth/width
df_ex = dO2_av[o][s]
xnew = df_ex.index
df = df_ex.copy()
df.index = reversed(xnew)
if depth_range is None:
df_ = df
else:
px_range = np.arange(0, len(df.index) + 1, step=1)
px_range_mm = px_range / px2mm - baseline[int(o.split('e')[-1])-1]
crop_px1 = list()
for en, p in enumerate(px_range_mm):
if p.round(1) == depth_range[0]:
crop_px1.append(en)
crop_px2 = list()
for en, p in enumerate(px_range_mm):
if p.round(1) == depth_range[1]:
crop_px2.append(en)
crop_px = int(np.mean(crop_px1)), int(np.mean(crop_px2))
df_ = df.loc[df.index[min(crop_px)]:df.index[max(crop_px)], :]
df_.index = reversed(df_ex.index[min(crop_px):max(crop_px) + 1])
if width_range is None:
df_data = df_
else:
df_data = df_.loc[:, width_range[0]:width_range[1]]
return df_data
def sem_optode(dfop, RoI, conc):
n = np.sqrt(sum([(RoI[i][1][1] - RoI[i][0][1])*(RoI[i][2][0] - RoI[i][0][0]) for i in range(len(RoI))]))
dfop_sem = dict(map(lambda s: (s, [i.s/n for i in dfop[s]]), dfop.keys()))
optode_sem = pd.concat(list(map(lambda s: pd.DataFrame([np.mean(dfop_sem[s][c:(c+1)]) for c in range(len(conc))],
index=conc, columns=[s]), dfop.keys())), axis=1)
return optode_sem, n
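# Note on sem_optode above: n is the square root of the total pixel count over all RoIs, so
# dividing each standard deviation by n converts the per-RoI SD into a standard error of the mean.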
def uncertainty(para, RoI1, RoI2, conc, dfop1=None, dfop2=None, method='cubic'):
# interpolation for SD
if isinstance(dfop1, pd.DataFrame):
f_op1 = dict(map(lambda s: (s, interpolation_SD(conc=para['concentration'], dfop=dfop1, s=s, method=method)),
dfop1.columns))
# standard error of the mean
optode1_sem, n1 = sem_optode(dfop=dfop1, RoI=RoI1, conc=conc)
# combine for output
uncer_op1 = dict({'SD_interpol': f_op1, 'sem': optode1_sem, 'sample size': n1})
else:
uncer_op1 = None
if isinstance(dfop2, pd.DataFrame):
f_op2 = dict(map(lambda s: (s, interpolation_SD(conc=para['concentration'], dfop=dfop2, s=s, method=method)),
dfop2.columns))
# standard error of the mean
optode2_sem, n2 = sem_optode(dfop=dfop2, RoI=RoI2, conc=conc)
# combine for output
uncer_op2 = dict({'SD_interpol': f_op2, 'sem': optode2_sem, 'sample size': n2})
else:
uncer_op2 = None
return uncer_op1, uncer_op2
def lin_propagation(dfop1, dfop2, n1, n2, RoI1, RoI2, conc):
# normalization
if RoI1: # optode 1
optode1_norm = optode_normalization(dfop=dfop1)
else:
optode1_norm = None
if RoI2: # optode 2
optode2_norm = optode_normalization(dfop=dfop2)
else:
optode2_norm = None
optode_norm = [optode1_norm, optode2_norm]
# standard error of the mean
if RoI1:
optode1_norm_SEM = dict(map(lambda s: (s, pd.DataFrame([i.s / n1 for i in optode1_norm[s]],
index=optode1_norm[s].index)), optode1_norm.keys()))
# interpolation for SD
fnorm_op1 = dict(map(lambda s: (s, interpolation_SD(conc=conc, dfop=optode1_norm, s=s)), optode1_norm.keys()))
else:
optode1_norm_SEM, fnorm_op1 = None, None
if RoI2:
optode2_norm_SEM = dict(map(lambda s: (s, pd.DataFrame([i.s / n2 for i in optode2_norm[s]],
index=optode2_norm[s].index)), optode2_norm.keys()))
# interpolation for SD
fnorm_op2 = dict(map(lambda s: (s, interpolation_SD(conc=conc, dfop=optode2_norm, s=s)), optode2_norm.keys()))
else:
optode2_norm_SEM, fnorm_op2 = None, None
return optode_norm, optode1_norm_SEM, optode2_norm_SEM, fnorm_op1, fnorm_op2
def mc_propagation(conc, dfop, optode_norm, optode_norm_SEM, RoI, uncer_op):
dic_optode_value = dict()
dic_optode_param = dict()
for s in dfop.columns:
if RoI:
[dic_optode_param[s], dic_optode_value[s]] = mcerp_simplifiedSVFit(optode=optode_norm[s].to_numpy(),
conc=conc)
for s in dic_optode_param.keys():
if RoI:
dic_optode_param[s]['I0'] = dfop.loc[0][s]
# -------------------------------------------------------------------
# uncertainty propagation for SEM
# intensity
iratio_normSEM = dict(map(lambda s: (s, pd.Series([ufloat(optode_norm[s].loc[c].n, optode_norm_SEM[s].loc[c][0])
for c in optode_norm[s].index], index=optode_norm[s].index,
name=s)), optode_norm.keys()))
# concentration
ox_normSEM = dict(map(lambda s: (s, dic_optode_value[s]['O2 SD'] / uncer_op['sample size']),
dic_optode_value.keys()))
optode_normSEM = dict(map(lambda s: (s, pd.concat([ox_normSEM[s], iratio_normSEM[s]], axis=1,
keys=['O2 SEM', 'iratio SEM'])), iratio_normSEM.keys()))
return dic_optode_value, dic_optode_param, optode_normSEM
# =====================================================================================
def _simplifiedSV(x, f, k):
"""
fitting function according to the common two site model. In general, x represents the pO2 or pCO2 content, whereas
    k and f are the common fitting parameters
:param x: list
:param k: np.float
:param f: np.float
:return: iratio: normalized signal i0/i
"""
return 1 / (f / (1. + k*x) + (1.-f))
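# Worked example for the simplified two-site model above, using the fit start values from
# _simplified_SVFit_1run below (k=0.165, f=0.887) as purely illustrative parameters:
# at x = 0 %air the normalized ratio is 1 / (0.887 + 0.113) = 1.0, and at x = 100 %air it is
# 1 / (0.887 / (1 + 0.165 * 100) + 0.113) = approx. 6.1.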
def _simplified_SVFit_1run(data, conc, par0=None):
simply_sv = Model(_simplifiedSV)
if par0:
params_sens = simply_sv.make_params(k=par0['k'], f=par0['f'])
else:
params_sens = simply_sv.make_params(k=0.165, f=0.887)
params_sens['k'].min = 0.
params_sens['f'].max = 1.
params_sens['f'].vary = True
params_sens['k'].vary = True
# use i0/i data for fit and re-calculate i afterwards
# full concentration range
result = simply_sv.fit(data, params_sens, x=conc, nan_policy='omit')
return result
def mcerp_simplifiedSVFit(optode, conc):
# use simplifiedSV_run1 to calculate best fit
res = _simplified_SVFit_1run(data=[i.n for i in optode], conc=conc)
# evaluate the covariance matrix of your parameters
covariance = res.covar
# draw random samples from a normal multivariate distribution using the best value of your parameters
# and their covariance matrix
f = N(res.params['f'].value, res.params['f'].stderr**2) # stderr**2 := cov(f,f)
k = N(res.params['k'].value, res.params['k'].stderr**2)
y = [N(o.n, o.s) for o in optode]
params = dict({'f': f, 'k': k, 'covariance': covariance, 'fit result': res})
# calculate x for each point of the sample
O2_calc = O2_analysis_v2(f=f, k=k, iratio=y)
# estimate the mean and standard deviation of x
ox_out = [(O2_calc[ox].mean, np.sqrt(O2_calc[ox].var), optode[ox].n, optode[ox].s) for ox in range(len(conc))]
out = pd.DataFrame(ox_out, index=conc, columns=['O2 mean', 'O2 SD', 'iratio mean', 'iratio SD'])
return params, out
def o2_calculation(inp, dict_ratio_run1, dict_ratio_run2, dpara, surface, px2mm, splitdata=True, run=2, vmin=-50,
vmax=150):
o, s, run = inp.split(',')[0].strip(), inp.split(',')[1].strip(), int(run)
if run == 1:
dratio = dict_ratio_run1
else:
dratio = dict_ratio_run2
dO2_calc = dict()
for o in dratio.keys():
if dratio[o]:
dic_cal = dict(map(lambda s:
(s, O2_analysis_area(para=dpara[o][s], iratio=dratio[o][s])), dratio[o].keys()))
dO2_calc[o] = dic_cal
# post-processing
dO2, dO2_av, dO2_SD = postprocessing_v1(dO2_calc=dO2_calc, px2mm=px2mm, surface=surface, split=splitdata, vmin=vmin,
vmax=vmax)
return dO2, dO2_av, dO2_SD
def O2_analysis_v2(f, k, iratio):
"""
    :param f: mcerp.UncertainVariable containing a normally distributed sample of values around the best value of the
    fit parameter f and its covariance value as sigma
    :param k: mcerp.UncertainVariable containing a normally distributed sample of values around the best value of
    the fit parameter k and its covariance value as sigma
    :param iratio: list of mcerp.UncertainVariables containing a normally distributed sample of the intensity ratio
    (mu is the average value and sigma is the propagated error)
return x:
"""
# mean O2 concentration
x = [1/k * (f / ((1/y) + f -1) -1) for y in iratio]
return x
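# Plain-float sanity check of the inversion above (in the analysis, y is an mcerp variable;
# ordinary floats are used here only for illustration): with f = 0.887, k = 0.165 and a
# normalized ratio y = approx. 6.1 (the forward value of _simplifiedSV at 100 %air),
# 1/k * (f / ((1/y) + f - 1) - 1) recovers approx. 100 %air, i.e. this is the inverse of _simplifiedSV.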
def O2_analysis_area(para, iratio, iratio_std=None, int_type='norm'):
"""
    :param f: mcerp.UncertainVariable containing a normally distributed sample of values around the best value of the
    fit parameter f and its covariance value as sigma
    :param k: mcerp.UncertainVariable containing a normally distributed sample of values around the best value of the
    fit parameter k and its covariance value as sigma
    :param iratio: array of mcerp.UncertainVariables containing a normally distributed sample of the intensity ratio
    (mu is the average value and sigma is the propagated error) or only mean values as np.float64
return x:
"""
# create ufloat for uncertainty propagation via parameter
f_mp = ufloat(para.loc['f'][0], para.loc['f'][1])
k_mp = ufloat(para.loc['k'][0], para.loc['k'][1])
if int_type == 'norm':
int_arr = unumpy.uarray(np.array(iratio.to_numpy()), np.array(iratio_std.to_numpy()))
else:
i0_mp = ufloat(para.loc['I0'][0], para.loc['I0'][1])
if isinstance(iratio, (np.ndarray, np.generic)):
iratio_arr = unumpy.uarray(iratio, np.array(np.zeros(shape=(iratio.shape))))
else:
iratio_arr = unumpy.uarray(iratio.values, np.array(np.zeros(shape=(iratio.shape))))
int_arr = iratio_arr / i0_mp
# intermediate value calculation for x = 1/k * (np.divide(f, np.divide(1, inorm) + f - 1) - 1)
a = int_arr + f_mp - 1
b = f_mp / a - 1
# final O2 concentration
x = 1 / k_mp * b
df_x = pd.DataFrame(x, index=pd.DataFrame(iratio).index, columns=pd.DataFrame(iratio).columns)
return df_x
# =====================================================================================
def fsigmoid(x, a, b, c):
return c / (1.0 + np.exp(-a * (x - b)))
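# fsigmoid is a logistic step with amplitude c, midpoint b and steepness a; evaluated at x = b it
# returns c / 2 for any a. It is used below in interpolation_microsensor to fit the measured
# microsensor profile before resampling it onto the optode depth axis.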
def interpolation_microsensor(df_ms, profile_ex):
smodel = Model(fsigmoid)
# interpolation of microsensor to step width of optode
params = smodel.make_params(a=-15, b=1, c=50)
res_ms = smodel.fit(df_ms.loc[1:16, :]['Intensity'].to_numpy(), x=df_ms.loc[1:16, :]['Depth (mm)'].to_numpy(),
params=params)
xnew = profile_ex.index
ydata = fsigmoid(x=xnew, a=res_ms.best_values['a'], b=res_ms.best_values['b'], c=res_ms.best_values['c'])
data_ms = pd.DataFrame(ydata, index=xnew)
data_ms.columns = ['microsensor']
return data_ms
def geometric_intersection(treshold, dd, column):
# generate curve
second_line = LineString(np.column_stack((dd.index, [treshold]*dd.shape[0])))
first_line = LineString(np.column_stack((dd.index, dd[column].to_numpy())))
# geometric determination of intersection points
intersection = first_line.intersection(second_line)
try:
xdata = LineString(intersection).xy
except:
xdata = intersection.xy
return xdata
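def _geometric_intersection_example():
    """Illustrative sketch added for clarity (not part of the original analysis): find where a
    straight profile y = x crosses a constant threshold of 0.5 with shapely, mirroring the
    logic of geometric_intersection above."""
    profile = LineString([(0.0, 0.0), (1.0, 1.0)])
    threshold_line = LineString([(0.0, 0.5), (1.0, 0.5)])
    point = profile.intersection(threshold_line)  # POINT (0.5 0.5)
    return point.xy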
def penetration_depth(dO2_lp, ls_kernel, df_ms, treshold):
# combine relevant line profiles
dprofile = dict()
for kshape in ls_kernel:
if len(dO2_lp[kshape]['square'].keys()) != 0:
depth = pd.concat([dO2_lp[kshape]['vertical'][0], dO2_lp[kshape]['horizontal'][0],
dO2_lp[kshape]['square'][0]], axis=1)
col = dO2_lp[kshape].keys()
else:
depth = pd.concat([dO2_lp[kshape]['vertical'][0], dO2_lp[kshape]['horizontal'][0]], axis=1)
col = ['vertical', 'horizontal']
depth.columns = [i + '-' + j for i in col for j in ['mean', 'SD']]
dprofile[kshape] = depth
# exponential decay for interpolation of micro-sensor data close to the transition
data_ms = interpolation_microsensor(df_ms=df_ms, profile_ex=dprofile[ls_kernel[0]])
# geometric intersection of line profile and O2 threshold for penetration depth
dd = dict(map(lambda k: (k, pd.concat([dprofile[k].filter(like='mean'), data_ms], axis=1)), ls_kernel))
# minimal line profile
dd_min = dict(map(lambda k: (k, pd.concat([pd.DataFrame([dprofile[k][c + '-mean'] - dprofile[k][c + '-SD']
for c in col], index=col).T, data_ms['microsensor']],
axis=1)), ls_kernel))
# maximal line profile
dd_max = dict(map(lambda k: (k, pd.concat([pd.DataFrame([dprofile[k][c + '-mean'] + dprofile[k][c + '-SD']
for c in col], index=col).T, data_ms['microsensor']],
axis=1)), ls_kernel))
ydepth = pd.concat([pd.DataFrame([geometric_intersection(treshold=treshold, dd=dd[k], column=d)[0][0]
for d in dd[k].columns], index=dd[k].columns) for k in ls_kernel], axis=1).T
ydepth_min = pd.concat([pd.DataFrame([geometric_intersection(treshold=treshold, dd=dd_min[k], column=d)[0][0]
for d in dd_min[k].columns], index=dd_min[k].columns) for k in ls_kernel],
axis=1).T
ydepth_max = pd.concat([pd.DataFrame([geometric_intersection(treshold=treshold, dd=dd_max[k], column=d)[0][0]
for d in dd_max[k].columns], index=dd_max[k].columns) for k in ls_kernel],
axis=1).T
ydepth.index, ydepth_min.index, ydepth_max.index = ls_kernel, ls_kernel, ls_kernel
ydepth.columns = [i.split('-')[0] for i in ydepth.columns]
return ydepth, ydepth_min, ydepth_max
# =====================================================================================
def saving_res(save_name, conc, crop_op, RoI1_av, RoI2_av, df_initial, df_norm, dop1_param, dop2_param, dop1_value,
dop2_value, op1_normSEM, op2_normSEM):
# open h5 file
f = h5py.File(save_name, "w")
# -----------------------------
# [group creation]
# header
grp_header = f.create_group('header')
supgrp_nRoI = grp_header.create_group("Pixels for optode")
supgrp_nRoI1 = supgrp_nRoI.create_group("optode1")
supgrp_nRoI2 = supgrp_nRoI.create_group("optode2")
supgrp_RoI = grp_header.create_group("RoI for optode")
supgrp_RoI1 = supgrp_RoI.create_group("optode1")
supgrp_RoI2 = supgrp_RoI.create_group("optode2")
supgrp_conc = grp_header.create_group("concentration point")
# data group
grp_data = f.create_group("data")
supgrp_av = grp_data.create_group('averaged')
supgrp_av1 = supgrp_av.create_group('optode1')
supgrp_av2 = supgrp_av.create_group('optode2')
supgrp_norm = grp_data.create_group('normalized')
supgrp_norm1 = supgrp_norm.create_group('optode1')
supgrp_norm2 = supgrp_norm.create_group('optode2')
# group related to fit process
grp_fit = f.create_group("fit")
supgrp_params = grp_fit.create_group("parameter")
supgrp_params1 = supgrp_params.create_group('optode1')
supgrp_params2 = supgrp_params.create_group('optode2')
supgrp_cov = grp_fit.create_group("covariance matrix")
supgrp_cov1 = supgrp_cov.create_group('optode1')
supgrp_cov2 = supgrp_cov.create_group('optode2')
supgrp_chi = grp_fit.create_group("reduced chi-square")
supgrp_chi1 = supgrp_chi.create_group('optode1')
supgrp_chi2 = supgrp_chi.create_group('optode2')
supgrp_values = grp_fit.create_group("values")
supgrp_values1 = supgrp_values.create_group('optode1')
supgrp_v1_o2av = supgrp_values1.create_group('O2 mean')
supgrp_v1_o2sd = supgrp_values1.create_group('O2 SD')
supgrp_v1_o2sem = supgrp_values1.create_group('O2 SEM')
supgrp_v1_iav = supgrp_values1.create_group('iratio mean')
supgrp_v1_isd = supgrp_values1.create_group('iratio SD')
supgrp_v1_isem = supgrp_values1.create_group('iratio SEM')
supgrp_values2 = supgrp_values.create_group('optode2')
supgrp_v2_o2av = supgrp_values2.create_group('O2 mean')
supgrp_v2_o2sd = supgrp_values2.create_group('O2 SD')
supgrp_v2_o2sem = supgrp_values2.create_group('O2 SEM')
supgrp_v2_iav = supgrp_values2.create_group('iratio mean')
supgrp_v2_isd = supgrp_values2.create_group('iratio SD')
supgrp_v2_isem = supgrp_values2.create_group('iratio SEM')
# --------------------------------------------------------
# [fill groups]
# --------------------------------------------------------
# header
# Pixels for optode
supgrp_nRoI1.create_dataset('RoI1', data=np.array(crop_op[0]))
supgrp_nRoI2.create_dataset('RoI2', data=np.array(crop_op[1]))
# concentration
supgrp_conc.create_dataset('concentration', data=conc)
# RoI within optode
supgrp_RoI1.create_dataset('RoI1', data=np.array(RoI1_av))
    supgrp_RoI2.create_dataset('RoI2', data=np.array(RoI2_av))
# ------------------------------
# data
# supgroup - averaged data
for s in df_initial[0].columns:
v = np.array([[i.n for i in df_initial[0][s].values], [i.s for i in df_initial[0][s].values]])
supgrp_av1.create_dataset(str(s), data=np.array(v))
for s in df_initial[1].columns:
v = np.array([[i.n for i in df_initial[1][s].values], [i.s for i in df_initial[1][s].values]])
supgrp_av2.create_dataset(str(s), data=np.array(v))
# ------------------------------
# supgroup - normalized data
for s in df_norm[0].keys():
v = [[i.n for i in df_norm[0][s].values], [i.s for i in df_norm[0][s].values]]
supgrp_norm1.create_dataset(str(s), data=np.array(v))
for s in df_norm[1].keys():
v = [[i.n for i in df_norm[1][s].values], [i.s for i in df_norm[1][s].values]]
supgrp_norm2.create_dataset(str(s), data=np.array(v))
# ------------------------------
# supgroup - fit parameters
for s in dop1_param.keys():
v = [(dop1_param[s][l].mean, dop1_param[s][l].std) for l in ['f', 'k']]
v += [(dop1_param[s]['I0'].n, dop1_param[s]['I0'].s)]
supgrp_params1.create_dataset(str(s), data=np.array(v))
for s in dop2_param.keys():
v = [(dop2_param[s][l].mean, dop2_param[s][l].std) for l in ['f', 'k']]
v += [(dop2_param[s]['I0'].n, dop2_param[s]['I0'].s)]
supgrp_params2.create_dataset(str(s), data=np.array(v))
# ------------------------------
# supgroup - covariance matrix
for s in dop1_param.keys():
supgrp_cov1.create_dataset(str(s), data=np.array(dop1_param[s]['covariance']))
for s in dop2_param.keys():
supgrp_cov2.create_dataset(str(s), data=np.array(dop2_param[s]['covariance']))
# ------------------------------
    # supgroup - reduced chi-square
    for s in dop1_param.keys():
        supgrp_chi1.create_dataset(str(s), data=np.array(dop1_param[s]['fit result'].redchi))
    for s in dop2_param.keys():
        supgrp_chi2.create_dataset(str(s), data=np.array(dop2_param[s]['fit result'].redchi))
# ------------------------------
# supgroup - fit values
# columns - [O2 mean, O2 SD, iratio mean, iratio SD, O2 SEM, iratio SEM]
for s in dop1_value.keys():
supgrp_v1_o2av.create_dataset(str(s), data=dop1_value[s]['O2 mean'].to_numpy())
supgrp_v1_o2sd.create_dataset(str(s), data=dop1_value[s]['O2 SD'].to_numpy())
supgrp_v1_o2sem.create_dataset(str(s), data=op1_normSEM[s]['O2 SEM'].to_numpy())
supgrp_v1_iav.create_dataset(str(s), data=dop1_value[s]['iratio mean'].to_numpy())
supgrp_v1_isd.create_dataset(str(s), data=dop1_value[s]['iratio SD'].to_numpy())
supgrp_v1_isem.create_dataset(str(s), data=[i.s for i in op1_normSEM['set1']['iratio SEM']])
for s in dop2_value.keys():
supgrp_v2_o2av.create_dataset(str(s), data=dop2_value[s]['O2 mean'].to_numpy())
supgrp_v2_o2sd.create_dataset(str(s), data=dop2_value[s]['O2 SD'].to_numpy())
supgrp_v2_o2sem.create_dataset(str(s), data=op2_normSEM[s]['O2 SEM'].to_numpy())
supgrp_v2_iav.create_dataset(str(s), data=dop2_value[s]['iratio mean'].to_numpy())
supgrp_v2_isd.create_dataset(str(s), data=dop2_value[s]['iratio SD'].to_numpy())
supgrp_v2_isem.create_dataset(str(s), data=[i.s for i in op2_normSEM['set1']['iratio SEM']])
print('saving done')
# ------------------------------------------------------------------------------
f.close()
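# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original pipeline): how the HDF5
# hierarchy written above could be inspected after saving. The file name
# 'calibration_results.hdf5' is a hypothetical placeholder.
def _example_inspect_saved_hdf5(fname='calibration_results.hdf5'):
    with h5py.File(fname, 'r') as f5:
        def _show(name, obj):
            # print each dataset path with its shape, and groups as folders
            if isinstance(obj, h5py.Dataset):
                print(name, obj.shape)
            else:
                print(name + '/')
        f5.visititems(_show)
        # e.g. pull the fit parameters of optode1 / first settings group
        # params = np.array(f5['fit/parameter/optode1']['set1'])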
def save_o2results(save_name, inp, file_meas, RoI_op, px2mm, surface, dO2_av, dO2_SD):
# preparation
name_files = list()
for f in glob(file_meas + '/*R*.tif'):
name_files.append(f.split('/')[-1])
# ------------------------------------------------------------------------------
# saving
f = h5py.File(save_name, "w")
# group creation
grp_header = f.create_group('header')
grp_data = f.create_group('data')
supgrp1 = grp_data.create_group("optode1")
supgrp1av = supgrp1.create_group("O2 mean")
supgrp1sd = supgrp1.create_group("O2 SD")
supgrp2 = grp_data.create_group("optode2")
supgrp2av = supgrp2.create_group("O2 mean")
supgrp2sd = supgrp2.create_group("O2 SD")
# --------------------------------------------
# fill groups
dt = h5py.special_dtype(vlen=str)
grp_header.create_dataset('measurement analysed', data=np.array(name_files, dtype='O'), dtype=dt)
grp_header.create_dataset('pixel selected', data=np.array(RoI_op))
grp_header.create_dataset('px2mm', data=px2mm)
v = [dO2_av[inp.split(',')[0]][inp.split(',')[1].strip()].columns.to_numpy(),
dO2_av[inp.split(',')[0]][inp.split(',')[1].strip()].index.to_numpy()]
grp_header.create_dataset('image size', data=np.array(v, dtype='O'), dtype=dt)
grp_header.create_dataset('surface level', data=np.array(surface))
# --------------------
for k, v in dO2_av['optode1'].items():
supgrp1av.create_dataset(str(k), data=np.array(v))
supgrp1sd.create_dataset(str(k), data=np.array(dO2_SD['optode1'][k]))
for k, v in dO2_av['optode2'].items():
supgrp2av.create_dataset(str(k), data=np.array(v))
supgrp2sd.create_dataset(str(k), data=np.array(dO2_SD['optode2'][k]))
print('saving done')
# ------------------------------------------------------------------------------
f.close()
# =====================================================================================
def load_analysis_v3(dir_res):
with h5py.File(dir_res, 'r') as f:
# load header infos
header = f['header']
roi = dict()
dic_header = dict()
for k in header.keys(): # concentration (point) and pixel
for ki in header[k].keys():
if ki != 'concentration' and k != 'concentration point':
for v in header[k][ki].values():
roi[ki] = (np.array(v))
dic_header[k] = roi
else:
conc = np.array(header[k][ki])
dic_header['concentration'] = conc
# --------------------------------------------------------
# load data info
data = f['data']
dic_data = dict()
for k in data.keys():
data_v = dict()
for ki in data[k].keys():
data_v1 = dict()
for en, v in enumerate(data[k][ki].values()):
data_v1['set{:.0f}'.format(en + 1)] = np.array(v)
data_v[ki] = data_v1
dic_data[k] = data_v
# --------------------------------------------------------
# load fit info
# parameters - first f then k parameter; first mean then sigma
Fit = f['fit']
dic_fit = dict()
for k in Fit.keys():
dic_values = dict()
if 'reduced' in k:
chi_ls = list()
for ki in Fit[k].keys():
chi_ls = np.array(Fit[k][ki])
dic_values[ki] = chi_ls
else:
for ki in Fit[k].keys():
dic_v = dict()
for en, v in enumerate(Fit[k][ki].values()):
dic_v['set{:.0f}'.format(en + 1)] = np.array(v)
dic_values[ki] = dic_v
dic_fit[k] = dic_values
# --------------------------------------------------------
# re-arrange data format
dnorm = dict(map(lambda o:
(o, dict(map(lambda s: (s, pd.DataFrame(dic_data['normalized'][o][s], index=['iratio', 'SD'],
columns=dic_header['concentration']).T),
dic_data['normalized'][o].keys()))), dic_data['normalized'].keys()))
dinitial = dict(map(lambda o:
(o, dict(map(lambda s: (s, pd.DataFrame(dic_data['averaged'][o][s], index=['iratio', 'SD'],
columns=dic_header['concentration']).T),
dic_data['averaged'][o].keys()))), dic_data['averaged'].keys()))
return dic_header, dinitial, dnorm, dic_fit
def load_calibration_para_v1(path_calib):
# load calibration
dic_header, dinitial, dnorm, dic_fit = load_analysis_v3(path_calib)
# extract fit parameter
para = dict(map(lambda o: (o, dict(map(lambda s:
(s, pd.DataFrame(dic_fit['parameter'][o][s], index=['f', 'k', 'I0'],
columns=['mean', 'SD'])), dic_fit['parameter'][o].keys()))),
dic_fit['parameter'].keys()))
return para
def load_calibResults(path_calib):
# load calibration
dic_header, dinitial, dnorm, dic_fit = load_analysis_v3(path_calib)
# extract fit parameter
para = dict(map(lambda o:
(o, dict(map(lambda s: (s, pd.DataFrame(dic_fit['parameter'][o][s], index=['f', 'k', 'i0'],
columns=['mean', 'SD'])), dic_fit['parameter'][o].keys()))),
dic_fit['parameter'].keys()))
return dic_header, dinitial, dnorm, para
def load_calibration_info(path, RoI1, height, server=True, channel=('R', 'G')):
# red channel of calibration point as array
dcal_R1 = dict()
dcal_R2 = dict()
dcal_R3 = dict()
# green channel (G1) of calibration point as array
dcal_G1 = dict()
dcal_G2 = dict()
dcal_G3 = dict()
# concentration of calibration point (integer)
dcal_conc1 = list()
dcal_conc2 = list()
dcal_conc3 = list()
for f in glob(path + '*_{}*.tif'.format(channel[0])):
if server is True:
fname_R = f.split('calibration/')[1].split('.')[0]
else:
fname_R = f.split('calibration')[1].split('.')[0] # calibration
if 'Cal' in fname_R:
# green channel
fname_G = f.split(channel[0])[0] + '{}.tif'.format(channel[1])
# setting 1
if 'setting1' in fname_R:
conc = fname_R.split('_')[1]
                dcal_conc1.append(int(conc.split('%')[0]))
# -----------------------------------
# store red channel as array
pic_R = Image.open(f)
imR_ = np.array(pic_R)
# load optode into dictionary
imarrayR = list(map(lambda r: imR_[RoI1[r][0][1]:RoI1[r][1][1] + 1], range(len(RoI1))))
imR = list(map(lambda r: np.stack([imarrayR[r][n][RoI1[r][0][0]:RoI1[r][2][0] + 1]
for n in np.arange(height[r] + 1)], axis=0), range(len(RoI1))))
# combining red-channel images
dcal_R1[conc] = imR
# -----------------------------------
# store green (G1) channel as array
pic_G = Image.open(fname_G)
imG_ = np.array(pic_G)
# load optode into dictionary
imarrayG = list(map(lambda r: imG_[RoI1[r][0][1]:RoI1[r][1][1] + 1], range(len(RoI1))))
imG = list(map(lambda r: np.stack([imarrayG[r][n][RoI1[r][0][0]:RoI1[r][2][0] + 1]
for n in np.arange(height[r] + 1)], axis=0), range(len(RoI1))))
# combining red-channel images
dcal_G1[conc] = imG
# setting 2
if 'setting2' in fname_R:
conc = fname_R.split('_')[1]
                dcal_conc2.append(int(conc.split('%')[0]))
# -----------------------------------
# store red channel as array
pic_R = Image.open(f)
imR_ = np.array(pic_R)
# load optode into dictionary
imarrayR = list(map(lambda r: imR_[RoI1[r][0][1]:RoI1[r][1][1] + 1], range(len(RoI1))))
imR = list(map(lambda r: np.stack([imarrayR[r][n][RoI1[r][0][0]:RoI1[r][2][0] + 1]
for n in np.arange(height[r] + 1)], axis=0), range(len(RoI1))))
# combining red-channel images
dcal_R2[conc] = imR
# -----------------------------------
# store green (G1) channel as array
pic_G = Image.open(fname_G)
imG_ = np.array(pic_G)
# load optode into dictionary
imarrayG = list(map(lambda r: imG_[RoI1[r][0][1]:RoI1[r][1][1] + 1], range(len(RoI1))))
imG = list(map(lambda r: np.stack([imarrayG[r][n][RoI1[r][0][0]:RoI1[r][2][0] + 1]
for n in np.arange(height[r] + 1)], axis=0), range(len(RoI1))))
# combining red-channel images
dcal_G2[conc] = imG
# setting 3
if 'setting3' in fname_R:
conc = fname_R.split('_')[1]
                dcal_conc3.append(int(conc.split('%')[0]))
# -----------------------------------
# store red channel as array
pic_R = Image.open(f)
imR_ = np.array(pic_R)
# load optode into dictionary
imarrayR = list(map(lambda r: imR_[RoI1[r][0][1]:RoI1[r][1][1] + 1], range(len(RoI1))))
imR = list(map(lambda r: np.stack([imarrayR[r][n][RoI1[r][0][0]:RoI1[r][2][0] + 1]
for n in np.arange(height[r] + 1)], axis=0), range(len(RoI1))))
# combining red-channel images
dcal_R3[conc] = imR
# -----------------------------------
# store green (G1) channel as array
pic_G = Image.open(fname_G)
imG_ = np.array(pic_G)
# load optode into dictionary
imarrayG = list(map(lambda r: imG_[RoI1[r][0][1]:RoI1[r][1][1] + 1], range(len(RoI1))))
imG = list(map(lambda r: np.stack([imarrayG[r][n][RoI1[r][0][0]:RoI1[r][2][0] + 1]
for n in np.arange(height[r] + 1)], axis=0), range(len(RoI1))))
# combining red-channel images
dcal_G3[conc] = imG
# combine settings
dict_red = dict({'set1': dcal_R1, 'set2': dcal_R2, 'set3': dcal_R3})
dict_green = dict({'set1': dcal_G1, 'set2': dcal_G2, 'set3': dcal_G3})
dict_conc = dict({'set1': dcal_conc1, 'set2': dcal_conc2, 'set3': dcal_conc3})
return dict_red, dict_green, dict_conc
def load_calibration_info_v1(path, RoI1, height, server=True, channel=('B', 'G2')):
# red channel of calibration point as array
dcal_R1 = dict()
dcal_R2 = dict()
dcal_R3 = dict()
# green channel (G1) of calibration point as array
dcal_G1 = dict()
dcal_G2 = dict()
dcal_G3 = dict()
# concentration of calibration point (integer)
dcal_conc1 = list()
dcal_conc2 = list()
dcal_conc3 = list()
for f in glob(path + '*_{}*.tif'.format(channel[0])):
if server is True:
fname_R = f.split('calibration/')[1].split('.')[0]
else:
fname_R = 'Cal' + f.split('Cal')[1].split('.')[0] # calibration
if 'Cal' in fname_R:
# green channel
fname_G = f.split(channel[0])[0] + '{}.tif'.format(channel[1])
# setting 1
if 'setting1' in fname_R:
conc = fname_R.split('_')[1]
                dcal_conc1.append(int(conc.split('%')[0]))
# -----------------------------------
# store red channel as array
pic_R = Image.open(f)
imR_ = np.array(pic_R)
# load optode into dictionary
imarrayR = list(map(lambda r: imR_[RoI1[r][0][1]:RoI1[r][1][1] + 1], range(len(RoI1))))
imR = list(map(lambda r: np.stack([imarrayR[r][n][RoI1[r][0][0]:RoI1[r][2][0] + 1]
for n in np.arange(height[r] + 1)], axis=0), range(len(RoI1))))
# combining red-channel images
dcal_R1[conc] = imR
# -----------------------------------
# store green (G1) channel as array
pic_G = Image.open(fname_G)
imG_ = np.array(pic_G)
# load optode into dictionary
imarrayG = list(map(lambda r: imG_[RoI1[r][0][1]:RoI1[r][1][1] + 1], range(len(RoI1))))
imG = list(map(lambda r: np.stack([imarrayG[r][n][RoI1[r][0][0]:RoI1[r][2][0] + 1]
for n in np.arange(height[r] + 1)], axis=0), range(len(RoI1))))
# combining red-channel images
dcal_G1[conc] = imG
# setting 2
if 'setting2' in fname_R:
conc = fname_R.split('_')[1]
                dcal_conc2.append(int(conc.split('%')[0]))
# -----------------------------------
# store red channel as array
pic_R = Image.open(f)
imR_ = np.array(pic_R)
# load optode into dictionary
imarrayR = list(map(lambda r: imR_[RoI1[r][0][1]:RoI1[r][1][1] + 1], range(len(RoI1))))
imR = list(map(lambda r: np.stack([imarrayR[r][n][RoI1[r][0][0]:RoI1[r][2][0] + 1]
for n in np.arange(height[r] + 1)], axis=0), range(len(RoI1))))
# combining red-channel images
dcal_R2[conc] = imR
# -----------------------------------
# store green (G1) channel as array
pic_G = Image.open(fname_G)
imG_ = np.array(pic_G)
# load optode into dictionary
imarrayG = list(map(lambda r: imG_[RoI1[r][0][1]:RoI1[r][1][1] + 1], range(len(RoI1))))
imG = list(map(lambda r: np.stack([imarrayG[r][n][RoI1[r][0][0]:RoI1[r][2][0] + 1]
for n in np.arange(height[r] + 1)], axis=0), range(len(RoI1))))
# combining red-channel images
dcal_G2[conc] = imG
# setting 3
if 'setting3' in fname_R:
conc = fname_R.split('_')[1]
                dcal_conc3.append(int(conc.split('%')[0]))
# -----------------------------------
# store red channel as array
pic_R = Image.open(f)
imR_ = np.array(pic_R)
# load optode into dictionary
imarrayR = list(map(lambda r: imR_[RoI1[r][0][1]:RoI1[r][1][1] + 1], range(len(RoI1))))
imR = list(map(lambda r: np.stack([imarrayR[r][n][RoI1[r][0][0]:RoI1[r][2][0] + 1]
for n in np.arange(height[r] + 1)], axis=0), range(len(RoI1))))
# combining red-channel images
dcal_R3[conc] = imR
# -----------------------------------
# store green (G1) channel as array
pic_G = Image.open(fname_G)
imG_ = np.array(pic_G)
# load optode into dictionary
imarrayG = list(map(lambda r: imG_[RoI1[r][0][1]:RoI1[r][1][1] + 1], range(len(RoI1))))
imG = list(map(lambda r: np.stack([imarrayG[r][n][RoI1[r][0][0]:RoI1[r][2][0] + 1]
for n in np.arange(height[r] + 1)], axis=0), range(len(RoI1))))
# combining red-channel images
dcal_G3[conc] = imG
# combine settings
dict_red = dict({'set1': dcal_R1, 'set2': dcal_R2, 'set3': dcal_R3})
dict_green = dict({'set1': dcal_G1, 'set2': dcal_G2, 'set3': dcal_G3})
dict_conc = dict({'set1': dcal_conc1, 'set2': dcal_conc2, 'set3': dcal_conc3})
return dict_red, dict_green, dict_conc
def load_files(path, RoI1, height, channel=('R', 'G')):
# red channel of calibration point as array
dcal_R1 = dict()
dcal_R2 = dict()
dcal_R3 = dict()
# green channel (G1) of calibration point as array
dcal_G1 = dict()
dcal_G2 = dict()
dcal_G3 = dict()
for f in glob(path + '*_{}*.tif'.format(channel[0])):
fname_R = f.split('/')[-1].split('.')[0]
if 'gradient' in fname_R:
# green channel
fname_G = f.split(channel[0])[0] + '{}.tif'.format(channel[1])
# setting 1
if 'settings1' in fname_R:
if 'new' in f:
count = 'new-' + f.split('settings')[1].split('_')[0]
else:
count = f.split('settings')[1].split('_')[0]
# -----------------------------------
# store red channel as array
pic_R = Image.open(f)
imR_ = np.array(pic_R)
imR = dict()
for o in height.keys():
imarrayR = imR_[RoI1[o][0][1]:RoI1[o][1][1] + 1]
imR1_ = np.stack([imarrayR[n][RoI1[o][0][0]:RoI1[o][2][0] + 1] for n in np.arange(height[o] + 1)],
axis=0)
imR[o] = imR1_
# 2nd optode - combining red-channel images
dcal_R1[count] = imR
# -----------------------------------
# store green (G1) channel as array
pic_G = Image.open(fname_G)
imG_ = np.array(pic_G)
imG = dict()
for o in height.keys():
imarrayG = imG_[RoI1[o][0][1]:RoI1[o][1][1] + 1]
imG1_ = np.stack([imarrayG[n][RoI1[o][0][0]:RoI1[o][2][0] + 1] for n in np.arange(height[o] + 1)],
axis=0)
imG[o] = imG1_
# 2nd optode - combining red-channel images
dcal_G1[count] = imG
# setting 2
            if 'settings2' in fname_R:
                # file counter (same convention as for settings1)
                if 'new' in f:
                    count = 'new-' + f.split('settings')[1].split('_')[0]
                else:
                    count = f.split('settings')[1].split('_')[0]
                # -----------------------------------
                # store red channel as array
pic_R = Image.open(f)
imR_ = np.array(pic_R)
imR = dict()
for o in height.keys():
imarrayR = imR_[RoI1[o][0][1]:RoI1[o][1][1] + 1]
imR1 = np.stack([imarrayR[n][RoI1[o][0][0]:RoI1[o][2][0] + 1] for n in np.arange(height[o] + 1)],
axis=0)
imR[o] = imR1
# 2nd optode - combining red-channel images
dcal_R2[count] = imR
# -----------------------------------
# store green (G1) channel as array
pic_G = Image.open(fname_G)
imG_ = np.array(pic_G)
imG = dict()
for o in height.keys():
imarrayG = imG_[RoI1[o][0][1]:RoI1[o][1][1] + 1]
imG1 = np.stack([imarrayG[n][RoI1[o][0][0]:RoI1[o][2][0] + 1] for n in np.arange(height[o] + 1)],
axis=0)
imG[o] = imG1
# 2nd optode - combining red-channel images
dcal_G2[count] = imG
# setting 3
            if 'settings3' in fname_R:
                # file counter (same convention as for settings1)
                if 'new' in f:
                    count = 'new-' + f.split('settings')[1].split('_')[0]
                else:
                    count = f.split('settings')[1].split('_')[0]
                # -----------------------------------
                # store red channel as array
pic_R = Image.open(f)
imR_ = np.array(pic_R)
imR = dict()
for o in height.keys():
imarrayR = imR_[RoI1[o][0][1]:RoI1[o][1][1] + 1]
imR1 = np.stack([imarrayR[n][RoI1[o][0][0]:RoI1[o][2][0] + 1] for n in np.arange(height[o] + 1)],
axis=0)
imR[o] = imR1
# 2nd optode - combining red-channel images
dcal_R3[count] = imR
# -----------------------------------
# store green (G1) channel as array
pic_G = Image.open(fname_G)
imG_ = np.array(pic_G)
# 1st optode
imG = dict()
for o in height.keys():
imarrayG = imG_[RoI1[o][0][1]:RoI1[o][1][1] + 1]
imG1 = np.stack([imarrayG[n][RoI1[o][0][0]:RoI1[o][2][0] + 1] for n in np.arange(height[o] + 1)],
axis=0)
imG[o] = imG1
# 2nd optode - combining red-channel images
dcal_G3[count] = imG
# combine settings
dict_red = dict({'set1': dcal_R1, 'set2': dcal_R2, 'set3': dcal_R3})
dict_green = dict({'set1': dcal_G1, 'set2': dcal_G2, 'set3': dcal_G3})
return dict_red, dict_green
def read_microsensor(file_ms, encoding='latin-1'):
# initial inspection - how many runs, where to find data
ls_run = list()
with open(file_ms, 'r', encoding=encoding) as f:
for en, line in enumerate(f.readlines()):
if '****' in line:
ls_run.append(en)
ddata = dict()
l_data = list()
l_data1 = list()
with open(file_ms, 'r', encoding='latin-1') as f:
for en, line in enumerate(f.readlines()):
if ls_run[0] - 1 <= en <= ls_run[1] + 2:
if 'Date' in line:
date = line.split('\t')[1]
if len(ls_run) > 2:
if ls_run[1] + 3 <= en <= ls_run[2] - 2:
l = [i.replace(',', '.') for i in line.split('\t')]
l_data.append(l)
elif en >= ls_run[3] + 3:
l = [i.replace(',', '.') for i in line.split('\t')]
l_data1.append(l)
else:
if ls_run[1] + 3 <= en:
l = [i.replace(',', '.') for i in line.split('\t')]
l_data.append(l)
ddata['run1'] = l_data
ddata['run2'] = l_data1
# re-arrangement of data
dic_micro = dict()
for k in ddata.keys():
df_ = pd.DataFrame(ddata[k])
df_ = df_.T.set_index([0, 1]).T
df = df_.set_index('Time')
df_crop = df[df.columns[:2]]
unit = df_crop.columns.levels[1][-1]
df_crop.columns = ['Depth {}'.format(unit), 'Intensity (mV)']
# convert index into datetime format
index_time = [datetime.datetime.strptime(date + ' ' + t, '%d-%m-%Y %H:%M:%S') for t in df_crop.index]
df_crop.index = index_time
df_select = df_crop.astype(float)
dic_micro[k] = df_select
return dic_micro
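# --------------------------------------------------------------------------
# Illustrative sketch: reading a microsensor export with the parser above and
# averaging the intensity per depth for each run. The file name
# 'microsensor_profile.txt' is a hypothetical placeholder; the parser expects
# the tab-separated export with '****' separators between runs.
def _example_read_microsensor(file_ms='microsensor_profile.txt'):
    dic_micro = read_microsensor(file_ms)
    for run, df in dic_micro.items():
        # group by the depth column (first column) and average the intensity
        prof = df.groupby(df.columns[0])['Intensity (mV)'].mean()
        print(run, prof.head())
    return dic_micro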
# ====================================================================================================
def imageblur(kernel, kshape, dic_int, direction='horizontal'):
if kernel == 'blur':
if direction == 'horizontal':
dst = cv2.blur(dic_int, kshape)
elif direction == 'vertical':
dst = cv2.blur(dic_int.T, kshape).T
else:
raise ValueError('define direction of kernel as either horizontal or vertical')
elif kernel == 'filter':
kernel = np.ones(kshape, np.float32) / (kshape[0] * kshape[1])
if direction == 'horizontal':
dst = cv2.filter2D(dic_int, -1, kernel)
elif direction == 'vertical':
dst = cv2.filter2D(dic_int.T, -1, kernel).T
else:
raise ValueError('define direction of kernel as either horizontal or vertical')
elif kernel == 'gauss':
# sigmaX and sigmaY are set as 0 --> calculation from kernel
if direction == 'horizontal':
dst = cv2.GaussianBlur(dic_int, kshape, 0)
elif direction == 'vertical':
dst = cv2.GaussianBlur(dic_int.T, kshape, 0).T
else:
raise ValueError('define direction of kernel as either horizontal or vertical')
return dst
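# --------------------------------------------------------------------------
# Illustrative sketch: comparing the three kernel options of imageblur on a
# synthetic noisy image. The kernel shape (5, 5) is an arbitrary example value
# (the Gaussian option needs odd kernel dimensions).
def _example_imageblur_comparison():
    rng = np.random.RandomState(0)
    img = rng.normal(loc=100., scale=10., size=(60, 80)).astype(np.float32)
    blur_box = imageblur(kernel='blur', kshape=(5, 5), dic_int=img)
    blur_filt = imageblur(kernel='filter', kshape=(5, 5), dic_int=img)
    blur_gauss = imageblur(kernel='gauss', kshape=(5, 5), dic_int=img)
    return blur_box, blur_filt, blur_gauss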
def savgol_smooth(dic_int, direction, window, polynom):
if direction == 'horizontal':
dst = [savgol_filter(i, window, polynom) for i in dic_int]
elif direction == 'vertical':
dst = np.transpose([savgol_filter(dic_int[:, i], window, polynom) for i in range(dic_int.shape[1])])
elif direction == 'square':
dst = sgolay2d(dic_int, window_size=window, order=polynom)
else:
        raise ValueError('define direction as either horizontal, vertical or square')
return dst
def blurimage(o, s, kernel, kshape, dint, px2mm=None, surface=None, conversion=True):
# Depth profile with (horizontal, vertical, and square) Gaussian blur for one example
if kernel == 'savgol':
# vertical blur
dst_v = savgol_smooth(dic_int=dint[o][s], window=kshape[1], polynom=kshape[0], direction='vertical')
# horizontal blur
dst_h = savgol_smooth(dic_int=dint[o][s], window=kshape[1], polynom=kshape[0], direction='horizontal')
# square blur
dst = savgol_smooth(dic_int=dint[o][s], window=kshape[1], polynom=kshape[0], direction='square')
else:
# vertical blur
dst_v = imageblur(kernel=kernel, kshape=(1, kshape[0]), dic_int=dint[o][s], direction='horizontal')
# horizontal blur
dst_h = imageblur(kernel=kernel, kshape=(kshape[0], 1), dic_int=dint[o][s], direction='horizontal')
# square blur
dst = imageblur(kernel=kernel, kshape=kshape, dic_int=dint[o][s], direction='horizontal')
# combine all options in one dictionary
dimages = dict({'vertical': dst_v, 'horizontal': dst_h, 'square': dst})
# convert from px to mm
if conversion is True:
if px2mm is None or surface is None:
            raise ValueError('All parameters for the conversion of px to mm are required. Provide the px2mm and surface parameters.')
dimages = dict(map(lambda d: (d, px2mm_conversion(df=pd.DataFrame(dimages[d]), px2mm=px2mm,
surface=surface[int(o[-1])-1])), dimages.keys()))
return dimages
def blurimage_df(o, kernel, kshape, dint, inorm_uncer, px2mm=None, surface=None, conversion=True):
# split in mean and std
image_av = np.array(list(map(lambda u: [i.n for i in inorm_uncer[u]], range(len(inorm_uncer)))))
image_std = np.array(list(map(lambda u: [i.s for i in inorm_uncer[u]], range(len(inorm_uncer)))))
# Depth profile with (horizontal, vertical, and square) Gaussian blur for one example
if kernel == 'savgol':
# vertical blur
imgv_arr = savgol_smooth(dic_int=image_av, window=kshape[1], polynom=kshape[0], direction='vertical')
imgvSTD_arr = savgol_smooth(dic_int=image_std, window=kshape[1], polynom=kshape[0], direction='vertical')
# horizontal blur
imgh_arr = savgol_smooth(dic_int=image_av, window=kshape[1], polynom=kshape[0], direction='horizontal')
imghSTD_arr = savgol_smooth(dic_int=image_std, window=kshape[1], polynom=kshape[0], direction='horizontal')
# square blur
img_arr = savgol_smooth(dic_int=image_av, window=kshape[1], polynom=kshape[0], direction='square')
imgSTD_arr = savgol_smooth(dic_int=image_std, window=kshape[1], polynom=kshape[0], direction='square')
else:
# vertical blur
imgv_arr = imageblur(kernel=kernel, kshape=(1, kshape[0]), dic_int=np.array(image_av), direction='horizontal')
imgvSTD_arr = imageblur(kernel=kernel, kshape=(1, kshape[0]), dic_int=np.array(image_std),
direction='horizontal')
# horizontal blur
imgh_arr = imageblur(kernel=kernel, kshape=(kshape[0], 1), dic_int=np.array(image_av), direction='horizontal')
imghSTD_arr = imageblur(kernel=kernel, kshape=(kshape[0], 1), dic_int=np.array(image_std),
direction='horizontal')
# square blur
img_arr = imageblur(kernel=kernel, kshape=kshape, dic_int=np.array(image_av), direction='horizontal')
imgSTD_arr = imageblur(kernel=kernel, kshape=kshape, dic_int=np.array(image_std), direction='horizontal')
# combine all options in one dictionary
dst_v = pd.DataFrame(imgv_arr, index=np.arange(0, dint.shape[0]), columns=np.arange(0, dint.shape[1]))
dst_v_std = pd.DataFrame(imgvSTD_arr, index=np.arange(0, dint.shape[0]), columns=np.arange(0, dint.shape[1]))
dst_h = pd.DataFrame(imgh_arr, index=np.arange(0, dint.shape[0]), columns=np.arange(0, dint.shape[1]))
dst_h_std = pd.DataFrame(imghSTD_arr, index=np.arange(0, dint.shape[0]), columns=np.arange(0, dint.shape[1]))
dst = pd.DataFrame(img_arr, index=np.arange(0, dint.shape[0]), columns=np.arange(0, dint.shape[1]))
dst_std = pd.DataFrame(imgSTD_arr, index=np.arange(0, dint.shape[0]), columns=np.arange(0, dint.shape[1]))
dimages = dict({'vertical': dst_v, 'horizontal': dst_h, 'square': dst})
dimagesSTD = dict({'vertical': dst_v_std, 'horizontal': dst_h_std, 'square': dst_std})
# convert from px to mm
if conversion is True:
if px2mm is None or surface is None:
            raise ValueError('All parameters for the conversion of px to mm are required. Provide the px2mm and surface parameters.')
dimages = dict(map(lambda d: (d, px2mm_conversion(df=pd.DataFrame(dimages[d]), px2mm=px2mm,
surface=surface[int(o[-1]) - 1])), dimages.keys()))
dimagesSTD = dict(map(lambda d: (d, px2mm_conversion(df=pd.DataFrame(dimagesSTD[d]), px2mm=px2mm,
surface=surface[int(o[-1]) - 1])), dimages.keys()))
return dimages, dimagesSTD
def blur_normIntensity(dint, I0, kshape, kernel='gauss', px2mm=None, surface=None, o=None, conversion=True):
# determine normalized intensity including uncertainty
i0_mp = ufloat(I0[0], I0[1])
iratio_arr = unumpy.uarray(dint, np.array(np.zeros(shape=(dint.shape))))
inorm_uncer = iratio_arr / i0_mp
# ......................................................................................
# blur image
dimages, dimagesSTD = blurimage_df(o=o, kernel=kernel, kshape=kshape, dint=dint, inorm_uncer=inorm_uncer,
px2mm=px2mm, surface=surface, conversion=conversion)
return dimages, dimagesSTD
def O2blur_optode(inp, path_calib, kernel, kshape, px2mm, surface, depth_min, depth_max, dint_ch1, dint_ch2=None,
blur_pos='ratio'):
# preparation
o = inp.split(',')[0]
s = inp.split(',')[1].strip()
# load calibration
calib_info = load_calibration_para_v1(path_calib=path_calib)
para = calib_info[o][s]
# -------------------------------------------
# blur images
if blur_pos == 'norm':
dimages, dimagesSTD = blur_normIntensity(dint=dint_ch1[o][s], I0=para.loc['I0'].to_numpy(), kernel=kernel,
kshape=kshape, px2mm=px2mm, surface=surface, o=o, conversion=True)
else:
dblur_ch1 = blurimage(o=o, s=s, kernel=kernel, kshape=kshape, dint=dint_ch1, px2mm=px2mm, surface=surface)
if blur_pos == 'ratio':
dimages = dblur_ch1
elif blur_pos == 'single':
# blur individual color channels, then determine ratiometric intensity
dgreen_blur = blurimage(o=o, s=s, kernel=kernel, kshape=kshape, dint=dint_ch2, px2mm=px2mm, surface=surface)
dimages = dict(map(lambda ax: (ax, dblur_ch1[ax] / dgreen_blur[ax]), dblur_ch1.keys()))
else:
            raise ValueError('select a valid argument for blur_pos. Choose either norm, ratio, or single: '
                             'norm ... blur normalized intensity; '
                             'ratio ... blur ratiometric intensity; '
                             'single ... blur individual color channels')
# crop to image frame of interest
dimg = dict(map(lambda d: (d, dimages[d].loc[depth_min:depth_max, :] if dimages[d].empty is False else None),
dimages.keys()))
if blur_pos == 'norm':
dimg_std = dict(map(lambda d: (d, dimagesSTD[d].loc[depth_min:depth_max, :] if dimagesSTD[d].empty is False else None), dimagesSTD.keys()))
else:
dimg_std = dict(map(lambda d: (d, None), dimages.keys()))
# -------------------------------------------
# determine O2 concentration
dO2_calc_ = dict()
for h in dimages.keys():
if dimages[h].empty is True:
pass
else:
dO2_calc = O2_analysis_area(para=para, iratio=dimg[h], iratio_std=dimg_std[h], int_type=blur_pos)
dO2_calc_[h] = dO2_calc
    # extract the mean field for each blur direction (uncertainties are dropped here)
dO2_optode = dict()
for d in dO2_calc_.keys():
dO2op_av = pd.concat(dict(map(lambda c:
(c, pd.DataFrame([i.n for i in dO2_calc_[d][c]], index=dO2_calc_[d].index)),
dO2_calc_[d].columns)), axis=1, ignore_index=True)
dO2op_av.columns = dO2_calc_[d].columns
dO2_optode[d] = dO2op_av
return dO2_optode
def postprocessing_v1(dO2_calc, px2mm, surface, split=True, vmin=-50, vmax=150):
print('post-processing...')
# remove obvious outlier (min_, max_) and convert px to mm
dO2 = postprocessing(O2_calc=dO2_calc, px2mm=px2mm, baseline=surface, min_=vmin, max_=vmax)
# split ufloat into mean and sd for visualization
if split is True:
dO2_av, dO2_SD = split2statics(dO2)
else:
dO2_av, dO2_SD = None, None
return dO2, dO2_av, dO2_SD
def postprocessing(px2mm, baseline, O2_calc, min_=-50, max_=150):
# convert array into dataframe
dfO2_calc = dict(map(lambda o: (o, dict(map(lambda s: (s, pd.DataFrame(O2_calc[o][s])), O2_calc[o].keys()))),
O2_calc.keys()))
# remove obvious outlier x< -50 or x > 1000 %air
for o in dfO2_calc.keys():
for s in dfO2_calc[o].keys():
# run 2 - new gradient
dfO2_calc[o][s][dfO2_calc[o][s] < min_] = np.nan
dfO2_calc[o][s][dfO2_calc[o][s] > max_] = np.nan
# convert px-> mm
for en, o in enumerate(dfO2_calc.keys()):
for s in dfO2_calc[o].keys():
# run1 - gradient
ind_new = dfO2_calc[o][s].index.to_numpy() / px2mm - baseline[en]
col_new = dfO2_calc[o][s].columns.to_numpy() / px2mm
# rename index and columns for optode1 - mean and SD
dfO2_calc[o][s].index = ind_new
dfO2_calc[o][s].columns = col_new
return dfO2_calc
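# --------------------------------------------------------------------------
# Illustrative sketch: applying the outlier removal and px-to-mm conversion to
# a synthetic O2 field. The dictionary layout (optode -> setting -> 2D array)
# mirrors the rest of the module; px2mm = 10 px/mm and a 2.5 mm surface
# baseline are example values only.
def _example_postprocessing():
    rng = np.random.RandomState(1)
    o2 = rng.uniform(low=-80., high=200., size=(50, 40))   # %air incl. outliers
    dfO2 = postprocessing(px2mm=10., baseline=[2.5], O2_calc={'optode1': {'set1': o2}})
    # index is now depth in mm relative to the surface; outliers are NaN
    print(dfO2['optode1']['set1'].index[:5])
    return dfO2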
# ----------------------------------------------------------
def O2concentration_lp(para, ls_lw, ddlp, ddlp_std=None, int_type='norm'):
# determine O2 concentration
if ddlp_std is None:
dO2 = dict(map(lambda d:
(d, dict(map(lambda lw_: (lw_, pd.concat([O2_analysis_area(para=para, int_type=int_type,
iratio=ddlp[d][lw_][c],
iratio_std=None)
for c in ddlp[d][lw_].columns],
axis=1).fillna(limit=5, method='ffill')
if ddlp[d][lw_] is not None else None), ddlp[d].keys()))), ddlp.keys()))
else:
dO2 = dict(map(lambda d:
(d, dict(map(lambda lw_: (lw_, pd.concat([O2_analysis_area(para=para, int_type=int_type,
iratio=ddlp[d][lw_][c],
iratio_std=ddlp_std[d][lw_][c])
for c in ddlp[d][lw_].columns],
axis=1).fillna(limit=5, method='ffill')
if ddlp[d][lw_] is not None else None), ddlp[d].keys()))), ddlp.keys()))
# averaging for mean and SD
dO2_depth = dict()
for d in dO2.keys():
ddO2_dp = dict()
for lw_ in ls_lw:
d_av, d_sd = dict(), dict()
if dO2[d][lw_] is not None:
for c in dO2[d][lw_].columns:
d_av_ = pd.DataFrame([i.n for i in dO2[d][lw_][c].to_numpy()], index=dO2[d][lw_].index,
columns=['mean'])
d_sd_ = pd.DataFrame([i.s for i in dO2[d][lw_][c].to_numpy()], index=dO2[d][lw_].index,
columns=['SD'])
d_av[c], d_sd[c] = d_av_, d_sd_
dO2_dp = pd.concat([pd.concat(d_av, axis=1, ignore_index=True).mean(axis=1),
pd.concat(d_sd, axis=1, ignore_index=True).mean(axis=1)], axis=1)
dO2_dp.columns = ['mean', 'SD']
ddO2_dp[lw_] = dO2_dp
else:
ddO2_dp[lw_] = None
dO2_depth[d] = ddO2_dp
return dO2_depth
def O2_lineprofile_compare_v1(inp, surface, kernel, kshape, px2mm, lp, ls_lw, path_calib, dint_ch1, dint_ch2=None,
blur_type='ratio'):
# preparation
o, s = inp.split(',')[0], inp.split(',')[1].strip()
# load calibration
calib_info = load_calibration_para_v1(path_calib=path_calib)
para = calib_info[o][s]
# blur images
dblur_ch1 = blurimage(o=o, s=s, kernel=kernel, kshape=kshape, dint=dint_ch1, px2mm=px2mm, surface=surface)
if blur_type == 'ratio':
dimages = dblur_ch1
else:
# blur individual color channels, then determine ratiometric intensity
dgreen_blur = blurimage(o=o, s=s, kernel=kernel, kshape=kshape, dint=dint_ch2, px2mm=px2mm, surface=surface)
dimages = dict(map(lambda ax: (ax, dblur_ch1[ax] / dgreen_blur[ax]), dblur_ch1.keys()))
# crop to image width of interest
ddlp = dict(map(lambda d: (d, dict(map(lambda lw_: (lw_, line_profile_v1(df=dimages[d], lw=lw_, lp=lp[0])), ls_lw))),
dimages.keys()))
# determine O2 concentration for line profile
dO2_lp = O2concentration_lp(para=para, ddlp=ddlp, ls_lw=ls_lw)
return dO2_lp
def O2_lineprofile_compare_v2(inp, surface, kernel, kshape, px2mm, lp, ls_lw, path_calib, dint_ch1, dint_ch2=None,
blur_pos='norm'):
# preparation
o = inp.split(',')[0]
s = inp.split(',')[1].strip()
# load calibration
calib_info = load_calibration_para_v1(path_calib=path_calib)
para = calib_info[o][s]
# blur images
if blur_pos == 'norm':
dimages, dimagesSTD = blur_normIntensity(dint=dint_ch1[o][s], I0=para.loc['I0'].to_numpy(), kernel=kernel,
kshape=kshape, px2mm=px2mm, surface=surface, o=o, conversion=True)
else:
dblur_ch1 = blurimage(o=o, s=s, kernel=kernel, kshape=kshape, dint=dint_ch1, px2mm=px2mm, surface=surface)
if blur_pos == 'ratio':
dimages = dblur_ch1
elif blur_pos == 'single':
# blur individual color channels, then determine ratiometric intensity
dgreen_blur = blurimage(o=o, s=s, kernel=kernel, kshape=kshape, dint=dint_ch2, px2mm=px2mm, surface=surface)
dimages = dict(map(lambda ax: (ax, dblur_ch1[ax] / dgreen_blur[ax]), dblur_ch1.keys()))
else:
            raise ValueError('select a valid argument for blur_pos. Choose either norm, ratio, or single: '
                             'norm ... blur normalized intensity; '
                             'ratio ... blur ratiometric intensity; '
                             'single ... blur individual color channels')
# crop to image width of interest
ddlp = dict(map(lambda d: (d, dict(map(lambda lw_: (lw_, line_profile_v1(df=dimages[d], lw=lw_, lp=lp[0])),
ls_lw))), dimages.keys()))
if blur_pos == 'norm':
ddlp_std = dict(map(lambda d:
(d, dict(map(lambda lw_: (lw_, line_profile_v1(df=dimagesSTD[d], lw=lw_, lp=lp[0])),
ls_lw))), dimages.keys()))
else:
ddlp_std = None
# determine O2 concentration for line profile
dO2_lp = O2concentration_lp(para=para, ls_lw=ls_lw, ddlp=ddlp, ddlp_std=ddlp_std, int_type=blur_pos)
return dO2_lp
| 46.285194 | 148 | 0.543929 |
794754882dc82ce358425b0be0851743b5c8d2e5 | 5,601 | py | Python | front_end/handlers/ImportCourseHandler.py | zacheliason/CodeBuddy | 18694771ccbb74e6966e08f1247aadda2d5d06f7 | [
"MIT"
] | null | null | null | front_end/handlers/ImportCourseHandler.py | zacheliason/CodeBuddy | 18694771ccbb74e6966e08f1247aadda2d5d06f7 | [
"MIT"
] | 11 | 2020-06-12T19:13:12.000Z | 2021-08-28T23:47:57.000Z | front_end/handlers/ImportCourseHandler.py | zacheliason/CodeBuddy | 18694771ccbb74e6966e08f1247aadda2d5d06f7 | [
"MIT"
] | 3 | 2020-05-12T16:54:16.000Z | 2021-04-30T16:19:46.000Z | from BaseUserHandler import *
class ImportCourseHandler(BaseUserHandler):
def post(self):
try:
if not self.is_administrator():
self.render("permissions.html")
return
result = ""
if "zip_file" in self.request.files and self.request.files["zip_file"][0]["content_type"] == 'application/zip':
zip_file_name = self.request.files["zip_file"][0]["filename"]
zip_file_contents = self.request.files["zip_file"][0]["body"]
descriptor = zip_file_name.replace(".zip", "")
zip_data = BytesIO()
zip_data.write(zip_file_contents)
zip_file = zipfile.ZipFile(zip_data)
version = int(zip_file.read(f"{descriptor}/VERSION"))
course_list = json.loads(zip_file.read(f"{descriptor}/courses.json"))[0]
course_id = None
course_basics = self.content.get_course_basics(course_id)
self.content.specify_course_basics(course_basics, course_list[1], bool(course_list[3]))
# Check whether course already exists.
if self.content.has_duplicate_title(self.content.get_courses(), course_basics["id"], course_list[1]):
result = f"Error: A course with that title already exists."
else:
course_details = self.content.get_course_details(course_id)
self.content.specify_course_details(course_details, course_list[2], convert_string_to_date(course_list[4]), convert_string_to_date(course_list[5]))
self.content.save_course(course_basics, course_details)
assignment_id_dict = {}
assignment_lists = json.loads(zip_file.read(f"{descriptor}/assignments.json"))
for assignment_list in assignment_lists:
assignment_id = None
assignment_basics = self.content.get_assignment_basics(course_basics["id"], assignment_id)
assignment_details = self.content.get_assignment_details(course_basics["id"], assignment_id)
self.content.specify_assignment_basics(assignment_basics, assignment_list[2], bool(assignment_list[4]))
#self.content.specify_assignment_details(assignment_details, assignment_list[3], convert_string_to_date(assignment_list[5]), convert_string_to_date(assignment_list[6]))
self.content.save_assignment(assignment_basics, assignment_details)
assignment_id_dict[assignment_list[1]] = assignment_basics["id"]
exercise_lists = json.loads(zip_file.read(f"{descriptor}/exercises.json"))
for exercise_list in exercise_lists:
exercise_id = None
exercise_basics = self.content.get_exercise_basics(course_basics["id"], assignment_id_dict[exercise_list[1]], exercise_id)
exercise_details = self.content.get_exercise_details(course_basics["id"], assignment_id_dict[exercise_list[1]], exercise_id)
self.content.specify_exercise_basics(exercise_basics, exercise_list[3], bool(exercise_list[4]))
answer_code = exercise_list[5]
answer_description = exercise_list[6]
hint = ""
max_submissions = int(exercise_list[7])
credit = exercise_list[8]
data_files = exercise_list[9]
back_end = exercise_list[10]
instructions = exercise_list[12]
output_type = exercise_list[13]
show_answer = bool(exercise_list[14])
show_student_submissions = bool(exercise_list[15])
show_expected = bool(exercise_list[16])
show_test_code = bool(exercise_list[17])
starter_code = exercise_list[18]
test_code = exercise_list[19]
date_created = convert_string_to_date(exercise_list[20])
date_updated = convert_string_to_date(exercise_list[21])
expected_text_output = ""
expected_image_output = ""
                        if output_type == "txt":
                            expected_text_output = exercise_list[13]
                            expected_output = expected_text_output
                        else:
                            expected_image_output = exercise_list[13]
                            expected_output = expected_image_output
self.content.specify_exercise_details(exercise_details, instructions, back_end, output_type, answer_code, answer_description, hint, max_submissions, starter_code, test_code, credit, data_files, show_expected, show_test_code, show_answer, expected_output, date_created, date_updated)
self.content.save_exercise(exercise_basics, exercise_details)
result = "Success: The course was imported!"
else:
result = "Error: The uploaded file was not recognized as a zip file."
self.render("profile_admin.html", page="admin", tab="import", admins=self.content.get_users_from_role(0, "administrator"), result=result, user_info=self.get_user_info(), is_administrator=self.is_administrator(), is_instructor=self.is_instructor(), is_assistant=self.is_assistant())
except Exception as inst:
render_error(self, traceback.format_exc())
| 60.880435 | 306 | 0.604178 |
794754db57466ac7cb8e0e539af2d04afe1a7dfc | 39,918 | py | Python | grid_set-2019.py | hheorton/geo_data_group | 7f62cda797d0d7a10c1578f162b35d7f484865ec | [
"MIT"
] | null | null | null | grid_set-2019.py | hheorton/geo_data_group | 7f62cda797d0d7a10c1578f162b35d7f484865ec | [
"MIT"
] | null | null | null | grid_set-2019.py | hheorton/geo_data_group | 7f62cda797d0d7a10c1578f162b35d7f484865ec | [
"MIT"
] | null | null | null | # here is the class that holds all the data days/months
# it has all the gridding scripts needed
# it will save load all the data/days/months as needed
import numpy as np
import pandas as pd
import datetime
import copy
from netCDF4 import Dataset
from numba import jit
from scipy import stats
import data_year as dy
from dateutil.relativedelta import relativedelta
from mpl_toolkits.basemap import Basemap
from scipy.interpolate import griddata
class grid_set:
# will make one of these at a time point (as a datetime) defined by timestart
def __init__(self,mplot):
self.mplot = mplot
self.proj = True
self.files = False
self.saved = False
self.grid = False
self.gridinfo = False
self.masked = False
self.data = False
# def import_regrid
# # takes another data, on a seperate lon/lat, and regrids into a data/day/month
# def import_regrid_nc
# def import_regrid_vec
# # takes vector input, a seperate lon/lat, and regrids into a data/day/month
# # makes use of rotate/regrid/rotate methods to get it all pretty
# def import_regrid_vec_nc
def set_proj(self,mplot):
# puts in a projection mplot too
# # make make sure you keep hold of regridding projs
self.mplot = mplot
self.proj = True
def set_grid_lon_lat(self,lons,lats):
# creates a grid depending on wanted resolution
if self.proj:
xpts, ypts = self.mplot(lons,lats)
self.lons = lons
self.lats = lats
self.xpts = xpts
self.ypts = ypts
self.dxRes = np.mean(np.diff(xpts[0,:]))
self.dyRes = np.mean(np.diff(ypts[:,0]))
self.n,self.m = np.shape(lons)
self.grid = True
print("Got a grid res = ",self.n," x ",self.m)
print("Note that all grid info is in ny x nx grids, whilst data is in nx x ny")
else: print("Projection not defined yet, do that first")
def get_ptp(self):
"""
        Generates pts arrays for pcolor and pcolormesh - midpoints of the grid cells
"""
if self.grid:
            # pad xpts by one leading column and ypts by one leading row, then take mid-points
xpt_pad = np.pad(self.xpts, ((0,0),(1,0)), 'edge')
ypt_pad = np.pad(self.ypts, ((1,0),(0,0)), 'edge')
self.xptp = xpt_pad[:,:-1]+0.5*(np.diff(xpt_pad,axis=1))
self.yptp = ypt_pad[:-1,:]+0.5*(np.diff(ypt_pad,axis=0))
def set_grid_dxy(self,dxRes,dyRes):
# creates a grid depending on wanted resolution
if self.proj:
nx = int((self.mplot.xmax-self.mplot.xmin)/dxRes)+1
ny = int((self.mplot.ymax-self.mplot.ymin)/dyRes)+1
lons, lats, xpts, ypts = self.mplot.makegrid(nx, ny, returnxy=True)
self.lons = lons
self.lats = lats
self.xpts = xpts
self.ypts = ypts
self.dxRes = dxRes
self.dyRes = dyRes
self.grid = True
self.m = nx
self.n = ny
print("Got a grid res = ",nx," x ",ny)
print("Note that all grid info is in ny x nx grids, whilst data is in nx x ny")
else: print("Projection not defined yet, do that first")
def set_grid_mn(self,nx,ny):
# creates a grid depending on wanted no. of points
if self.proj:
lons, lats, xpts, ypts = self.mplot.makegrid(nx, ny, returnxy=True)
self.lons = lons
self.lats = lats
self.xpts = xpts
self.ypts = ypts
self.grid = True
self.dxRes = (self.mplot.xmax-self.mplot.xmin)/(nx - 1)
self.dyRes = (self.mplot.ymax-self.mplot.ymin)/(ny - 1)
self.m = nx
self.n = ny
print("Got a grid res = ",nx," x ",ny)
else: print("Projection not defined yet, do that first")
def get_grid_info(self):
# creates a grid depending on wanted no. of points
# print( self.grid and (not self.gridinfo))
if self.grid and (not self.gridinfo):
#iterate over the grid to get dimensions and angles
# first iterate all x dimensions - m-1/n array
# then iterate all y dimensions - m/n-1 array
xdims = np.empty([self.n,self.m-1])
ydims = np.empty([self.n-1,self.m])
self.xdist = np.empty([self.n,self.m])
self.ydist = np.empty([self.n,self.m])
self.ang_c = np.empty([self.n,self.m])
self.ang_s = np.empty([self.n,self.m])
for i in range(self.m-1):
for j in range(self.n):
xdims[j,i] = ellipsoidal_distance(
self.lons[j,i],self.lats[j,i],
self.lons[j,i+1],self.lats[j,i+1],deg=True)
for i in range(self.m):
for j in range(self.n-1):
ydims[j,i] = ellipsoidal_distance(
self.lons[j,i],self.lats[j,i],
self.lons[j+1,i],self.lats[j+1,i],deg=True)
# then average the available distances i-1,i j-1,j
for i in range(self.m):
for j in range(self.n):
self.xdist[j,i] = np.nanmean(xdims[j,:i+1][-2:])
self.ydist[j,i] = np.nanmean(ydims[:j+1,i][-2:])
print("Grid distances calculated: ",np.nanmean(self.xdist)," x ",np.nanmean(self.ydist))
# then iterate all angles - this is all points plus the extra possible angles
# pad the lon lat arrays for iteration
lon_pad = np.pad(self.lons, (1,1), 'linear_ramp', end_values=(np.nan))
lat_pad = np.pad(self.lats, (1,1), 'linear_ramp', end_values=(np.nan))
for i in range(self.m):
for j in range(self.n):
# i + angle
xPlus_c,xPlus_s = lon_lat_angle(lon_pad[j+1,i+1],lat_pad[j+1,i+1],
lon_pad[j+1,i+2],lat_pad[j+1,i+2],
return_trig = True,deg=True)
xMins_c,xMins_s = lon_lat_angle(lon_pad[j+1,i+1],lat_pad[j+1,i+1],
lon_pad[j+1,i ],lat_pad[j+1,i ],
return_trig = True,deg=True)
yPlus_c,yPlus_s = lon_lat_angle(lon_pad[j+1,i+1],lat_pad[j+1,i+1],
lon_pad[j+2,i+1],lat_pad[j+2,i+1],
return_trig = True,deg=True)
yMins_c,yMins_s = lon_lat_angle(lon_pad[j+1,i+1],lat_pad[j+1,i+1],
lon_pad[j ,i+1],lat_pad[j ,i+1],
return_trig = True,deg=True)
# average all the components first checking the orientation
# if j == 20 and i ==12:
# print([xPlus_c,xMins_c,yPlus_c,yMins_c])
# print([xPlus_s,xMins_s,yPlus_s,yMins_s])
self.ang_c[j,i] = np.nanmean([-xPlus_s, xMins_s, yPlus_c,-yMins_c])
self.ang_s[j,i] = np.nanmean([ xPlus_c,-xMins_c, yPlus_s,-yMins_s])
print('Angles calculated')
self.gridinfo = True
else: print("Grid not defined yet, do that first")
def get_square_points(self):
"""
makes the xsq,ysq fields that will let you plot on a square grid
uses np.meshgrid to make location arrasy statring lower left at (0,0)
"""
self.xsq,self.ysq = np.meshgrid(np.linspace(0,1,self.m),np.linspace(0,1,self.n))
def check_angles(self,point=False,scale=1.0):
# return np.hpot of con/sin, min/max and mean
check_ang = np.hypot(self.ang_c,self.ang_s)**2
print('mean ='+str(np.nanmean(check_ang)))
print('max ='+str(np.nanmax(check_ang)))
print('min ='+str(np.nanmin(check_ang)))
# if a point is given return a vector to north and x positive
# so it can be plotted on projection
if type(point) == list:
# returns two normalised vectors
i = point[0]
j = point[1]
# line1 starts at point
# goes in direction to j+1 (+ve x)
xvec = self.xpts[i,j+1] - self.xpts[i,j]
yvec = self.ypts[i,j+1] - self.ypts[i,j]
# print(xvec,yvec)
# xrot = self.ang_c[i,j]*xvec - self.ang_s[i,j]*yvec
# yrot = self.ang_c[i,j]*yvec + self.ang_s[i,j]*xvec
xrot = -self.ang_c[i,j]*yvec + self.ang_s[i,j]*xvec
yrot = self.ang_c[i,j]*xvec + self.ang_s[i,j]*yvec
# print(xrot,yrot)
print(np.rad2deg(np.arctan2(self.ang_s[i,j],self.ang_c[i,j])))
Out1 = (self.xpts[i,j],self.ypts[i,j])
Out2 = (Out1[0] + xvec*scale,Out1[1] + yvec*scale)
Out3 = (Out1[0] + xrot*scale,Out1[1] + yrot*scale)
# return the list of x,y's needed for plot
return ([Out1[0],Out2[0]],
[Out1[1],Out2[1]]),([Out1[0],Out3[0]],
[Out1[1],Out3[1]])
# line2 starts at point
# goes in direction - j+1 plus rotation
def rotate_vectors_to_plot(self,xvec,yvec):
"""
utilises the ang_c and ang_s arrays along with the associated projection
"""
# ur,vr will be in lon/lat
ur = xvec*self.ang_c - yvec*self.ang_s
vr = yvec*self.ang_c + xvec*self.ang_s
urr,vrr = self.mplot.rotate_vector(ur,vr,self.lons,self.lats)
return urr,vrr
def save_grid(self,file):
if self.grid and self.gridinfo:
# save lat/lon pts
np.savez(file,
lats = self.lats,
lons = self.lons,
xpts = self.xpts,
ypts = self.ypts,
dxRes = self.dxRes,
dyRes = self.dyRes,
m = self.m,
n = self.n,
ang_c = self.ang_c,
ang_s = self.ang_s,
xdist = self.xdist,
ydist = self.ydist)
print("Grid saved in "+file)
else:
print("No grid to save - run get_grid_info")
def save_grid_nc(self,file,notes=''):
if self.grid and self.gridinfo:
# save lat/lon pts
NC_f = Dataset(file, 'w', format='NETCDF4')
NC_f.description = 'python grid_set grid file'+notes
# dimensions
NC_f.createDimension('time', None)
NC_f.createDimension('x', self.m)
NC_f.createDimension('y', self.n)
# variables
# time = NC_f.createVariable('time', 'f8', ('time',))
x = NC_f.createVariable('x', 'f4', ('x',))
y = NC_f.createVariable('y', 'f4', ('y',))
lons = NC_f.createVariable('lons', 'f8', ('y', 'x',))
lats = NC_f.createVariable('lats', 'f8', ('y', 'x',))
xpts = NC_f.createVariable('xpts', 'f8', ('y', 'x',))
ypts = NC_f.createVariable('ypts', 'f8', ('y', 'x',))
ang_c = NC_f.createVariable('ang_c', 'f8',('y', 'x',))
ang_s = NC_f.createVariable('ang_s', 'f8',('y', 'x',))
xdist = NC_f.createVariable('xdist', 'f8',('y', 'x',))
ydist = NC_f.createVariable('ydist', 'f8',('y', 'x',))
NC_f.setncattr_string('dxRes',self.dxRes)
NC_f.setncattr_string('dyRes',self.dyRes)
x[:] =self.xpts[0,:]
y[:] =self.ypts[:,0]
lons[:,:] = self.lons
lats[:,:] = self.lats
xpts[:,:] = self.xpts
ypts[:,:] = self.ypts
ang_c[:,:] = self.ang_c
ang_s[:,:] = self.ang_s
xdist[:,:] = self.xdist
ydist[:,:] = self.ydist
NC_f.close()
# np.savez(file,
# lats = self.lats,
# lons = self.lons,
# xpts = self.xpts,
# ypts = self.ypts,
# dxRes = self.dxRes,
# dyRes = self.dyRes,
# m = self.m,
# n = self.n,
# ang_c = self.ang_c,
# ang_s = self.ang_s,
# xdist = self.xdist,
# ydist = self.ydist)
print("Grid saved in "+file)
else:
print("No grid to save - run get_grid_info")
def load_grid(self,file):
with np.load(file) as npzfile:
self.lats = npzfile["lats"]
self.lons = npzfile["lons"]
self.xpts = npzfile["xpts"]
self.ypts = npzfile["ypts"]
self.dxRes = npzfile["dxRes"]
self.dyRes = npzfile["dyRes"]
self.m = npzfile["m"][()]
self.n = npzfile["n"][()]
self.ang_c = npzfile["ang_c"]
self.ang_s = npzfile["ang_s"]
self.xdist = npzfile["xdist"]
self.ydist = npzfile["ydist"]
self.grid = True
self.gridinfo = True
self.get_ptp()
print("Loaded a grid: "+file)
def check_grid(self):
# makes sure the projection and loaded grid are consistent
if self.proj and self.grid and self.gridinfo:
proj_dim = self.mplot.xmax - self.mplot.xmin
proj_dim = proj_dim/self.m
print("Projection av xdim = ",proj_dim)
print("dxRes = ",self.dxRes)
print("xdist av = ",np.mean(self.xdist))
def get_grid_mask(self,inflate = 0.0):
        # makes a land mask for each point then inflates by a distance m
if self.masked:
print("Already masked, do it again? set mask = False first")
else:
self.mask = np.ones([self.m,self.n])
for i in range(self.m):
for j in range(self.n):
if self.mplot.is_land(self.xpts[j,i],self.ypts[j,i]):
self.mask[i,j] = np.nan
inf_mask = np.ones([self.m,self.n])
if (inflate>0.0) and self.gridinfo:
self.mask_inflate = inflate
for i in range(self.m):
for j in range(self.n):
if np.isnan(self.mask[i,j]):
inf_p = int(inflate/np.hypot(self.xdist[j,i],self.ydist[j,i]))
inf_mask[i-inf_p:i+inf_p+1,j-inf_p:j+inf_p+1] = np.nan
self.mask = inf_mask
elif self.gridinfo:
self.mask_inflate = inflate
self.masked = True
def inflate_mask(self,inflate = 0.0):
        # inflates the existing land mask by a distance m
if self.masked and self.gridinfo:
inf_mask = np.ones([self.m,self.n])
if (inflate>0.0) and self.gridinfo:
self.mask_inflate = inflate
for i in range(self.m):
for j in range(self.n):
if np.isnan(self.mask[i,j]):
inf_p = int(inflate/np.hypot(self.xdist[j,i],self.ydist[j,i]))
inf_mask[i-inf_p:i+inf_p+1,j-inf_p:j+inf_p+1] = np.nan
self.mask = inf_mask
elif self.gridinfo:
self.mask_inflate = inflate
else:
print("Not masked so can't inflate")
def mask_point(self,lon,lat,inflate = 0):
y,x = np.unravel_index(np.argmin(
np.abs(self.lons - lon) +
np.abs(self.lats - lat)),
np.shape(self.lons))
if (inflate>0.0) and self.gridinfo:
inf_p = int(inflate/np.hypot(self.xdist[y,x],self.ydist[y,x]))
self.mask[x-inf_p:x+inf_p+1,y-inf_p:y+inf_p+1] = np.nan
else:
self.mask[x,y] = np.nan
def save_mask(self,file):
if self.masked:
# save lat/lon pts
np.savez(file,
mask = self.mask,
mask_inflate = self.mask_inflate,
m = self.m,
n = self.n)
print("Mask saved in "+file)
else:
print("No mask to save - run get_grid_mask")
def load_mask(self,file):
if self.masked:
print("Masked already!")
elif self.gridinfo:
# save lat/lon pts
with np.load(file) as npzfile:
self.mask = npzfile["mask"]
self.mask_inflate = npzfile["mask_inflate"]
m_check = npzfile["m"]
n_check = npzfile["n"]
if (m_check == self.m)&(n_check == self.n):
print("Loaded mask, ",m_check," x ",n_check," inflated by ",self.mask_inflate)
self.masked = True
else:
print("Gird and mask dimensins inconsistent, check them")
print("Mask",m_check," x ",n_check," Grid, ",self.m," x ",self.n)
def generate_mask_lonlat(self,lon_r,lat_r,add_mask = True,out='bool'):
"""
give a lon_r = [l1,l2] range of lons to keep within the mask
give a lat_r = [l1,l2] range of lats to keep within the mask
        makes a np array that keeps the given range unmasked.
        add_mask keeps the new mask true to the original GS mask, ie, keeps a land mask
        out = 'bool' makes the out array a logical, T = unmasked, F = masked
out = 'float' makes the out array a float, 1.0 unmasked, np.nan = masked
"""
new_mask = np.ones_like(self.mask)
new_mask[self.lats.T<lat_r[0]] =np.nan
new_mask[self.lats.T>lat_r[1]] =np.nan
new_mask[self.lons.T<lon_r[0]] =np.nan
new_mask[self.lons.T>lon_r[1]] =np.nan
if add_mask:
new_mask[np.isnan(self.mask)] =np.nan
        if out == 'float':
return new_mask
elif out == 'bool':
out_mask = np.ones_like(self.mask,dtype=bool)
out_mask[np.isnan(new_mask)] = False
return out_mask
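# --------------------------------------------------------------------------
# Illustrative sketch: building a small polar grid_set from scratch. The
# projection parameters and the 100 km spacing are example values only; any
# Basemap projection can be used. get_grid_info loops over every grid point so
# it can be slow for fine grids.
def _example_build_grid_set():
    m = Basemap(projection='npstere', boundinglat=65., lon_0=0., resolution='l')
    gs = grid_set(m)
    gs.set_grid_dxy(dxRes=100e3, dyRes=100e3)   # ~100 km spacing
    gs.get_grid_info()                          # grid distances + local angles
    gs.get_ptp()                                # mid-points for pcolormesh
    # gs.save_grid('example_grid.npz')          # optional: cache for later runs
    return gs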
def read_nc_single(ncfile,grid_set,lonlatk,valk,fill_lonlat = False):
"""
    # reads a single data slice from a netcdf file, then regrids it onto grid_set
    # slightly flexible:
    # lonlatk = ['x','y'], say, gives the variable names for lon/lat
    # valk = ['data'] gives the variable name for the data you want
# fill_lonlat = True, allows the code to deal with an even lon/lat grid
# where the lon lat dimensions are given by a single dimension array
"""
data_nc = Dataset(ncfile)
lons = data_nc.variables[lonlatk[0]][:]
lats = data_nc.variables[lonlatk[1]][:]
d_array = data_nc.variables[valk[0]][:]
# if the lat_lons need filling - do it
if fill_lonlat:
lon_a,lat_a = np.meshgrid(lons,lats)
else:
lon_a = lons
lat_a = lats
# regrid depending upon m and grid
x_nc, y_nc = grid_set.mplot(lon_a.data, lat_a.data)
new_d_array = griddata((x_nc[~d_array.mask].ravel(), y_nc[~d_array.mask].ravel()),
d_array[~d_array.mask].ravel(), (grid_set.xpts, grid_set.ypts),
method='linear')
return new_d_array
def geo_gradient(array,grid_set):
"""
gradient function that will take the grid info from the
grid_set type class to get gradients
the array has to be consistent with the grid set class so it can access the x/ydist parameters
"""
# check if grid_set has grid info
if not grid_set.gridinfo:
print("No grid_set geo grid info - no result")
return False, False
in_mn = np.shape(array)
if in_mn[0]!=grid_set.m or in_mn[1]!=grid_set.n :
print("input array or geo grid_set not consistently shaped")
return False, False
else:
out_Dax = np.empty_like(array)
out_Day = np.empty_like(array)
# np gradient can't do an uneven array
# so we iterate along the columns, then the rows
        # taking the gradient each time
# 1 . columns
for i in range(grid_set.m):
temp_space = [np.sum(grid_set.ydist[0:j+1,i])
for j in range(grid_set.n)]
out_Day[i,:] = np.gradient(
array[i,:],temp_space)
# 2 . rows
for j in range(grid_set.n):
temp_space = [np.sum(grid_set.xdist[j,0:i+1])
for i in range(grid_set.m)]
out_Dax[:,j] = np.gradient(
array[:,j],temp_space)
return out_Dax,out_Day
def geo_curl(u,v,grid_set):
"""
curl function that will take the grid info from the
grid_set type class to get gradients
the array has to be consistent with the grid set class so it can access the x/ydist parameters
"""
# check if grid_set has grid info
if not grid_set.gridinfo:
print("No grid_set geo grid info - no result")
return False
in_mn = np.shape(u)
if in_mn[0]!=grid_set.m or in_mn[1]!=grid_set.n :
print("input array or geo grid_set not consistently shaped")
return False
else:
Dvdx = geo_gradient(v,grid_set)[1]
Dudy = geo_gradient(u,grid_set)[0]
zeta = Dvdx - Dudy
return zeta
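# --------------------------------------------------------------------------
# Illustrative sketch: gradients and curl of synthetic fields on an existing
# grid_set (gs needs get_grid_info() run first, e.g. via load_grid). The
# sinusoidal scalar field and uniform velocity field are arbitrary examples.
def _example_geo_derivatives(gs):
    # data arrays are m x n, so transpose the n x m lon/lat grids
    f = np.sin(np.deg2rad(gs.lons.T)) * np.cos(np.deg2rad(gs.lats.T))
    u = np.ones([gs.m, gs.n])
    v = np.zeros([gs.m, gs.n])
    dfdx, dfdy = geo_gradient(f, gs)
    zeta = geo_curl(u, v, gs)
    return dfdx, dfdy, zeta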
def de_ripple(array1,array2,rip_filt_std = 1,filt_ring_sig = 5,force_zero = False):
from scipy.ndimage.filters import gaussian_filter
# find the ripples by subtracting the arrays
ripples = array1 - array2
# fast fourier transform the difference
rip_spec = np.fft.fft2(np.double(ripples))
rip_spec2 = np.fft.fftshift(rip_spec)
# find the ring sprectrum the contains the ripples
filt_ring = np.ones_like(array1)
spec_r = np.mean(rip_spec2) + rip_filt_std*np.std(rip_spec2)
filt_ring[rip_spec2>spec_r] = 0.0
filt_ring = gaussian_filter(filt_ring,sigma = filt_ring_sig)
if not type(force_zero) == bool:
filt_ring[rip_spec2>spec_r] = filt_ring[rip_spec2>spec_r]*force_zero
# use this filter ring to remove the array1 fft spectrum
a1_spec = np.fft.fft2(np.double(array1))
a1_spec2 = np.fft.fftshift(a1_spec)
a1_spec2 = a1_spec2*filt_ring
back = np.real(np.fft.ifft2(np.fft.ifftshift(a1_spec2)))
return back
def geo_filter(array,grid_set,distance,mask = False):
from scipy.ndimage.filters import gaussian_filter
"""
filter function that will take the grid info from the
grid_set type class to get filter distances
the array has to be consistent with the grid set class so it can access the x/ydist parameters
"""
# takes the DOT and filters out the geoid harmonics
# hopefully can implement variable gradient using
# grid info
# can dx/dyres if needed
# check if grid_set has grid info
if type(mask)==bool:
if mask:
mask = grid_set.mask
else:
mask = np.ones_like(array)
    elif (np.shape(mask)[0] != grid_set.m
          or np.shape(mask)[1] != grid_set.n):  # check mask dimensions
print("Mask array incorrect shape, ignoring it")
mask = np.ones([grid_set.m,grid_set.n])
if not grid_set.gridinfo:
print("No grid_set geo grid info - no result")
return False
in_mn = np.shape(array)
if in_mn[0]!=grid_set.m or in_mn[1]!=grid_set.n :
print("input array or geo grid_set not consistently shaped")
return False
else:
V = np.empty_like(array)
W = np.empty_like(array)
out_array = np.empty_like(array)
f_sig =[distance/d for d in [grid_set.dxRes,grid_set.dyRes]] # some function of the radius given..
V[:,:]=array*mask
V[np.isnan(V)]=0
VV=gaussian_filter(V,sigma=f_sig)
W[:,:]=0*array+1
W = W*mask
W[np.isnan(W)]=0
WW=gaussian_filter(W,sigma=f_sig)
out_array[:,:]=VV/WW
out_array[np.isnan(array)] = np.nan
return out_array*mask
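# --------------------------------------------------------------------------
# Illustrative sketch: smoothing a noisy field with geo_filter over a 200 km
# length scale on an existing grid_set (gs). The noise level and length scale
# are example values; mask=True would additionally need gs.get_grid_mask() (or
# load_mask) to have been run.
def _example_geo_filter(gs):
    rng = np.random.RandomState(2)
    noisy = np.sin(np.deg2rad(gs.lons.T)) + rng.normal(scale=0.2, size=(gs.m, gs.n))
    smooth = geo_filter(noisy, gs, 200e3)
    return smooth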
def geo_convolve(array,grid_set,distance,limits,mask = False,set_kernel = False):
from astropy.convolution import convolve, Gaussian2DKernel
"""
filter function that will take the grid info from the
grid_set type class to get filter distances
the array has to be consistent with the grid set class so it can access the x/ydist parameters
mask is the same size as the array
"""
# takes the DOT and filters out the geoid harmonics
# hopefully can implement variable gradient using
# grid info
# can dx/dyres if needed
# check if grid_set has grid info
if type(mask)==bool:
if mask:
mask = grid_set.mask
else:
mask = np.ones_like(array)
elif (np.shape(mask)[0] != grid_set.m
or np.shape(mask)[1] != grid_set.n):# check mask dimension)
print(np.shape(mask)[0],grid_set.m )
print(np.shape(mask)[1],grid_set.n )
print("Mask array incorrect shape, ignoring it")
mask = np.ones([grid_set.m,grid_set.n])
if not grid_set.gridinfo:
print("No grid_set geo grid info - no result")
return False
if type(set_kernel)==bool:
        # set normal gaussian kernel as a function of grid general dim
f_sig =np.mean([distance/d for d in [grid_set.dxRes,grid_set.dyRes]])
kernel = Gaussian2DKernel(f_sig)
else: kernel = set_kernel
in_mn = np.shape(array)
if in_mn[0]!=grid_set.m or in_mn[1]!=grid_set.n :
print("input array or geo grid_set not consistently shaped")
return False
else:
# some function of the radius given..
array_2 = copy.copy(array)
array_2[array<limits[0]] = np.nan
array_2[array>limits[1]] = np.nan
array_2[np.isnan(mask)] = np.nan
out_array = convolve(array_2,kernel,boundary = 'extend')
out_array[np.isnan(mask)] = np.nan
return out_array
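# A minimal usage sketch for geo_convolve, assuming the same hypothetical `gs`
# and `dot` as above; values outside `limits` become NaN before the NaN-aware
# astropy convolution is applied:
#
#     dot_smooth = geo_convolve(dot, gs, 100e3, limits=[-2.0, 2.0], mask=True)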
# takes generic data and regrids it into a data_year
def regrid_data(data,dates,lons,lats,grid_set,periods,
fill_lonlat = False):
"""
makes a data year object, nicely regridded on D_Class grid
time dimension of data is default 0
currently setup to access list of lists, or arrays
first list access is the time point
retains old netcdf option to fill lat lon arrays from singular
axis arrays
otherwise lon/lat need to be of the same shape as the data time slice
periods is the number of time slices per year, ie. 12 for monthlies
"""
n_t = np.shape(data)[0]
new_d_array = np.empty([n_t,grid_set.m,grid_set.n])
# if the lat_lons need filling - do it
if fill_lonlat:
lon_a,lat_a = np.meshgrid(lons,lats)
else:
lon_a = lons
lat_a = lats
# regrid depending upon m and grid
x_d, y_d = grid_set.mplot(lon_a, lat_a)
for tt in range(n_t):
new_d_array[tt,:,:] = griddata((x_d.ravel(), y_d.ravel()),
data[tt][:].ravel(), (grid_set.xpts.T, grid_set.ypts.T),
method='linear')
return dy.data_year(new_d_array,dates,periods)
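# A minimal usage sketch for regrid_data, assuming hypothetical monthly fields
# `monthly` (time axis first), matching `dates`, and 1D `lon_1d`/`lat_1d` vectors:
#
#     DY = regrid_data(monthly, dates, lon_1d, lat_1d, gs, periods=12,
#                      fill_lonlat=True)   # returns a data_year on the gs grid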
# takes generic data and regrids it into a data_year
def regrid_vectors(x,y,dates,lons,lats,grid_set,periods,
fill_lonlat = False,vector_angles = False):
"""
makes a vector data year object, nicely regridded on D_Class grid
time dimension of data is default 0
currently setup to access list of lists, or arrays
first list access is the time point
retains old netcdf option to fill lat lon arrays from singular
axis arrays
otherwise lon/lat need to be of the same shape as the data time slice
periods is the number of time slices per year, ie. 12 for monthlies
# the original vectors may need to be rotated back to be square to
# lon lat so they can be regridded
    # if vector_angles = false then they are already square i.e. on an x/y frame square to lon/lat
# otherwise vector_angles is the same shape as lon/lats etc
# and is angle positive from gridded data x/y to lon/lat
# ie positive rotational angle from local y positive dimension to true north
# so angle array is consistent to gridinfo method on a grid_set - so you can use that.
"""
n_t = np.shape(x)[0]
new_x_array = np.empty([n_t,grid_set.m,grid_set.n])
new_y_array = np.empty([n_t,grid_set.m,grid_set.n])
# if the lat_lons need filling - do it
if fill_lonlat:
lon_a,lat_a = np.meshgrid(lons,lats)
else:
lon_a = lons
lat_a = lats
if type(vector_angles) == bool:
orig_c = np.ones_like(lon_a)
orig_s = np.zeros_like(lon_a)
else:
orig_c = np.cos(np.deg2rad(vector_angles.T))
orig_s = np.sin(np.deg2rad(vector_angles.T))
# regrid depending upon mplot and grid
x_d, y_d = grid_set.mplot(lon_a, lat_a)
for tt in range(n_t):
        # rotating this time slice back to lon lat
        orig_x = x[tt]*orig_c - y[tt]*orig_s
        orig_y = y[tt]*orig_c + x[tt]*orig_s
# regridding
temp_x = griddata((x_d.ravel(), y_d.ravel()),
orig_x.ravel(), (grid_set.xpts.T, grid_set.ypts.T),
method='linear')
temp_y = griddata((x_d.ravel(), y_d.ravel()),
orig_y.ravel(), (grid_set.xpts.T, grid_set.ypts.T),
method='linear')
# rotating to the new grid
new_x_array[tt] = temp_x*grid_set.ang_c.T - temp_y*grid_set.ang_s.T
new_y_array[tt] = temp_y*grid_set.ang_c.T + temp_x*grid_set.ang_s.T
return dy.vec_data_year(new_x_array,new_y_array,dates,periods)
@jit
def ellipsoidal_distance(long1, lat1, long2, lat2,deg=False,eps=1e-10):
"""
(long1, lat1, long2, lat2) all in radians
outputs a distance in m
"""
if np.isnan([long1, lat1, long2, lat2]).any():
return np.nan
else:
if deg:
long1 = np.deg2rad(long1)
lat1 = np.deg2rad(lat1)
long2 = np.deg2rad(long2)
lat2 = np.deg2rad(lat2)
a = 6378137.0 # equatorial radius in meters
f = 1/298.257223563 # ellipsoid flattening
b = (1 - f)*a
tolerance = eps # to stop iteration
phi1, phi2 = lat1, lat2
U1 = np.arctan((1-f)*np.tan(phi1))
U2 = np.arctan((1-f)*np.tan(phi2))
L1, L2 = long1, long2
L = L2 - L1
i = 0
lambda_old = L + 0
while True:
t = (np.cos(U2)*np.sin(lambda_old))**2
t += (np.cos(U1)*np.sin(U2) - np.sin(U1)*np.cos(U2)*np.cos(lambda_old))**2
sin_sigma = t**0.5
cos_sigma = np.sin(U1)*np.sin(U2) + np.cos(U1)*np.cos(U2)*np.cos(lambda_old)
sigma = np.arctan2(sin_sigma, cos_sigma)
sin_alpha = np.cos(U1)*np.cos(U2)*np.sin(lambda_old) / (sin_sigma)
cos_sq_alpha = 1 - sin_alpha**2
cos_2sigma_m = cos_sigma - 2*np.sin(U1)*np.sin(U2)/(cos_sq_alpha+1e-12)
C = f*cos_sq_alpha*(4 + f*(4-3*cos_sq_alpha))/16
t = sigma + C*sin_sigma*(cos_2sigma_m + C*cos_sigma*(-1 + 2*cos_2sigma_m**2))
lambda_new = L + (1 - C)*f*sin_alpha*t
if np.abs(lambda_new - lambda_old) <= tolerance:
break
elif i > 1000:
return np.nan
break
else:
lambda_old = lambda_new
i += 1
u2 = cos_sq_alpha*((a**2 - b**2)/b**2)
A = 1 + (u2/16384)*(4096 + u2*(-768+u2*(320 - 175*u2)))
B = (u2/1024)*(256 + u2*(-128 + u2*(74 - 47*u2)))
t = cos_2sigma_m + 0.25*B*(cos_sigma*(-1 + 2*cos_2sigma_m**2))
t -= (B/6)*cos_2sigma_m*(-3 + 4*sin_sigma**2)*(-3 + 4*cos_2sigma_m**2)
delta_sigma = B * sin_sigma * t
s = b*A*(sigma - delta_sigma)
return s
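# A minimal usage sketch for ellipsoidal_distance (Vincenty-style inverse on the
# WGS84 ellipsoid). Inputs are radians unless deg=True; output is metres, with
# NaN returned for NaN input or if the iteration fails to converge:
#
#     d = ellipsoidal_distance(-0.1, 51.5, 2.35, 48.85, deg=True)  # approx London to Paris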
@jit
def lon_lat_angle( lon1,lat1,lon2,lat2,deg=False,return_trig = False ):
"""
    #LON_LAT_ANGLE finds the geodesic angle from point 1 to point 2
    #(lat lon in radians)
    # This is done by a series of axes rotations to get the 1st point at 0,0
    # keeping the second the same relative distance apart. The rotation
# needed to get the 2nd point directly north of the 1st is the geodesic
# angle.
"""
if np.isnan([lon1,lat1,lon2,lat2]).any():
if return_trig: return np.nan, np.nan
else: return np.nan
else:
if deg:
lon1 = np.deg2rad(lon1)
lat1 = np.deg2rad(lat1)
lon2 = np.deg2rad(lon2)
lat2 = np.deg2rad(lat2)
C_lat=np.cos(lat2);
S_lat=np.sin(lat2);
C_lon=np.cos(lon2);
S_lon=np.sin(lon2);
C_1=np.cos(-lon1);
S_1=np.sin(-lon1);
C_2=np.cos(-lat1);
S_2=np.sin(-lat1);
A1=[[C_1, -S_1, 0],
[S_1, C_1, 0],
[0, 0, 1]]
A2=[[C_2, 0, -S_2],
[0, 1, 0 ],
[S_2, 0, C_2]]
Borig=[C_lat*C_lon,
C_lat*S_lon,
S_lat ];
B=np.matmul(A2,A1)
B=np.matmul(B,Borig)
# print(B)
if return_trig:
scale=np.hypot(B[1],B[2])
angle_sin=-B[1]/scale
angle_cos= B[2]/scale
return angle_cos, angle_sin
else:
angle=np.arctan2(-B[1],B[2])
return angle
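# A minimal usage sketch for lon_lat_angle, assuming hypothetical coordinates in
# degrees; the returned angle is in radians, or cos/sin if return_trig=True:
#
#     ang = lon_lat_angle(lon1, lat1, lon2, lat2, deg=True)
#     c, s = lon_lat_angle(lon1, lat1, lon2, lat2, deg=True, return_trig=True)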
def nearest_xy(lon,lat,grid_set):
x,y = np.unravel_index(np.argmin(
np.abs(grid_set.lons - lon) +
np.abs(grid_set.lats - lat)),
np.shape(grid_set.lons))
return x,y
def nearest_dy(lon,lat,t,gs,dy,tr = [0,0],box = [0,0],time_vec = False,space_array = False):
"""
give this a dy object and a gs object,
the nearest point to the supplied lon lat will be returned
tr is a time range option [time points previous, after]
    if tr > 0 time_vec=True will return a vector of the time point, False is nanmean
box is a spatial range option [range in x, range in y]
if there is a box, space_array=True returns the whole box, False is nanmean
"""
y,x = nearest_xy(lon,lat,gs)
out_array = dy[t-tr[0]:t+tr[1]+1,x-box[0]:x+box[0]+1,y-box[1]:y+box[1]+1]
if time_vec and space_array:
return out_array
elif time_vec:
return np.nanmean(out_array,axis = (1,2))
elif space_array:
return np.nanmean(out_array,axis = 0)
else:
return np.nanmean(out_array)
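# A minimal usage sketch for nearest_dy, assuming a hypothetical grid_set `gs`,
# data_year `DY` and time index `t0`:
#
#     val = nearest_dy(lon, lat, t0, gs, DY)                            # single nanmean value
#     ser = nearest_dy(lon, lat, t0, gs, DY, tr=[2, 2], time_vec=True)  # 5-point time series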
def nearest_interp(lon,lat,t,gs,dy,tr = [0,0],time_vec = False):
"""
give this a dy object and a gs object,
the nearest point to the supplied lon lat will be returned
tr is a time range option [time points previous, after]
if tr > 0 time_vec=True will return a vector of the time point, False is nanmean
box is a spatial range option [range in x, range in y]
if there is a box, space_array=True returns the whole box, False is nanmean
"""
x,y = nearest_xy(lon,lat,gs)
# find 4 point weighting via x,y or lon,lat?
xpts,ypts = gs.mplot(lon,lat)
if xpts<gs.xpts[x,y]:
try:
xw = 1.0 - (gs.xpts[x,y]-xpts)/(gs.xpts[x,y] - gs.xpts[x-1,y])
except IndexError:
xw = 1.0
else:
try:
xw = 1.0 - (xpts - gs.xpts[x,y])/(gs.xpts[x+1,y] - gs.xpts[x,y])
except IndexError:
xw = 1.0
if ypts<gs.ypts[x,y]:
try:
yw = 1.0 - (gs.ypts[x,y]-ypts)/(gs.ypts[x,y] - gs.ypts[x-1,y])
except IndexError:
yw = 1.0
else:
try:
yw = 1.0 - (ypts - gs.ypts[x,y])/(gs.ypts[x+1,y] - gs.ypts[x,y])
except IndexError:
yw = 1.0
# print(sigi,sigw,tori,torw)
try:
        pwf1 = xw * yw *dy[t-tr[0]:t+tr[1]+1,x  ,y]
except IndexError:
pwf1 = np.nan
try:
        pwf2 = (1.0-xw)* yw *dy[t-tr[0]:t+tr[1]+1,x+1,y]
except IndexError:
pwf2 = np.nan
try:
        pwf3 = xw *(1.0-yw)*dy[t-tr[0]:t+tr[1]+1,x  ,y+1]
except IndexError:
pwf3 = np.nan
try:
        pwf4 = (1.0-xw)*(1.0-yw)*dy[t-tr[0]:t+tr[1]+1,x+1,y+1]
except IndexError:
pwf4 = np.nan
if time_vec:
return np.nansum([pwf1,pwf2,pwf3,pwf4],axis = 1)
else:
return np.nansum([pwf1,pwf2,pwf3,pwf4])
# out_array = dy[t-tr[0]:t+tr[1]+1,x-box[0]:x+box[0]+1,y-box[1]:y+box[1]+1]
# if time_vec and space_array:
# return out_array
# elif time_vec:
# return np.nanmean(out_array,axis = (1,2))
# elif space_array:
# return np.nanmean(out_array,axis = 0)
# else:
# return np.nanmean(out_array)
# def gen_from_lonlat():
# """
# Creates a lon/lat square grid from a 1d vector of each
# Returns a grid set with points defined
# """
class Gs2Gs:
"""
For regridding
    given a new projection, this builds the internals of griddata, but for repeat usage
feed it two grid_sets and it'll be a function
regridding from one grid_set to the other
"""
def __init__(self,gs_native,gs_new,vectors = False):
"""
gs_native is the grid set the data is defined on
gs_new is where you want it to go
set vectors = True if you want to regrid vectors too
this will require all the correct angular grid info
"""
from scipy.spatial import Delaunay
        # get the triangulation
self.vectors = vectors
xorig,yorig = gs_new.mplot(gs_native.lons,gs_native.lats)
xyorig = np.vstack((xorig.ravel(),yorig.ravel())).T
self.tri = Delaunay(xyorig) # Compute the triangulation
# destination mesh
self.mesh_new = (gs_new.xpts.T,gs_new.ypts.T)
if vectors:
            # record the necessary angles to de-rotate the input vectors
# and re-rotate the output vectors
self.in_ang_c = gs_native.ang_c
self.in_ang_s = gs_native.ang_s
self.new_mplot = gs_new.mplot
self.new_lons = gs_new.lons
self.new_lats = gs_new.lats
def rg_array(self,arr):
"""
the regridding function
feed it the array defined on gs_native
out pops a new array on gs_new
"""
from scipy.interpolate import LinearNDInterpolator
# define the function
interpolator = LinearNDInterpolator(self.tri, arr.T.ravel())
return interpolator(self.mesh_new)
def rg_vecs(self,x,y):
"""
the regridding function
feed it the x,y comps defined on gs_native
out pops a new array on gs_new
"""
from scipy.interpolate import LinearNDInterpolator
if self.vectors:
# de-rotate the input vecs (back to lon lat square)
xr = x*self.in_ang_c - y*self.in_ang_s
yr = y*self.in_ang_c + x*self.in_ang_s
# define the function
interpolator = LinearNDInterpolator(self.tri, xr.T.ravel())
# use it
xrr = interpolator(self.mesh_new)
# define the function
interpolator = LinearNDInterpolator(self.tri, yr.T.ravel())
# use it
yrr = interpolator(self.mesh_new)
return self.new_mplot.rotate_vector(xrr,yrr,
self.new_lons.T,self.new_lats.T)
else:
print('Gs2Gs not defined for vectors, re-initialise')
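# A minimal usage sketch for Gs2Gs, assuming hypothetical grid_sets `gs_in` and
# `gs_out`; the triangulation is built once and reused for every field:
#
#     G2G = Gs2Gs(gs_in, gs_out)              # scalar regridder
#     new_field = G2G.rg_array(old_field)     # old_field defined on gs_in
#     G2Gv = Gs2Gs(gs_in, gs_out, vectors=True)
#     u_new, v_new = G2Gv.rg_vecs(u_old, v_old)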
def border(arr): # Input array : arr
alist=[arr[0,:-1], arr[:-1,-1], arr[-1,::-1], arr[-2:0:-1,0],[arr[0,0]]]
return np.concatenate(alist) | 38.71775 | 116 | 0.558044 |
79475504127da5cfe10c0a36ebefd305cbf63e99 | 6,965 | py | Python | chrome/test/functional/autotour.py | Gitman1989/chromium | 2b1cceae1075ef012fb225deec8b4c8bbe4bc897 | [
"BSD-3-Clause"
] | 2 | 2017-09-02T19:08:28.000Z | 2021-11-15T15:15:14.000Z | chrome/test/functional/autotour.py | meego-tablet-ux/meego-app-browser | 0f4ef17bd4b399c9c990a2f6ca939099495c2b9c | [
"BSD-3-Clause"
] | null | null | null | chrome/test/functional/autotour.py | meego-tablet-ux/meego-app-browser | 0f4ef17bd4b399c9c990a2f6ca939099495c2b9c | [
"BSD-3-Clause"
] | 1 | 2020-11-04T07:22:28.000Z | 2020-11-04T07:22:28.000Z | #!/usr/bin/python
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import random
import time
"""Autotour is a semi automatic exploratory framework for exploring actions
defined on an object. It uses decorators to mark methods as actions and an
explorer object to explore.
"""
def GodelAction(weight=1, requires=''):
"""Action Decorator
This function is the key to exploration. In effect, it annotates the wrapping
function with attributes such as is_action = True and sets some weights and
the requires condition. The explorer can invoke functions based on these
attributes.
Args:
weight: Weight for the action, default is set to 1. Can be any number >= 0
requires: Precondition for an action to be executed. This usually points
to a function which returns a boolean result.
"""
def custom(target):
"""This is the decorator which sets the attributes for
is_action, weight and requires.
Args:
target: Function to be decorated.
Returns:
The wrapped function with correct attributes set.
"""
def wrapper(self, *args, **kwargs):
target(self, *args, **kwargs)
wrapper.is_action = True
wrapper.weight = weight
wrapper.requires = requires
return wrapper
return custom
class Godel(object):
"""Base class for all exploratory objects.
All objects that wish to be explored must inherit this class.
It provides an important method GetActions, which looks at all the functions
and returns only those that have the is_action attribute set.
"""
def Initialize(self, uniqueid):
self._uniqueid = uniqueid
def GetName(self):
return type(self).__name__ + str(self._uniqueid)
def GetActions(self):
"""Gets all the actions for this class."""
return [method for method in dir(self)
if hasattr(getattr(self, method), 'is_action')]
def GetWeight(self, method):
"""Returns the weight of a given method.
Args:
method: Name of the Method whose Weight is queried
"""
method_obj = getattr(self, method)
return getattr(method_obj, 'weight', 1)
def SetWeight(self, method, weight):
"""Sets the weight for a given method."""
method_obj = getattr(self, method)
method_obj.im_func.weight = weight
class Explorer(object):
"""Explorer class that controls the exploration of the object.
This class has methods to add the exploration object and
initiate exploration on them.
"""
def __init__(self):
self._seed = time.time()
logging.info('#Seeded with %s' % self._seed)
random.seed(self._seed)
self._actionlimit = -1
self._godels = []
self._fh = logging.FileHandler(str(self._seed))
self._log = logging.getLogger()
self._log.addHandler(self._fh)
self._log.setLevel(logging.DEBUG)
self._uniqueid = 0
def NextId(self):
"""Gets the NextId by incrementing a counter."""
self._uniqueid = self._uniqueid + 1
return self._uniqueid
def Add(self, obj):
"""Adds an object which inherits from Godel to be explored.
Args:
obj: Object to be explored which usually inherits from the Godel class.
"""
uniqueid = self.NextId()
obj.Initialize(uniqueid)
name = type(obj).__name__
self._log.info('%s = %s()' % (name + str(uniqueid), name))
self._godels.append(obj)
def MeetsRequirement(self, godel, methodname):
"""Method that returns true if the method's precondition is satisfied.
    It does so by using the "requires" attribute, which is set by the decorator,
    and invoking the referenced precondition, which must return a boolean value.
Args:
godel: Godel object on which the requirement needs to be tested.
methodname: Method name which needs to be called to test.
Returns:
True if the methodname invoked returned True or methodname was empty,
False otherwise
"""
method = getattr(godel, methodname)
requires = method.im_func.requires
if callable(requires):
return requires(godel)
else:
if len(requires) > 0:
precondition = getattr(godel, requires)
return precondition()
else:
return True
def GetAvailableActions(self):
"""Returns a list of only those actions that satisfy their preconditions"""
action_list = []
for godel in self._godels:
for action in godel.GetActions():
if self.MeetsRequirement(godel, action):
action_list.append([godel, action, godel.GetWeight(action)])
return action_list
def Choose(self, action_list):
"""Choosing function which allows to choose a method based on random
but weighted scale. So if one method has twice the weight, it is twice as
likely to be choosen than the other.
Args:
action_list: A list of Actions from which to choose.
Returns:
Chosen Action or None.
"""
total = sum([action_info[2] for action_info in action_list])
    # Find a pivot value randomly from the total weight.
index = random.randint(0, total)
for action_info in action_list:
# Decrease the total weight by the current action weight.
total = total - action_info[2]
# If total has fallen below the pivot, then we select the current action
if total <= index:
        return action_info
return None
def Execute(self, action_info):
"""Executes the action and logs to console the action taken.
Args:
action_info: Action Info for the action to execute.
action_info[0] is the object on which the action is to be invoked.
action_info[1] is the name of the method which is to be invoked.
action_info[2] is the weight of the method.
"""
action = getattr(action_info[0], action_info[1])
self._log.info('%s.%s()' % (action_info[0].GetName(), action_info[1]))
action()
def Explore(self, function=None):
"""Sets the exploration in progress by repeatedly seeing if
any actions are available and if so continues to call them. It times out
after specified action limit.
Args:
function: A function which can be called to determine if the execution
should continue. This function is invoked after each step and
if it returns True, execution stops. This is useful in writing
tests which explore until a particular condition is met.
Returns:
True, if given |function| returns True, OR if no more action could be
chosen. False, otherwise.
"""
count = 0
while(True):
if self._actionlimit > 0 and count > self._actionlimit:
return False
action_list = self.GetAvailableActions()
action_info = self.Choose(action_list)
if action_info is None:
return function is None
self.Execute(action_info)
count = count + 1
if function is not None and function():
return True | 32.853774 | 79 | 0.684566 |
7947561265bb670f2ff10954a11d9de75a203cb6 | 5,194 | py | Python | GLMAutoNODE.py | BannanaDJoe/golem-node | e145bf94ee41b8241357c0505730c75e7be48593 | [
"MIT"
] | null | null | null | GLMAutoNODE.py | BannanaDJoe/golem-node | e145bf94ee41b8241357c0505730c75e7be48593 | [
"MIT"
] | null | null | null | GLMAutoNODE.py | BannanaDJoe/golem-node | e145bf94ee41b8241357c0505730c75e7be48593 | [
"MIT"
] | 1 | 2022-02-16T17:47:04.000Z | 2022-02-16T17:47:04.000Z | #!/usr/bin/env python3
import time
import platform
from typing import NewType
import shutil, errno
import urllib.request, json
import subprocess
class NodeCreator:
# Raise Amounts
NodeID = 0
RaiseAmount = 1.00
Multi = 1.15
Index = 1
CPU_Hour = 0.1
ENV_Hour = 0.02
StartFee = 0.0
def __init__(self):
type (self).NodeID +=1
if self.NodeID <= 1:
self.LOGO()
self.quest()
self.confNode()
self.Run()
self.changeRaise(self.RaiseAmount)
self.Looper()
else:
self.setRaise()
self.changer()
self.changeRaise(self.RaiseAmount)
            self.Looper()
@classmethod
def quest(cls):
cls.NodeName = str(input("Enter your Node Name: "))
cls.Wallet = str(input("Enter your Wallet Address: "))
cls.NODE_NUM = int(input("Enter Number of Nodes you want to Start: "))
cls.CPU = int(input("Enter your CPU amount to use: "))
cls.MEM = int(input("Enter Memory in GIB: "))
cls.DISK = int(input("Enter DISKSPACE in GIB: "))
cls.CPU_Hour = float(input("Enter CPU COSTS PER HOUR: "))
cls.ENV_Hour = float(input("Enter ENV COSTS PER HOUR: "))
cls.StartFee = float(input("ENTER PRICE FOR START JOBS: "))
print("Setup Price Raise Amount: set it like this: |15% = 1.15|30% = 1.30| ")
print("Leave it Empty if you want the !!!SAME PRICE ON ALL NODES!!")
        cls.Multi = float(input("Enter the multiplier you want: "))
@classmethod
def confNode(self):
file = open('.env', 'w')
#Mainnet Settings
file.write("#This file contains all settings change to your needs")
file.write("\n")
file.write("\nYA_PAYMENT_NETWORK=mainnet")
file.write("\nNODE_SUBNET=public-beta")
#NODE Settings
file.write("\n")
file.write("\n# run settings")
file.write("\nNODE_NAME=" + str(self.NodeName))
file.write("\nYA_ACCOUNT="+ str(self.Wallet))
file.write("\nNODE_CPU_THREADS=" + str(self.CPU))
file.write("\nNODE_MEM_GIB=" + str(self.MEM))
file.write("\nNODE_STORAGE_GIB=" + str(self.DISK))
file.write("\nNODE_COSTS_CPU_HOUR=" + str(self.CPU_Hour))
file.write("\nNODE_COSTS_HOUR=" + str(self.ENV_Hour))
file.write("\nNODE_COSTS_START=" + str(self.StartFee))
# Run Settings
file.write("\n")
file.write("\nNODE_NUM=" + str(self.NODE_NUM))
file.write("\nNICENESS=20\n")
# AutoHeal Settings
file.write("AUTOHEAL_CONTAINER_LABEL=all\n")
file.write("AUTOHEAL_START_PERIOD=300\n")
file.write("AUTOHEAL_INTERVAL=5\n")
# Docker-Compose Name Settings
file.write("\nCOMPOSE_PROJECT_NAME=" + str(self.NodeName))
        # Uncomment for Debug LOG on discord @Philip_golem
#file.write("RUST_LOG=debug\n")
#file.write("ya_market=info\n")
#file.write("trust_dns_proto=info\n")
#file.write("trust_dns_resolver=info\n")
#file.write("a_sb_router=trace\n")
#file.write("ya_net=trace\n")
#file.write("ya_service_bus=trace\n")
file.close()
def Looper(self):
while self.NodeID < self.NODE_NUM:
main()
else:
pass
def changeRaise(self, Multi):
type(self).RaiseAmount *=self.Multi
type(self).CPU_Hour *=self.Multi
type(self).ENV_Hour *=self.Multi
type(self).StartFee *=self.Multi
def setRaise(self, RaiseAmount=RaiseAmount):
self.RaiseAmount = RaiseAmount
self.CPU_Hour = float(self.CPU_Hour * self.RaiseAmount)
self.ENV_Hour = float(self.ENV_Hour * self.RaiseAmount)
self.StartFee = float(self.StartFee * self.RaiseAmount)
@classmethod
def changer(cls):
index = "index=" + str(cls.NodeID)
cpuPrice = "cpuh=" + str(cls.CPU_Hour)
envPrice = "envh=" + str(cls.ENV_Hour)
startPrice = "sfee=" + str(cls.StartFee)
cpuh = subprocess.run(['make', 'cpuh', index , cpuPrice])
envh = subprocess.run(['make', 'envh', index , envPrice])
sfee = subprocess.run(['make', 'sfee', index , startPrice])
@classmethod
def Run(cls):
subprocess.run('make presets', shell=True)
subprocess.run('make upd', shell=True)
print("Nodes are Starting")
time.sleep(1)
print("Node Price changes will start soon")
time.sleep(2)
@classmethod
def LOGO(cls):
print(" **** welcome To GLMAutoNODE ****")
time.sleep(0.1)
print(" __ __ __ __ ")
time.sleep(0.1)
print("/ _ | |\/| /\ |_ _ |\ |/ \| \|_ ")
time.sleep(0.1)
print("\__)|__| |/--\|_||_(_)| \|\__/|__/|__ ")
def main():
create = NodeCreator()
# Comming soon auto change Price by averange price
# https://api.golemstats.com/v1/provider/average/earnings
# https://api.golemstats.com/v1/network/pricing/average
def worker():
pass
main()
| 31.865031 | 85 | 0.576242 |
7947569ed1e044416be4573b63a6890cd453732f | 6,551 | py | Python | configs/fcos/fcos_t_s_full_mask_finetune_halved_from_scratch_good_initial_backbone_adaption_constant_term_dynamic_weight_normalized_pyramid_attention_r50_caffe_fpn_gn_1x_4gpu_nvidia.py | Lanselott/mmdetection | 03ce0a87f4d52f4adf4f78fd39ad30b2da394376 | [
"Apache-2.0"
] | null | null | null | configs/fcos/fcos_t_s_full_mask_finetune_halved_from_scratch_good_initial_backbone_adaption_constant_term_dynamic_weight_normalized_pyramid_attention_r50_caffe_fpn_gn_1x_4gpu_nvidia.py | Lanselott/mmdetection | 03ce0a87f4d52f4adf4f78fd39ad30b2da394376 | [
"Apache-2.0"
] | null | null | null | configs/fcos/fcos_t_s_full_mask_finetune_halved_from_scratch_good_initial_backbone_adaption_constant_term_dynamic_weight_normalized_pyramid_attention_r50_caffe_fpn_gn_1x_4gpu_nvidia.py | Lanselott/mmdetection | 03ce0a87f4d52f4adf4f78fd39ad30b2da394376 | [
"Apache-2.0"
] | null | null | null | # model settings
BLOCK_ALIGN = False
PYRAMID_ALIGN = True
PRI_PYRAMID_ALIGN = False
HEAD_ALIGN = False
FREEZE_TEACHER = False
GOOD_INITIAL = True
BN_TOPK_SELECTION = False
RATIO = 2
model = dict(
type='FCOSTS',
pretrained='open-mmlab://resnet50_caffe',
backbone=dict(
type='ResTSNet',
depth=50,
s_depth=50,
t_s_ratio=RATIO,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='caffe',
pyramid_hint_loss=dict(type='MSELoss', loss_weight=1),
apply_block_wise_alignment=BLOCK_ALIGN,
freeze_teacher=FREEZE_TEACHER,
good_initial=GOOD_INITIAL,
feature_adaption=True,
conv_downsample=True,
constant_term=True,
bn_topk_selection=BN_TOPK_SELECTION,
),
neck=dict(
type='FPNTS',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
s_in_channels=[128, 256, 512, 1024],
s_out_channels=128,
start_level=1,
t_s_ratio=RATIO,
add_extra_convs=True,
extra_convs_on_inputs=False, # use P5
num_outs=5,
relu_before_extra_convs=True,
apply_block_wise_alignment=BLOCK_ALIGN,
freeze_teacher=FREEZE_TEACHER),
bbox_head=dict(
type='FCOSTSFullMaskHead',
num_classes=81,
in_channels=256,
s_in_channels=128,
stacked_convs=4,
feat_channels=256,
s_feat_channels=128,
t_s_ratio=RATIO,
training=True,
eval_student=False,
learn_when_train=True,
finetune_student=True,
apply_iou_similarity=False,
apply_soft_cls_distill=False,
apply_feature_alignment=False, # regression
norm_pyramid=True,
dynamic_weight=True,
multi_levels=5,
temperature=1,
align_level=0,
apply_block_wise_alignment=BLOCK_ALIGN,
apply_pyramid_wise_alignment=PYRAMID_ALIGN,
apply_pri_pyramid_wise_alignment=PRI_PYRAMID_ALIGN,
pyramid_wise_attention=True,
downgrade_bg=False,
pyramid_factor=1,
apply_head_wise_alignment=HEAD_ALIGN,
freeze_teacher=FREEZE_TEACHER,
block_teacher_attention=False,
pyramid_attention_only=False,
# student distillation params
beta=1.5,
gamma=2,
adap_distill_loss_weight=0.3,
strides=[8, 16, 32, 64, 128],
pyramid_hint_loss=dict(type='MSELoss', loss_weight=1),
reg_head_hint_loss=dict(type='MSELoss', loss_weight=1),
cls_head_hint_loss=dict(type='MSELoss', loss_weight=1),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
# loss_s_t_cls=dict(
# type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
# loss_s_t_reg=dict(
# type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_s_t_cls=dict(type='MSELoss', loss_weight=5),
loss_s_t_reg=dict(type='MSELoss', loss_weight=5),
t_s_distance=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
reduction='none',
loss_weight=1.0),
# loss_iou_similiarity=dict(
# type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_iou_similiarity=dict(type='MSELoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)))
# training and testing settings
train_cfg = dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False)
test_cfg = dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = '/coco/data/2017/'
img_norm_cfg = dict(
mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=4,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'images/train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'images/val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'images/val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD',
lr=0.01,
momentum=0.9,
weight_decay=0.0001,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='constant',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/fcos_r50_caffe_fpn_gn_1x_4gpu'
load_from = None #'./fcos_t_s_finetune_halved_student_from_scratch_epoch_12.pth'
resume_from = None
workflow = [('train', 1)]
| 31.956098 | 81 | 0.637918 |
794758baeb8e5a1859cb91140c19663379bbf66e | 3,596 | py | Python | application/invoke.py | alexeysp11/sdc-console-python | 237ec357b0883d0d033cd0f5609ac8a4f064f86f | [
"MIT"
] | null | null | null | application/invoke.py | alexeysp11/sdc-console-python | 237ec357b0883d0d033cd0f5609ac8a4f064f86f | [
"MIT"
] | null | null | null | application/invoke.py | alexeysp11/sdc-console-python | 237ec357b0883d0d033cd0f5609ac8a4f064f86f | [
"MIT"
] | null | null | null | import sys, traceback
sys.path.append('../console')
sys.path.append('../physical_models')
sys.path.append('../signal_processing')
sys.path.append('../control_algorithms')
from console.console import Console as console
from physical_models.car import Car
from signal_processing.spa import SignalProcessingAlgorithm as SPA
from control_algorithms.fuzzy_driver_miso import FuzzyDriverMiso
from control_algorithms.fuzzy_driver_siso import FuzzyDriverSiso
class Invoke:
"""
Calls Kalman Filter for GPS, IMU (that consists of GPS, gyro,
accelerometer) and Fuzzy controller.
"""
def imu(mode):
"""
        Allows to invoke all required modules in order to execute IMU module
that consists of GPS, speedometer, accelerometer.
Takes `mode` as an input parameter that indicates car's motion pattern
(for example, constant position `p`, constant speed `v` or random
acceleration `a`).
"""
try:
car = Car()
dim = 2
# assign initial velocity for 1D.
velocity, is_accel = car.default_velocity(mode, dim=dim)
# ask user if default parameters should be used for modeling.
is_default = console.is_default(dimension=dim)
# get initial state of a car.
init_data = car.initialize(dimension=dim,
init_velocity=velocity,
mode=mode,
is_accel=is_accel,
is_default=is_default)
# invoke Kalman filter for processing IMU data.
spa = SPA()
spa.imu_kf(dimension=dim, init_data=init_data)
except Exception as e:
print('Exception: '.upper(), e)
traceback.print_tb(e.__traceback__)
init_data = None
def gps(mode):
"""
Allows to invoke all required modules in order to execute GPS module.
Takes `mode` as an input parameter that indicates car's motion pattern
(for example, constant position `p`, constant speed `v` or random
acceleration `a`).
"""
try:
car = Car()
dim = 2
# assign initial velocity.
velocity, is_accel = car.default_velocity(mode, dim=dim)
# ask user if default parameters should be used for modeling.
is_default = console.is_default(dimension=dim)
# get initial state of a car.
init_data = car.initialize(dimension=dim,
init_velocity=velocity,
mode=mode,
is_accel=is_accel,
is_default=is_default)
# invoke Kalman filter for processing GPS data.
spa = SPA()
spa.gps_kf(dimension=dim, init_data=init_data)
except Exception as e:
print('Exception: '.upper(), e)
traceback.print_tb(e.__traceback__)
init_data = None
def fuzzy():
try:
is_miso = console.is_miso_fuzzy()
if is_miso == True:
fc = FuzzyDriverMiso()
fc.call()
else:
fc = FuzzyDriverSiso()
fc.call()
except Exception as e:
print('Exception: '.upper(), e)
traceback.print_tb(e.__traceback__)
| 34.576923 | 78 | 0.544772 |
79475911ed637dbd4bca456efd23639718bbbb5b | 112 | py | Python | django_th/signals.py | Leopere/django-th | 86c999d16bcf30b6224206e5b40824309834ac8c | [
"BSD-3-Clause"
] | 1,069 | 2015-01-07T01:55:57.000Z | 2022-02-17T10:50:57.000Z | django_th/signals.py | barrygolden/django-th | 86c999d16bcf30b6224206e5b40824309834ac8c | [
"BSD-3-Clause"
] | 207 | 2015-01-06T21:41:17.000Z | 2018-02-20T14:10:15.000Z | django_th/signals.py | barrygolden/django-th | 86c999d16bcf30b6224206e5b40824309834ac8c | [
"BSD-3-Clause"
] | 117 | 2015-01-04T16:21:13.000Z | 2022-02-22T06:18:49.000Z | from django.dispatch import Signal
digest_event = Signal(providing_args=["user", "title", "link", "duration"])
| 28 | 75 | 0.741071 |
79475972a57a3b60a04ca4cfd92a07c3483f49cf | 26,713 | py | Python | mypy/util.py | evinism/mypy | d87d468ee80908a865c388a7743c84c1f5c917d7 | [
"PSF-2.0"
] | 2 | 2020-02-13T06:41:07.000Z | 2022-02-14T09:28:02.000Z | mypy/util.py | evinism/mypy | d87d468ee80908a865c388a7743c84c1f5c917d7 | [
"PSF-2.0"
] | 10 | 2021-06-16T20:48:32.000Z | 2021-10-04T18:22:02.000Z | mypy/util.py | evinism/mypy | d87d468ee80908a865c388a7743c84c1f5c917d7 | [
"PSF-2.0"
] | null | null | null | """Utility functions with no non-trivial dependencies."""
import os
import pathlib
import re
import subprocess
import sys
import hashlib
import io
import shutil
from typing import (
TypeVar, List, Tuple, Optional, Dict, Sequence, Iterable, Container, IO, Callable
)
from typing_extensions import Final, Type, Literal
try:
import curses
import _curses # noqa
CURSES_ENABLED = True
except ImportError:
CURSES_ENABLED = False
T = TypeVar('T')
ENCODING_RE = \
re.compile(br'([ \t\v]*#.*(\r\n?|\n))??[ \t\v]*#.*coding[:=][ \t]*([-\w.]+)') # type: Final
DEFAULT_SOURCE_OFFSET = 4 # type: Final
DEFAULT_COLUMNS = 80 # type: Final
# At least this number of columns will be shown on each side of
# error location when printing source code snippet.
MINIMUM_WIDTH = 20
# VT100 color code processing was added in Windows 10, but only the second major update,
# Threshold 2. Fortunately, everyone (even on LTSB, Long Term Support Branch) should
# have a version of Windows 10 newer than this. Note that Windows 8 and below are not
# supported, but are either going out of support, or make up only a few % of the market.
MINIMUM_WINDOWS_MAJOR_VT100 = 10
MINIMUM_WINDOWS_BUILD_VT100 = 10586
default_python2_interpreter = \
['python2', 'python', '/usr/bin/python', 'C:\\Python27\\python.exe'] # type: Final
def split_module_names(mod_name: str) -> List[str]:
"""Return the module and all parent module names.
So, if `mod_name` is 'a.b.c', this function will return
['a.b.c', 'a.b', and 'a'].
"""
out = [mod_name]
while '.' in mod_name:
mod_name = mod_name.rsplit('.', 1)[0]
out.append(mod_name)
return out
def module_prefix(modules: Iterable[str], target: str) -> Optional[str]:
result = split_target(modules, target)
if result is None:
return None
return result[0]
def split_target(modules: Iterable[str], target: str) -> Optional[Tuple[str, str]]:
remaining = [] # type: List[str]
while True:
if target in modules:
return target, '.'.join(remaining)
components = target.rsplit('.', 1)
if len(components) == 1:
return None
target = components[0]
remaining.insert(0, components[1])
def short_type(obj: object) -> str:
"""Return the last component of the type name of an object.
If obj is None, return 'nil'. For example, if obj is 1, return 'int'.
"""
if obj is None:
return 'nil'
t = str(type(obj))
return t.split('.')[-1].rstrip("'>")
def find_python_encoding(text: bytes, pyversion: Tuple[int, int]) -> Tuple[str, int]:
"""PEP-263 for detecting Python file encoding"""
result = ENCODING_RE.match(text)
if result:
line = 2 if result.group(1) else 1
encoding = result.group(3).decode('ascii')
# Handle some aliases that Python is happy to accept and that are used in the wild.
if encoding.startswith(('iso-latin-1-', 'latin-1-')) or encoding == 'iso-latin-1':
encoding = 'latin-1'
return encoding, line
else:
default_encoding = 'utf8' if pyversion[0] >= 3 else 'ascii'
return default_encoding, -1
class DecodeError(Exception):
"""Exception raised when a file cannot be decoded due to an unknown encoding type.
Essentially a wrapper for the LookupError raised by `bytearray.decode`
"""
def decode_python_encoding(source: bytes, pyversion: Tuple[int, int]) -> str:
"""Read the Python file with while obeying PEP-263 encoding detection.
Returns the source as a string.
"""
# check for BOM UTF-8 encoding and strip it out if present
if source.startswith(b'\xef\xbb\xbf'):
encoding = 'utf8'
source = source[3:]
else:
# look at first two lines and check if PEP-263 coding is present
encoding, _ = find_python_encoding(source, pyversion)
try:
source_text = source.decode(encoding)
except LookupError as lookuperr:
raise DecodeError(str(lookuperr)) from lookuperr
return source_text
def read_py_file(path: str, read: Callable[[str], bytes],
pyversion: Tuple[int, int]) -> Optional[List[str]]:
"""Try reading a Python file as list of source lines.
Return None if something goes wrong.
"""
try:
source = read(path)
except OSError:
return None
else:
try:
source_lines = decode_python_encoding(source, pyversion).splitlines()
except DecodeError:
return None
return source_lines
def trim_source_line(line: str, max_len: int, col: int, min_width: int) -> Tuple[str, int]:
"""Trim a line of source code to fit into max_len.
Show 'min_width' characters on each side of 'col' (an error location). If either
start or end is trimmed, this is indicated by adding '...' there.
A typical result looks like this:
...some_variable = function_to_call(one_arg, other_arg) or...
    Return the trimmed string and the column offset to adjust error location.
"""
if max_len < 2 * min_width + 1:
# In case the window is too tiny it is better to still show something.
max_len = 2 * min_width + 1
# Trivial case: line already fits in.
if len(line) <= max_len:
return line, 0
# If column is not too large so that there is still min_width after it,
# the line doesn't need to be trimmed at the start.
if col + min_width < max_len:
return line[:max_len] + '...', 0
# Otherwise, if the column is not too close to the end, trim both sides.
if col < len(line) - min_width - 1:
offset = col - max_len + min_width + 1
return '...' + line[offset:col + min_width + 1] + '...', offset - 3
# Finally, if the column is near the end, just trim the start.
return '...' + line[-max_len:], len(line) - max_len - 3
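# A minimal illustration of trim_source_line, assuming a hypothetical
# `long_line`; only the trivial short-line case is spelled out exactly:
#
#     trim_source_line("x = f(arg)", 80, col=4, min_width=20)   # -> ("x = f(arg)", 0)
#     trim_source_line(long_line, 80, col=200, min_width=20)    # trimmed with '...' markers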
def get_mypy_comments(source: str) -> List[Tuple[int, str]]:
PREFIX = '# mypy: '
# Don't bother splitting up the lines unless we know it is useful
if PREFIX not in source:
return []
lines = source.split('\n')
results = []
for i, line in enumerate(lines):
if line.startswith(PREFIX):
results.append((i + 1, line[len(PREFIX):]))
return results
_python2_interpreter = None # type: Optional[str]
def try_find_python2_interpreter() -> Optional[str]:
global _python2_interpreter
if _python2_interpreter:
return _python2_interpreter
for interpreter in default_python2_interpreter:
try:
retcode = subprocess.Popen([
interpreter, '-c',
'import sys, typing; assert sys.version_info[:2] == (2, 7)'
]).wait()
if not retcode:
_python2_interpreter = interpreter
return interpreter
except OSError:
pass
return None
PASS_TEMPLATE = """<?xml version="1.0" encoding="utf-8"?>
<testsuite errors="0" failures="0" name="mypy" skips="0" tests="1" time="{time:.3f}">
<testcase classname="mypy" file="mypy" line="1" name="mypy-py{ver}-{platform}" time="{time:.3f}">
</testcase>
</testsuite>
""" # type: Final
FAIL_TEMPLATE = """<?xml version="1.0" encoding="utf-8"?>
<testsuite errors="0" failures="1" name="mypy" skips="0" tests="1" time="{time:.3f}">
<testcase classname="mypy" file="mypy" line="1" name="mypy-py{ver}-{platform}" time="{time:.3f}">
<failure message="mypy produced messages">{text}</failure>
</testcase>
</testsuite>
""" # type: Final
ERROR_TEMPLATE = """<?xml version="1.0" encoding="utf-8"?>
<testsuite errors="1" failures="0" name="mypy" skips="0" tests="1" time="{time:.3f}">
<testcase classname="mypy" file="mypy" line="1" name="mypy-py{ver}-{platform}" time="{time:.3f}">
<error message="mypy produced errors">{text}</error>
</testcase>
</testsuite>
""" # type: Final
def write_junit_xml(dt: float, serious: bool, messages: List[str], path: str,
version: str, platform: str) -> None:
from xml.sax.saxutils import escape
if not messages and not serious:
xml = PASS_TEMPLATE.format(time=dt, ver=version, platform=platform)
elif not serious:
xml = FAIL_TEMPLATE.format(text=escape('\n'.join(messages)), time=dt,
ver=version, platform=platform)
else:
xml = ERROR_TEMPLATE.format(text=escape('\n'.join(messages)), time=dt,
ver=version, platform=platform)
# checks for a directory structure in path and creates folders if needed
xml_dirs = os.path.dirname(os.path.abspath(path))
if not os.path.isdir(xml_dirs):
os.makedirs(xml_dirs)
with open(path, 'wb') as f:
f.write(xml.encode('utf-8'))
class IdMapper:
"""Generate integer ids for objects.
Unlike id(), these start from 0 and increment by 1, and ids won't
get reused across the life-time of IdMapper.
Assume objects don't redefine __eq__ or __hash__.
"""
def __init__(self) -> None:
self.id_map = {} # type: Dict[object, int]
self.next_id = 0
def id(self, o: object) -> int:
if o not in self.id_map:
self.id_map[o] = self.next_id
self.next_id += 1
return self.id_map[o]
def get_prefix(fullname: str) -> str:
"""Drop the final component of a qualified name (e.g. ('x.y' -> 'x')."""
return fullname.rsplit('.', 1)[0]
def get_top_two_prefixes(fullname: str) -> Tuple[str, str]:
"""Return one and two component prefixes of a fully qualified name.
Given 'a.b.c.d', return ('a', 'a.b').
If fullname has only one component, return (fullname, fullname).
"""
components = fullname.split('.', 3)
return components[0], '.'.join(components[:2])
def correct_relative_import(cur_mod_id: str,
relative: int,
target: str,
is_cur_package_init_file: bool) -> Tuple[str, bool]:
if relative == 0:
return target, True
parts = cur_mod_id.split(".")
rel = relative
if is_cur_package_init_file:
rel -= 1
ok = len(parts) >= rel
if rel != 0:
cur_mod_id = ".".join(parts[:-rel])
return cur_mod_id + (("." + target) if target else ""), ok
fields_cache = {} # type: Final[Dict[Type[object], List[str]]]
def get_class_descriptors(cls: 'Type[object]') -> Sequence[str]:
import inspect # Lazy import for minor startup speed win
# Maintain a cache of type -> attributes defined by descriptors in the class
# (that is, attributes from __slots__ and C extension classes)
if cls not in fields_cache:
members = inspect.getmembers(
cls,
lambda o: inspect.isgetsetdescriptor(o) or inspect.ismemberdescriptor(o))
fields_cache[cls] = [x for x, y in members if x != '__weakref__' and x != '__dict__']
return fields_cache[cls]
def replace_object_state(new: object, old: object, copy_dict: bool = False) -> None:
"""Copy state of old node to the new node.
This handles cases where there is __dict__ and/or attribute descriptors
(either from slots or because the type is defined in a C extension module).
Assume that both objects have the same __class__.
"""
if hasattr(old, '__dict__'):
if copy_dict:
new.__dict__ = dict(old.__dict__)
else:
new.__dict__ = old.__dict__
for attr in get_class_descriptors(old.__class__):
try:
if hasattr(old, attr):
setattr(new, attr, getattr(old, attr))
elif hasattr(new, attr):
delattr(new, attr)
# There is no way to distinguish getsetdescriptors that allow
# writes from ones that don't (I think?), so we just ignore
# AttributeErrors if we need to.
# TODO: What about getsetdescriptors that act like properties???
except AttributeError:
pass
def is_sub_path(path1: str, path2: str) -> bool:
"""Given two paths, return if path1 is a sub-path of path2."""
return pathlib.Path(path2) in pathlib.Path(path1).parents
def hard_exit(status: int = 0) -> None:
"""Kill the current process without fully cleaning up.
This can be quite a bit faster than a normal exit() since objects are not freed.
"""
sys.stdout.flush()
sys.stderr.flush()
os._exit(status)
def unmangle(name: str) -> str:
"""Remove internal suffixes from a short name."""
return name.rstrip("'")
def get_unique_redefinition_name(name: str, existing: Container[str]) -> str:
"""Get a simple redefinition name not present among existing.
For example, for name 'foo' we try 'foo-redefinition', 'foo-redefinition2',
'foo-redefinition3', etc. until we find one that is not in existing.
"""
r_name = name + '-redefinition'
if r_name not in existing:
return r_name
i = 2
while r_name + str(i) in existing:
i += 1
return r_name + str(i)
def check_python_version(program: str) -> None:
"""Report issues with the Python used to run mypy, dmypy, or stubgen"""
# Check for known bad Python versions.
if sys.version_info[:2] < (3, 5):
sys.exit("Running {name} with Python 3.4 or lower is not supported; "
"please upgrade to 3.5 or newer".format(name=program))
# this can be deleted once we drop support for 3.5
if sys.version_info[:3] == (3, 5, 0):
sys.exit("Running {name} with Python 3.5.0 is not supported; "
"please upgrade to 3.5.1 or newer".format(name=program))
def count_stats(errors: List[str]) -> Tuple[int, int]:
"""Count total number of errors and files in error list."""
errors = [e for e in errors if ': error:' in e]
files = {e.split(':')[0] for e in errors}
return len(errors), len(files)
def split_words(msg: str) -> List[str]:
"""Split line of text into words (but not within quoted groups)."""
next_word = ''
res = [] # type: List[str]
allow_break = True
for c in msg:
if c == ' ' and allow_break:
res.append(next_word)
next_word = ''
continue
if c == '"':
allow_break = not allow_break
next_word += c
res.append(next_word)
return res
def get_terminal_width() -> int:
"""Get current terminal width if possible, otherwise return the default one."""
return (int(os.getenv('MYPY_FORCE_TERMINAL_WIDTH', '0'))
or shutil.get_terminal_size().columns
or DEFAULT_COLUMNS)
def soft_wrap(msg: str, max_len: int, first_offset: int,
num_indent: int = 0) -> str:
"""Wrap a long error message into few lines.
Breaks will only happen between words, and never inside a quoted group
(to avoid breaking types such as "Union[int, str]"). The 'first_offset' is
the width before the start of first line.
Pad every next line with 'num_indent' spaces. Every line will be at most 'max_len'
characters, except if it is a single word or quoted group.
For example:
first_offset
------------------------
path/to/file: error: 58: Some very long error message
that needs to be split in separate lines.
"Long[Type, Names]" are never split.
^^^^--------------------------------------------------
num_indent max_len
"""
words = split_words(msg)
next_line = words.pop(0)
lines = [] # type: List[str]
while words:
next_word = words.pop(0)
max_line_len = max_len - num_indent if lines else max_len - first_offset
# Add 1 to account for space between words.
if len(next_line) + len(next_word) + 1 <= max_line_len:
next_line += ' ' + next_word
else:
lines.append(next_line)
next_line = next_word
lines.append(next_line)
padding = '\n' + ' ' * num_indent
return padding.join(lines)
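# A minimal illustration of soft_wrap, assuming a hypothetical mypy-style
# message; double-quoted groups such as "Union[int, str]" are never split:
#
#     soft_wrap('Argument 1 to "f" has incompatible type "Union[int, str]"',
#               max_len=40, first_offset=20, num_indent=4)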
def hash_digest(data: bytes) -> str:
"""Compute a hash digest of some data.
We use a cryptographic hash because we want a low probability of
accidental collision, but we don't really care about any of the
cryptographic properties.
"""
# Once we drop Python 3.5 support, we should consider using
# blake2b, which is faster.
return hashlib.sha256(data).hexdigest()
def parse_gray_color(cup: bytes) -> str:
"""Reproduce a gray color in ANSI escape sequence"""
set_color = ''.join([cup[:-1].decode(), 'm'])
gray = curses.tparm(set_color.encode('utf-8'), 1, 89).decode()
return gray
class FancyFormatter:
"""Apply color and bold font to terminal output.
This currently only works on Linux and Mac.
"""
def __init__(self, f_out: IO[str], f_err: IO[str], show_error_codes: bool) -> None:
self.show_error_codes = show_error_codes
# Check if we are in a human-facing terminal on a supported platform.
if sys.platform not in ('linux', 'darwin', 'win32'):
self.dummy_term = True
return
force_color = int(os.getenv('MYPY_FORCE_COLOR', '0'))
if not force_color and (not f_out.isatty() or not f_err.isatty()):
self.dummy_term = True
return
if sys.platform == 'win32':
self.dummy_term = not self.initialize_win_colors()
else:
self.dummy_term = not self.initialize_unix_colors()
if not self.dummy_term:
self.colors = {'red': self.RED, 'green': self.GREEN,
'blue': self.BLUE, 'yellow': self.YELLOW,
'none': ''}
def initialize_win_colors(self) -> bool:
"""Return True if initialization was successful and we can use colors, False otherwise"""
# Windows ANSI escape sequences are only supported on Threshold 2 and above.
# we check with an assert at runtime and an if check for mypy, as asserts do not
# yet narrow platform
assert sys.platform == 'win32'
if sys.platform == 'win32':
winver = sys.getwindowsversion()
if (winver.major < MINIMUM_WINDOWS_MAJOR_VT100
or winver.build < MINIMUM_WINDOWS_BUILD_VT100):
return False
import ctypes
kernel32 = ctypes.windll.kernel32
ENABLE_PROCESSED_OUTPUT = 0x1
ENABLE_WRAP_AT_EOL_OUTPUT = 0x2
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4
STD_OUTPUT_HANDLE = -11
kernel32.SetConsoleMode(kernel32.GetStdHandle(STD_OUTPUT_HANDLE),
ENABLE_PROCESSED_OUTPUT
| ENABLE_WRAP_AT_EOL_OUTPUT
| ENABLE_VIRTUAL_TERMINAL_PROCESSING)
self.BOLD = '\033[1m'
self.UNDER = '\033[4m'
self.BLUE = '\033[94m'
self.GREEN = '\033[92m'
self.RED = '\033[91m'
self.YELLOW = '\033[93m'
self.NORMAL = '\033[0m'
self.DIM = '\033[2m'
return True
return False
def initialize_unix_colors(self) -> bool:
"""Return True if initialization was successful and we can use colors, False otherwise"""
if not CURSES_ENABLED:
return False
try:
# setupterm wants a fd to potentially write an "initialization sequence".
# We override sys.stdout for the daemon API so if stdout doesn't have an fd,
# just give it /dev/null.
try:
fd = sys.stdout.fileno()
except io.UnsupportedOperation:
with open("/dev/null", "rb") as f:
curses.setupterm(fd=f.fileno())
else:
curses.setupterm(fd=fd)
except curses.error:
# Most likely terminfo not found.
return False
bold = curses.tigetstr('bold')
under = curses.tigetstr('smul')
set_color = curses.tigetstr('setaf')
set_eseq = curses.tigetstr('cup')
if not (bold and under and set_color and set_eseq):
return False
self.NORMAL = curses.tigetstr('sgr0').decode()
self.BOLD = bold.decode()
self.UNDER = under.decode()
self.DIM = parse_gray_color(set_eseq)
self.BLUE = curses.tparm(set_color, curses.COLOR_BLUE).decode()
self.GREEN = curses.tparm(set_color, curses.COLOR_GREEN).decode()
self.RED = curses.tparm(set_color, curses.COLOR_RED).decode()
self.YELLOW = curses.tparm(set_color, curses.COLOR_YELLOW).decode()
return True
def style(self, text: str, color: Literal['red', 'green', 'blue', 'yellow', 'none'],
bold: bool = False, underline: bool = False, dim: bool = False) -> str:
"""Apply simple color and style (underlined or bold)."""
if self.dummy_term:
return text
if bold:
start = self.BOLD
else:
start = ''
if underline:
start += self.UNDER
if dim:
start += self.DIM
return start + self.colors[color] + text + self.NORMAL
def fit_in_terminal(self, messages: List[str],
fixed_terminal_width: Optional[int] = None) -> List[str]:
"""Improve readability by wrapping error messages and trimming source code."""
width = fixed_terminal_width or get_terminal_width()
new_messages = messages.copy()
for i, error in enumerate(messages):
if ': error:' in error:
loc, msg = error.split('error:', maxsplit=1)
msg = soft_wrap(msg, width, first_offset=len(loc) + len('error: '))
new_messages[i] = loc + 'error:' + msg
if error.startswith(' ' * DEFAULT_SOURCE_OFFSET) and '^' not in error:
# TODO: detecting source code highlights through an indent can be surprising.
# Restore original error message and error location.
error = error[DEFAULT_SOURCE_OFFSET:]
column = messages[i+1].index('^') - DEFAULT_SOURCE_OFFSET
# Let source have some space also on the right side, plus 6
# to accommodate ... on each side.
max_len = width - DEFAULT_SOURCE_OFFSET - 6
source_line, offset = trim_source_line(error, max_len, column, MINIMUM_WIDTH)
new_messages[i] = ' ' * DEFAULT_SOURCE_OFFSET + source_line
# Also adjust the error marker position.
new_messages[i+1] = ' ' * (DEFAULT_SOURCE_OFFSET + column - offset) + '^'
return new_messages
def colorize(self, error: str) -> str:
"""Colorize an output line by highlighting the status and error code."""
if ': error:' in error:
loc, msg = error.split('error:', maxsplit=1)
if not self.show_error_codes:
return (loc + self.style('error:', 'red', bold=True) +
self.highlight_quote_groups(msg))
codepos = msg.rfind('[')
if codepos != -1:
code = msg[codepos:]
msg = msg[:codepos]
else:
code = "" # no error code specified
return (loc + self.style('error:', 'red', bold=True) +
self.highlight_quote_groups(msg) + self.style(code, 'yellow'))
elif ': note:' in error:
loc, msg = error.split('note:', maxsplit=1)
formatted = self.highlight_quote_groups(self.underline_link(msg))
return loc + self.style('note:', 'blue') + formatted
elif error.startswith(' ' * DEFAULT_SOURCE_OFFSET):
# TODO: detecting source code highlights through an indent can be surprising.
if '^' not in error:
return self.style(error, 'none', dim=True)
return self.style(error, 'red')
else:
return error
def highlight_quote_groups(self, msg: str) -> str:
"""Make groups quoted with double quotes bold (including quotes).
This is used to highlight types, attribute names etc.
"""
if msg.count('"') % 2:
# Broken error message, don't do any formatting.
return msg
parts = msg.split('"')
out = ''
for i, part in enumerate(parts):
if i % 2 == 0:
out += self.style(part, 'none')
else:
out += self.style('"' + part + '"', 'none', bold=True)
return out
def underline_link(self, note: str) -> str:
"""Underline a link in a note message (if any).
This assumes there is at most one link in the message.
"""
match = re.search(r'https?://\S*', note)
if not match:
return note
start = match.start()
end = match.end()
return (note[:start] +
self.style(note[start:end], 'none', underline=True) +
note[end:])
def format_success(self, n_sources: int, use_color: bool = True) -> str:
"""Format short summary in case of success.
n_sources is total number of files passed directly on command line,
i.e. excluding stubs and followed imports.
"""
msg = 'Success: no issues found in {}' \
' source file{}'.format(n_sources, 's' if n_sources != 1 else '')
if not use_color:
return msg
return self.style(msg, 'green', bold=True)
def format_error(
self, n_errors: int, n_files: int, n_sources: int, *,
blockers: bool = False, use_color: bool = True
) -> str:
"""Format a short summary in case of errors."""
msg = 'Found {} error{} in {} file{}'.format(
n_errors, 's' if n_errors != 1 else '',
n_files, 's' if n_files != 1 else ''
)
if blockers:
msg += ' (errors prevented further checking)'
else:
msg += ' (checked {} source file{})'.format(n_sources, 's' if n_sources != 1 else '')
if not use_color:
return msg
return self.style(msg, 'red', bold=True)
def is_typeshed_file(file: str) -> bool:
# gross, but no other clear way to tell
return 'typeshed' in os.path.abspath(file).split(os.sep)
def is_stub_package_file(file: str) -> bool:
# Use hacky heuristics to check whether file is part of a PEP 561 stub package.
if not file.endswith('.pyi'):
return False
return any(component.endswith('-stubs')
for component in os.path.abspath(file).split(os.sep))
| 36.845517 | 99 | 0.603826 |
79475a09111aafbde7b22e5693ad8680d865e968 | 1,364 | py | Python | setup.py | lucag/sparrow | e6af3f4b5d8d0aa68a1f67d61bb1daadc218a9ba | [
"BSD-3-Clause"
] | null | null | null | setup.py | lucag/sparrow | e6af3f4b5d8d0aa68a1f67d61bb1daadc218a9ba | [
"BSD-3-Clause"
] | 2 | 2020-05-29T00:16:27.000Z | 2021-03-31T19:45:40.000Z | setup.py | lucag/sparrow | e6af3f4b5d8d0aa68a1f67d61bb1daadc218a9ba | [
"BSD-3-Clause"
] | null | null | null | from os.path import join, dirname
from setuptools import setup, find_packages
base = dirname(__file__)
README = join(base, 'README.rst')
def lines(filename):
with open(filename) as lines:
return [line.rstrip() for line in lines]
setup(
name='sparrow',
version='1.0.1-SNAPSHOT',
author='Jasper Op de Coul (Infrae)',
author_email='[email protected]',
description="Sparrow, Common RDF/SPARQL Database API",
long_description=open(README).read() + open('HISTORY.txt').read(),
classifiers=["Development Status :: 4 - Beta",
"Programming Language :: Python",
"License :: OSI Approved :: BSD License",
"Topic :: Software Development :: Libraries :: Python Modules",
"Environment :: Web Environment"],
keywords='python RDF SPARQL',
packages=find_packages(where='src', exclude=['*tests*']),
package_dir={'': 'src'},
include_package_data=True,
zip_safe=False,
license='BSD',
# entry_points={
# 'console_scripts': [
# 'start_sesame_server = sparrow.sesame_backend:start_server',
# 'configure_sesame = sparrow.sesame_backend:configure_server',
# 'start_allegro_server = sparrow.allegro_backend:start_server'
# ]
# },
install_requires=lines(join(base, 'requirements.txt')),
)
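# Illustrative note (not part of the original file): with this setup.py in place
# the package is typically installed for development with
#   pip install -e .
# which resolves install_requires from requirements.txt via the lines() helper.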
| 33.268293 | 80 | 0.63563 |
79475a10dbfb5990bddd0339795091d759fcd705 | 9,851 | py | Python | tests/unit/core/components/test_module_path.py | avosper-intellaegis/runway | 757d4e7db269ec16479b044ac82a69f25fa2a450 | ["Apache-2.0"] | 134 | 2018-02-26T21:35:23.000Z | 2022-03-03T00:30:27.000Z | tests/unit/core/components/test_module_path.py | asksmruti/runway | 8aca76df9372e3d13eb35e12f81758f618e89e74 | ["Apache-2.0"] | 937 | 2018-03-08T22:04:35.000Z | 2022-03-30T12:21:47.000Z | tests/unit/core/components/test_module_path.py | asksmruti/runway | 8aca76df9372e3d13eb35e12f81758f618e89e74 | ["Apache-2.0"] | 70 | 2018-02-26T23:48:11.000Z | 2022-03-02T18:44:30.000Z |
"""Test runway.core.components._module_path."""
# pylint: disable=no-self-use
# pyright: basic
from __future__ import annotations
from copy import deepcopy
from pathlib import Path
from typing import TYPE_CHECKING, Dict, List, Optional, Union
import pytest
from mock import MagicMock
from typing_extensions import TypedDict
from runway.config.components.runway import RunwayModuleDefinition
from runway.config.models.runway import RunwayModuleDefinitionModel
from runway.constants import DEFAULT_CACHE_DIR
from runway.core.components._module_path import ModulePath
if TYPE_CHECKING:
from pytest_mock import MockerFixture
from runway.core.components import DeployEnvironment
MODULE = "runway.core.components._module_path"
TypeDefTestDefinitionExpected = TypedDict(
"TypeDefTestDefinitionExpected",
arguments=Dict[str, str],
location=str,
source=str,
uri=str,
)
TypeDefTestDefinition = TypedDict(
"TypeDefTestDefinition",
definition=Optional[Union[Path, str]],
expected=TypeDefTestDefinitionExpected,
)
TESTS: List[TypeDefTestDefinition] = [
{
"definition": "git::git://github.com/onicagroup/foo/bar.git",
"expected": {
"location": "./",
"arguments": {},
"source": "git",
"uri": "git://github.com/onicagroup/foo/bar.git",
},
},
{
"definition": "git::git://github.com/onicagroup/foo/bar.git//foo/bar",
"expected": {
"location": "foo/bar",
"arguments": {},
"source": "git",
"uri": "git://github.com/onicagroup/foo/bar.git",
},
},
{
"definition": "git::git://github.com/onicagroup/foo/bar.git?branch=foo",
"expected": {
"location": "./",
"arguments": {"branch": "foo"},
"source": "git",
"uri": "git://github.com/onicagroup/foo/bar.git",
},
},
{
"definition": "git::git://github.com/onicagroup/foo/bar.git?branch=foo&bar=baz",
"expected": {
"location": "./",
"arguments": {"branch": "foo", "bar": "baz"},
"source": "git",
"uri": "git://github.com/onicagroup/foo/bar.git",
},
},
{
"definition": "git::git://github.com/onicagroup/foo/bar.git//src/foo/bar?branch=foo",
"expected": {
"location": "src/foo/bar",
"arguments": {"branch": "foo"},
"source": "git",
"uri": "git://github.com/onicagroup/foo/bar.git",
},
},
{
"definition": "git::git://github.com/onicagroup/foo/bar.git//src/foo/bar?branch=foo&bar=baz", # noqa
"expected": {
"location": "src/foo/bar",
"arguments": {"branch": "foo", "bar": "baz"},
"source": "git",
"uri": "git://github.com/onicagroup/foo/bar.git",
},
},
{
"definition": Path.cwd(),
"expected": {"location": "./", "arguments": {}, "source": "local", "uri": ""},
},
{
"definition": None,
"expected": {"location": "./", "arguments": {}, "source": "local", "uri": ""},
},
{
"definition": "/Users/kyle/repos/runway/.demo",
"expected": {
"location": "/Users/kyle/repos/runway/.demo",
"arguments": {},
"source": "local",
"uri": "",
},
},
{
"definition": "local:://example/path",
"expected": {
"location": "example/path",
"arguments": {},
"source": "local",
"uri": "",
},
},
{
"definition": "/example/path",
"expected": {
"location": "/example/path",
"arguments": {},
"source": "local",
"uri": "",
},
},
{
"definition": "./example/path",
"expected": {
"location": "./example/path",
"arguments": {},
"source": "local",
"uri": "",
},
},
{
"definition": "//example/path",
"expected": {
"location": "//example/path",
"arguments": {},
"source": "local",
"uri": "",
},
},
{
"definition": "sampleapp.cfn",
"expected": {
"location": "sampleapp.cfn",
"arguments": {},
"source": "local",
"uri": "",
},
},
]
class TestModulePath:
"""Test runway.core.components._module_path.ModulePath."""
@pytest.mark.parametrize("test", deepcopy(TESTS))
def test_arguments(
self, deploy_environment: DeployEnvironment, test: TypeDefTestDefinition
) -> None:
"""Test arguments."""
assert (
ModulePath(
test["definition"], deploy_environment=deploy_environment
).arguments
== test["expected"]["arguments"]
)
@pytest.mark.parametrize("test", deepcopy(TESTS))
def test_location(
self, deploy_environment: DeployEnvironment, test: TypeDefTestDefinition
) -> None:
"""Test location."""
assert (
ModulePath(
test["definition"], deploy_environment=deploy_environment
).location
== test["expected"]["location"]
)
@pytest.mark.parametrize("test", deepcopy(TESTS))
def test_metadata(
self, deploy_environment: DeployEnvironment, test: TypeDefTestDefinition
) -> None:
"""Test metadata."""
assert ModulePath(
test["definition"], deploy_environment=deploy_environment
).metadata == {
"arguments": test["expected"]["arguments"],
"cache_dir": DEFAULT_CACHE_DIR,
"location": test["expected"]["location"],
"source": test["expected"]["source"],
"uri": test["expected"]["uri"],
}
def test_module_root_not_implimented(self) -> None:
"""Test module_root NotImplimentedError."""
with pytest.raises(NotImplementedError):
assert not ModulePath("invalid::something").module_root
@pytest.mark.parametrize("test", deepcopy(TESTS))
def test_module_root(
self,
deploy_environment: DeployEnvironment,
mocker: MockerFixture,
test: TypeDefTestDefinition,
) -> None:
"""Test module_root."""
mocker.patch.object(ModulePath, "REMOTE_SOURCE_HANDLERS", {"git": MagicMock()})
obj = ModulePath(test["definition"], deploy_environment=deploy_environment)
if isinstance(test["definition"], (type(None), Path)):
            assert obj.module_root == (test["definition"] or Path.cwd())
elif test["expected"]["source"] == "local":
assert (
obj.module_root
== deploy_environment.root_dir / test["expected"]["location"]
)
else:
assert (
obj.module_root
== ModulePath.REMOTE_SOURCE_HANDLERS[
obj.source
].return_value.fetch.return_value # type: ignore
)
ModulePath.REMOTE_SOURCE_HANDLERS[obj.source].assert_called_once_with( # type: ignore
**obj.metadata
)
ModulePath.REMOTE_SOURCE_HANDLERS[
obj.source
].return_value.fetch.assert_called_once_with() # type: ignore
@pytest.mark.parametrize("test", deepcopy(TESTS))
def test_source(
self, deploy_environment: DeployEnvironment, test: TypeDefTestDefinition
) -> None:
"""Test source."""
assert (
ModulePath(test["definition"], deploy_environment=deploy_environment).source
== test["expected"]["source"]
)
@pytest.mark.parametrize("test", deepcopy(TESTS))
def test_uri(
self, deploy_environment: DeployEnvironment, test: TypeDefTestDefinition
) -> None:
"""Test uri."""
assert (
ModulePath(test["definition"], deploy_environment=deploy_environment).uri
== test["expected"]["uri"]
)
def test_parse_obj_none(self, deploy_environment: DeployEnvironment) -> None:
"""Test parse_obj None."""
obj = ModulePath.parse_obj(None, deploy_environment=deploy_environment)
assert obj.definition == Path.cwd()
assert obj.env == deploy_environment
def test_parse_obj_path(
self, deploy_environment: DeployEnvironment, tmp_path: Path
) -> None:
"""Test parse_obj Path."""
obj = ModulePath.parse_obj(tmp_path, deploy_environment=deploy_environment)
assert obj.definition == tmp_path
assert obj.env == deploy_environment
def test_parse_obj_runway_config(
self, deploy_environment: DeployEnvironment, tmp_path: Path
) -> None:
"""Test parse_obj Runway config objects."""
model = RunwayModuleDefinitionModel(path=tmp_path)
obj0 = ModulePath.parse_obj(model, deploy_environment=deploy_environment)
assert obj0.definition == model.path
assert obj0.env == deploy_environment
module = RunwayModuleDefinition(model)
obj1 = ModulePath.parse_obj(module, deploy_environment=deploy_environment)
assert obj1.definition == model.path
assert obj1.env == deploy_environment
def test_parse_obj_str(self, deploy_environment: DeployEnvironment) -> None:
"""Test parse_obj str."""
obj = ModulePath.parse_obj("./test", deploy_environment=deploy_environment)
assert obj.definition == "./test"
assert obj.env == deploy_environment
def test_parse_obj_type_error(self) -> None:
"""Test parse_obj TypeError."""
with pytest.raises(TypeError):
assert not ModulePath.parse_obj({}) # type: ignore
| 33.39322 | 109 | 0.570602 |
79475a83f0e363c5f9f223278c5a3ebdf5f7fad6 | 42,322 | py | Python | src/black/linegen.py | henrikhorluck/black | 5379d4f3f460ec9b7063dd1cc10f437b0edf9ae3 | ["MIT"] | null | null | null | src/black/linegen.py | henrikhorluck/black | 5379d4f3f460ec9b7063dd1cc10f437b0edf9ae3 | ["MIT"] | null | null | null | src/black/linegen.py | henrikhorluck/black | 5379d4f3f460ec9b7063dd1cc10f437b0edf9ae3 | ["MIT"] | null | null | null |
"""
Generating lines of code.
"""
from functools import partial, wraps
import sys
from typing import Collection, Iterator, List, Optional, Set, Union
from black.nodes import WHITESPACE, RARROW, STATEMENT, STANDALONE_COMMENT
from black.nodes import ASSIGNMENTS, OPENING_BRACKETS, CLOSING_BRACKETS
from black.nodes import Visitor, syms, is_arith_like, ensure_visible
from black.nodes import is_docstring, is_empty_tuple, is_one_tuple, is_one_tuple_between
from black.nodes import is_name_token, is_lpar_token, is_rpar_token
from black.nodes import is_walrus_assignment, is_yield, is_vararg, is_multiline_string
from black.nodes import is_stub_suite, is_stub_body, is_atom_with_invisible_parens
from black.nodes import wrap_in_parentheses
from black.brackets import max_delimiter_priority_in_atom
from black.brackets import DOT_PRIORITY, COMMA_PRIORITY
from black.lines import Line, line_to_string, is_line_short_enough
from black.lines import can_omit_invisible_parens, can_be_split, append_leaves
from black.comments import generate_comments, list_comments, FMT_OFF
from black.numerics import normalize_numeric_literal
from black.strings import get_string_prefix, fix_docstring
from black.strings import normalize_string_prefix, normalize_string_quotes
from black.trans import Transformer, CannotTransform, StringMerger, StringSplitter
from black.trans import StringParenWrapper, StringParenStripper, hug_power_op
from black.mode import Mode, Feature, Preview
from blib2to3.pytree import Node, Leaf
from blib2to3.pgen2 import token
# types
LeafID = int
LN = Union[Leaf, Node]
class CannotSplit(CannotTransform):
"""A readable split that fits the allotted line length is impossible."""
# This isn't a dataclass because @dataclass + Generic breaks mypyc.
# See also https://github.com/mypyc/mypyc/issues/827.
class LineGenerator(Visitor[Line]):
"""Generates reformatted Line objects. Empty lines are not emitted.
Note: destroys the tree it's visiting by mutating prefixes of its leaves
in ways that will no longer stringify to valid Python code on the tree.
"""
def __init__(self, mode: Mode) -> None:
self.mode = mode
self.current_line: Line
self.__post_init__()
def line(self, indent: int = 0) -> Iterator[Line]:
"""Generate a line.
If the line is empty, only emit if it makes sense.
If the line is too long, split it first and then generate.
If any lines were generated, set up a new current_line.
"""
if not self.current_line:
self.current_line.depth += indent
return # Line is empty, don't emit. Creating a new one unnecessary.
complete_line = self.current_line
self.current_line = Line(mode=self.mode, depth=complete_line.depth + indent)
yield complete_line
def visit_default(self, node: LN) -> Iterator[Line]:
"""Default `visit_*()` implementation. Recurses to children of `node`."""
if isinstance(node, Leaf):
any_open_brackets = self.current_line.bracket_tracker.any_open_brackets()
for comment in generate_comments(node, preview=self.mode.preview):
if any_open_brackets:
# any comment within brackets is subject to splitting
self.current_line.append(comment)
elif comment.type == token.COMMENT:
# regular trailing comment
self.current_line.append(comment)
yield from self.line()
else:
# regular standalone comment
yield from self.line()
self.current_line.append(comment)
yield from self.line()
normalize_prefix(node, inside_brackets=any_open_brackets)
if self.mode.string_normalization and node.type == token.STRING:
node.value = normalize_string_prefix(node.value)
node.value = normalize_string_quotes(node.value)
if node.type == token.NUMBER:
normalize_numeric_literal(node)
if node.type not in WHITESPACE:
self.current_line.append(node)
yield from super().visit_default(node)
def visit_INDENT(self, node: Leaf) -> Iterator[Line]:
"""Increase indentation level, maybe yield a line."""
# In blib2to3 INDENT never holds comments.
yield from self.line(+1)
yield from self.visit_default(node)
def visit_DEDENT(self, node: Leaf) -> Iterator[Line]:
"""Decrease indentation level, maybe yield a line."""
# The current line might still wait for trailing comments. At DEDENT time
# there won't be any (they would be prefixes on the preceding NEWLINE).
# Emit the line then.
yield from self.line()
# While DEDENT has no value, its prefix may contain standalone comments
# that belong to the current indentation level. Get 'em.
yield from self.visit_default(node)
# Finally, emit the dedent.
yield from self.line(-1)
def visit_stmt(
self, node: Node, keywords: Set[str], parens: Set[str]
) -> Iterator[Line]:
"""Visit a statement.
This implementation is shared for `if`, `while`, `for`, `try`, `except`,
`def`, `with`, `class`, `assert`, and assignments.
The relevant Python language `keywords` for a given statement will be
NAME leaves within it. This methods puts those on a separate line.
`parens` holds a set of string leaf values immediately after which
invisible parens should be put.
"""
normalize_invisible_parens(node, parens_after=parens, preview=self.mode.preview)
for child in node.children:
if is_name_token(child) and child.value in keywords:
yield from self.line()
yield from self.visit(child)
def visit_match_case(self, node: Node) -> Iterator[Line]:
"""Visit either a match or case statement."""
normalize_invisible_parens(node, parens_after=set(), preview=self.mode.preview)
yield from self.line()
for child in node.children:
yield from self.visit(child)
def visit_suite(self, node: Node) -> Iterator[Line]:
"""Visit a suite."""
if self.mode.is_pyi and is_stub_suite(node):
yield from self.visit(node.children[2])
else:
yield from self.visit_default(node)
def visit_simple_stmt(self, node: Node) -> Iterator[Line]:
"""Visit a statement without nested statements."""
prev_type: Optional[int] = None
for child in node.children:
if (prev_type is None or prev_type == token.SEMI) and is_arith_like(child):
wrap_in_parentheses(node, child, visible=False)
prev_type = child.type
is_suite_like = node.parent and node.parent.type in STATEMENT
if is_suite_like:
if self.mode.is_pyi and is_stub_body(node):
yield from self.visit_default(node)
else:
yield from self.line(+1)
yield from self.visit_default(node)
yield from self.line(-1)
else:
if (
not self.mode.is_pyi
or not node.parent
or not is_stub_suite(node.parent)
):
yield from self.line()
yield from self.visit_default(node)
def visit_async_stmt(self, node: Node) -> Iterator[Line]:
"""Visit `async def`, `async for`, `async with`."""
yield from self.line()
children = iter(node.children)
for child in children:
yield from self.visit(child)
if child.type == token.ASYNC:
break
internal_stmt = next(children)
for child in internal_stmt.children:
yield from self.visit(child)
def visit_decorators(self, node: Node) -> Iterator[Line]:
"""Visit decorators."""
for child in node.children:
yield from self.line()
yield from self.visit(child)
def visit_power(self, node: Node) -> Iterator[Line]:
for idx, leaf in enumerate(node.children[:-1]):
next_leaf = node.children[idx + 1]
if not isinstance(leaf, Leaf):
continue
value = leaf.value.lower()
if (
leaf.type == token.NUMBER
and next_leaf.type == syms.trailer
# Ensure that we are in an attribute trailer
and next_leaf.children[0].type == token.DOT
# It shouldn't wrap hexadecimal, binary and octal literals
and not value.startswith(("0x", "0b", "0o"))
# It shouldn't wrap complex literals
and "j" not in value
):
wrap_in_parentheses(node, leaf)
yield from self.visit_default(node)
def visit_SEMI(self, leaf: Leaf) -> Iterator[Line]:
"""Remove a semicolon and put the other statement on a separate line."""
yield from self.line()
def visit_ENDMARKER(self, leaf: Leaf) -> Iterator[Line]:
"""End of file. Process outstanding comments and end with a newline."""
yield from self.visit_default(leaf)
yield from self.line()
def visit_STANDALONE_COMMENT(self, leaf: Leaf) -> Iterator[Line]:
if not self.current_line.bracket_tracker.any_open_brackets():
yield from self.line()
yield from self.visit_default(leaf)
def visit_factor(self, node: Node) -> Iterator[Line]:
"""Force parentheses between a unary op and a binary power:
-2 ** 8 -> -(2 ** 8)
"""
_operator, operand = node.children
if (
operand.type == syms.power
and len(operand.children) == 3
and operand.children[1].type == token.DOUBLESTAR
):
lpar = Leaf(token.LPAR, "(")
rpar = Leaf(token.RPAR, ")")
index = operand.remove() or 0
node.insert_child(index, Node(syms.atom, [lpar, operand, rpar]))
yield from self.visit_default(node)
def visit_STRING(self, leaf: Leaf) -> Iterator[Line]:
if is_docstring(leaf) and "\\\n" not in leaf.value:
# We're ignoring docstrings with backslash newline escapes because changing
# indentation of those changes the AST representation of the code.
docstring = normalize_string_prefix(leaf.value)
prefix = get_string_prefix(docstring)
docstring = docstring[len(prefix) :] # Remove the prefix
quote_char = docstring[0]
# A natural way to remove the outer quotes is to do:
# docstring = docstring.strip(quote_char)
# but that breaks on """""x""" (which is '""x').
# So we actually need to remove the first character and the next two
# characters but only if they are the same as the first.
quote_len = 1 if docstring[1] != quote_char else 3
docstring = docstring[quote_len:-quote_len]
docstring_started_empty = not docstring
if is_multiline_string(leaf):
indent = " " * 4 * self.current_line.depth
docstring = fix_docstring(docstring, indent)
else:
docstring = docstring.strip()
if docstring:
# Add some padding if the docstring starts / ends with a quote mark.
if docstring[0] == quote_char:
docstring = " " + docstring
if docstring[-1] == quote_char:
docstring += " "
if docstring[-1] == "\\":
backslash_count = len(docstring) - len(docstring.rstrip("\\"))
if backslash_count % 2:
                        # Odd number of trailing backslashes, add some padding to
                        # avoid escaping the closing string quote.
docstring += " "
elif not docstring_started_empty:
docstring = " "
# We could enforce triple quotes at this point.
quote = quote_char * quote_len
leaf.value = prefix + quote + docstring + quote
yield from self.visit_default(leaf)
def __post_init__(self) -> None:
"""You are in a twisty little maze of passages."""
self.current_line = Line(mode=self.mode)
v = self.visit_stmt
Ø: Set[str] = set()
self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","})
self.visit_if_stmt = partial(
v, keywords={"if", "else", "elif"}, parens={"if", "elif"}
)
self.visit_while_stmt = partial(v, keywords={"while", "else"}, parens={"while"})
self.visit_for_stmt = partial(v, keywords={"for", "else"}, parens={"for", "in"})
self.visit_try_stmt = partial(
v, keywords={"try", "except", "else", "finally"}, parens=Ø
)
self.visit_except_clause = partial(v, keywords={"except"}, parens=Ø)
self.visit_with_stmt = partial(v, keywords={"with"}, parens=Ø)
self.visit_funcdef = partial(v, keywords={"def"}, parens=Ø)
self.visit_classdef = partial(v, keywords={"class"}, parens=Ø)
self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS)
self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"})
self.visit_import_from = partial(v, keywords=Ø, parens={"import"})
self.visit_del_stmt = partial(v, keywords=Ø, parens={"del"})
self.visit_async_funcdef = self.visit_async_stmt
self.visit_decorated = self.visit_decorators
# PEP 634
self.visit_match_stmt = self.visit_match_case
self.visit_case_block = self.visit_match_case
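# Illustrative sketch (not part of the original module): how LineGenerator and
# transform_line() are typically chained. lib2to3_parse is assumed to be black's
# parser entry point (black.parsing); the exact import path may differ between
# versions.
#
#   node = lib2to3_parse("x = [1, 2, 3]\n")
#   for line in LineGenerator(mode=Mode()).visit(node):
#       for formatted in transform_line(line, mode=Mode()):
#           print(line_to_string(formatted))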
def transform_line(
line: Line, mode: Mode, features: Collection[Feature] = ()
) -> Iterator[Line]:
"""Transform a `line`, potentially splitting it into many lines.
They should fit in the allotted `line_length` but might not be able to.
`features` are syntactical features that may be used in the output.
"""
if line.is_comment:
yield line
return
line_str = line_to_string(line)
ll = mode.line_length
sn = mode.string_normalization
string_merge = StringMerger(ll, sn)
string_paren_strip = StringParenStripper(ll, sn)
string_split = StringSplitter(ll, sn)
string_paren_wrap = StringParenWrapper(ll, sn)
transformers: List[Transformer]
if (
not line.contains_uncollapsable_type_comments()
and not line.should_split_rhs
and not line.magic_trailing_comma
and (
is_line_short_enough(line, line_length=mode.line_length, line_str=line_str)
or line.contains_unsplittable_type_ignore()
)
and not (line.inside_brackets and line.contains_standalone_comments())
):
# Only apply basic string preprocessing, since lines shouldn't be split here.
if Preview.string_processing in mode:
transformers = [string_merge, string_paren_strip]
else:
transformers = []
elif line.is_def:
transformers = [left_hand_split]
else:
def _rhs(
self: object, line: Line, features: Collection[Feature]
) -> Iterator[Line]:
"""Wraps calls to `right_hand_split`.
The calls increasingly `omit` right-hand trailers (bracket pairs with
content), meaning the trailers get glued together to split on another
bracket pair instead.
"""
for omit in generate_trailers_to_omit(line, mode.line_length):
lines = list(
right_hand_split(line, mode.line_length, features, omit=omit)
)
# Note: this check is only able to figure out if the first line of the
# *current* transformation fits in the line length. This is true only
# for simple cases. All others require running more transforms via
# `transform_line()`. This check doesn't know if those would succeed.
if is_line_short_enough(lines[0], line_length=mode.line_length):
yield from lines
return
# All splits failed, best effort split with no omits.
# This mostly happens to multiline strings that are by definition
# reported as not fitting a single line, as well as lines that contain
# trailing commas (those have to be exploded).
yield from right_hand_split(
line, line_length=mode.line_length, features=features
)
# HACK: nested functions (like _rhs) compiled by mypyc don't retain their
# __name__ attribute which is needed in `run_transformer` further down.
# Unfortunately a nested class breaks mypyc too. So a class must be created
# via type ... https://github.com/mypyc/mypyc/issues/884
rhs = type("rhs", (), {"__call__": _rhs})()
if Preview.string_processing in mode:
if line.inside_brackets:
transformers = [
string_merge,
string_paren_strip,
string_split,
delimiter_split,
standalone_comment_split,
string_paren_wrap,
rhs,
]
else:
transformers = [
string_merge,
string_paren_strip,
string_split,
string_paren_wrap,
rhs,
]
else:
if line.inside_brackets:
transformers = [delimiter_split, standalone_comment_split, rhs]
else:
transformers = [rhs]
# It's always safe to attempt hugging of power operations and pretty much every line
# could match.
transformers.append(hug_power_op)
for transform in transformers:
# We are accumulating lines in `result` because we might want to abort
# mission and return the original line in the end, or attempt a different
# split altogether.
try:
result = run_transformer(line, transform, mode, features, line_str=line_str)
except CannotTransform:
continue
else:
yield from result
break
else:
yield line
def left_hand_split(line: Line, _features: Collection[Feature] = ()) -> Iterator[Line]:
"""Split line into many lines, starting with the first matching bracket pair.
Note: this usually looks weird, only use this for function definitions.
Prefer RHS otherwise. This is why this function is not symmetrical with
:func:`right_hand_split` which also handles optional parentheses.
"""
tail_leaves: List[Leaf] = []
body_leaves: List[Leaf] = []
head_leaves: List[Leaf] = []
current_leaves = head_leaves
matching_bracket: Optional[Leaf] = None
for leaf in line.leaves:
if (
current_leaves is body_leaves
and leaf.type in CLOSING_BRACKETS
and leaf.opening_bracket is matching_bracket
):
current_leaves = tail_leaves if body_leaves else head_leaves
current_leaves.append(leaf)
if current_leaves is head_leaves:
if leaf.type in OPENING_BRACKETS:
matching_bracket = leaf
current_leaves = body_leaves
if not matching_bracket:
raise CannotSplit("No brackets found")
head = bracket_split_build_line(head_leaves, line, matching_bracket)
body = bracket_split_build_line(body_leaves, line, matching_bracket, is_body=True)
tail = bracket_split_build_line(tail_leaves, line, matching_bracket)
bracket_split_succeeded_or_raise(head, body, tail)
for result in (head, body, tail):
if result:
yield result
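# Illustrative sketch (not part of the original module): the head/body/tail shape
# left_hand_split produces for an over-long function definition such as
#
#   def frobnicate(argument_one, argument_two, argument_three) -> None: ...
#
# which is split at the first bracket pair into
#
#   def frobnicate(                                     <- head
#       argument_one, argument_two, argument_three      <- body (one indent deeper)
#   ) -> None: ...                                      <- tail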
def right_hand_split(
line: Line,
line_length: int,
features: Collection[Feature] = (),
omit: Collection[LeafID] = (),
) -> Iterator[Line]:
"""Split line into many lines, starting with the last matching bracket pair.
If the split was by optional parentheses, attempt splitting without them, too.
`omit` is a collection of closing bracket IDs that shouldn't be considered for
this split.
Note: running this function modifies `bracket_depth` on the leaves of `line`.
"""
tail_leaves: List[Leaf] = []
body_leaves: List[Leaf] = []
head_leaves: List[Leaf] = []
current_leaves = tail_leaves
opening_bracket: Optional[Leaf] = None
closing_bracket: Optional[Leaf] = None
for leaf in reversed(line.leaves):
if current_leaves is body_leaves:
if leaf is opening_bracket:
current_leaves = head_leaves if body_leaves else tail_leaves
current_leaves.append(leaf)
if current_leaves is tail_leaves:
if leaf.type in CLOSING_BRACKETS and id(leaf) not in omit:
opening_bracket = leaf.opening_bracket
closing_bracket = leaf
current_leaves = body_leaves
if not (opening_bracket and closing_bracket and head_leaves):
# If there is no opening or closing_bracket that means the split failed and
# all content is in the tail. Otherwise, if `head_leaves` are empty, it means
# the matching `opening_bracket` wasn't available on `line` anymore.
raise CannotSplit("No brackets found")
tail_leaves.reverse()
body_leaves.reverse()
head_leaves.reverse()
head = bracket_split_build_line(head_leaves, line, opening_bracket)
body = bracket_split_build_line(body_leaves, line, opening_bracket, is_body=True)
tail = bracket_split_build_line(tail_leaves, line, opening_bracket)
bracket_split_succeeded_or_raise(head, body, tail)
if (
Feature.FORCE_OPTIONAL_PARENTHESES not in features
# the opening bracket is an optional paren
and opening_bracket.type == token.LPAR
and not opening_bracket.value
# the closing bracket is an optional paren
and closing_bracket.type == token.RPAR
and not closing_bracket.value
# it's not an import (optional parens are the only thing we can split on
# in this case; attempting a split without them is a waste of time)
and not line.is_import
# there are no standalone comments in the body
and not body.contains_standalone_comments(0)
# and we can actually remove the parens
and can_omit_invisible_parens(body, line_length)
):
omit = {id(closing_bracket), *omit}
try:
yield from right_hand_split(line, line_length, features=features, omit=omit)
return
except CannotSplit as e:
if not (
can_be_split(body)
or is_line_short_enough(body, line_length=line_length)
):
raise CannotSplit(
"Splitting failed, body is still too long and can't be split."
) from e
elif head.contains_multiline_strings() or tail.contains_multiline_strings():
raise CannotSplit(
"The current optional pair of parentheses is bound to fail to"
" satisfy the splitting algorithm because the head or the tail"
" contains multiline strings which by definition never fit one"
" line."
) from e
ensure_visible(opening_bracket)
ensure_visible(closing_bracket)
for result in (head, body, tail):
if result:
yield result
def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None:
"""Raise :exc:`CannotSplit` if the last left- or right-hand split failed.
Do nothing otherwise.
A left- or right-hand split is based on a pair of brackets. Content before
(and including) the opening bracket is left on one line, content inside the
brackets is put on a separate line, and finally content starting with and
following the closing bracket is put on a separate line.
Those are called `head`, `body`, and `tail`, respectively. If the split
produced the same line (all content in `head`) or ended up with an empty `body`
and the `tail` is just the closing bracket, then it's considered failed.
"""
tail_len = len(str(tail).strip())
if not body:
if tail_len == 0:
raise CannotSplit("Splitting brackets produced the same line")
elif tail_len < 3:
raise CannotSplit(
f"Splitting brackets on an empty body to save {tail_len} characters is"
" not worth it"
)
def bracket_split_build_line(
leaves: List[Leaf], original: Line, opening_bracket: Leaf, *, is_body: bool = False
) -> Line:
"""Return a new line with given `leaves` and respective comments from `original`.
If `is_body` is True, the result line is one-indented inside brackets and as such
has its first leaf's prefix normalized and a trailing comma added when expected.
"""
result = Line(mode=original.mode, depth=original.depth)
if is_body:
result.inside_brackets = True
result.depth += 1
if leaves:
# Since body is a new indent level, remove spurious leading whitespace.
normalize_prefix(leaves[0], inside_brackets=True)
# Ensure a trailing comma for imports and standalone function arguments, but
# be careful not to add one after any comments or within type annotations.
no_commas = (
original.is_def
and opening_bracket.value == "("
and not any(leaf.type == token.COMMA for leaf in leaves)
# In particular, don't add one within a parenthesized return annotation.
# Unfortunately the indicator we're in a return annotation (RARROW) may
# be defined directly in the parent node, the parent of the parent ...
# and so on depending on how complex the return annotation is.
            # This isn't perfect and there are some false negatives, but they are
            # in contexts where a comma is actually fine.
and not any(
node.prev_sibling.type == RARROW
for node in (
leaves[0].parent,
getattr(leaves[0].parent, "parent", None),
)
if isinstance(node, Node) and isinstance(node.prev_sibling, Leaf)
)
)
if original.is_import or no_commas:
for i in range(len(leaves) - 1, -1, -1):
if leaves[i].type == STANDALONE_COMMENT:
continue
if leaves[i].type != token.COMMA:
new_comma = Leaf(token.COMMA, ",")
leaves.insert(i + 1, new_comma)
break
# Populate the line
for leaf in leaves:
result.append(leaf, preformatted=True)
for comment_after in original.comments_after(leaf):
result.append(comment_after, preformatted=True)
if is_body and should_split_line(result, opening_bracket):
result.should_split_rhs = True
return result
def dont_increase_indentation(split_func: Transformer) -> Transformer:
"""Normalize prefix of the first leaf in every line returned by `split_func`.
This is a decorator over relevant split functions.
"""
@wraps(split_func)
def split_wrapper(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
for line in split_func(line, features):
normalize_prefix(line.leaves[0], inside_brackets=True)
yield line
return split_wrapper
@dont_increase_indentation
def delimiter_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
"""Split according to delimiters of the highest priority.
If the appropriate Features are given, the split will add trailing commas
also in function signatures and calls that contain `*` and `**`.
"""
try:
last_leaf = line.leaves[-1]
except IndexError:
raise CannotSplit("Line empty") from None
bt = line.bracket_tracker
try:
delimiter_priority = bt.max_delimiter_priority(exclude={id(last_leaf)})
except ValueError:
raise CannotSplit("No delimiters found") from None
if delimiter_priority == DOT_PRIORITY:
if bt.delimiter_count_with_priority(delimiter_priority) == 1:
raise CannotSplit("Splitting a single attribute from its owner looks wrong")
current_line = Line(
mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
)
lowest_depth = sys.maxsize
trailing_comma_safe = True
def append_to_line(leaf: Leaf) -> Iterator[Line]:
"""Append `leaf` to current line or to new line if appending impossible."""
nonlocal current_line
try:
current_line.append_safe(leaf, preformatted=True)
except ValueError:
yield current_line
current_line = Line(
mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
)
current_line.append(leaf)
for leaf in line.leaves:
yield from append_to_line(leaf)
for comment_after in line.comments_after(leaf):
yield from append_to_line(comment_after)
lowest_depth = min(lowest_depth, leaf.bracket_depth)
if leaf.bracket_depth == lowest_depth:
if is_vararg(leaf, within={syms.typedargslist}):
trailing_comma_safe = (
trailing_comma_safe and Feature.TRAILING_COMMA_IN_DEF in features
)
elif is_vararg(leaf, within={syms.arglist, syms.argument}):
trailing_comma_safe = (
trailing_comma_safe and Feature.TRAILING_COMMA_IN_CALL in features
)
leaf_priority = bt.delimiters.get(id(leaf))
if leaf_priority == delimiter_priority:
yield current_line
current_line = Line(
mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
)
if current_line:
if (
trailing_comma_safe
and delimiter_priority == COMMA_PRIORITY
and current_line.leaves[-1].type != token.COMMA
and current_line.leaves[-1].type != STANDALONE_COMMENT
):
new_comma = Leaf(token.COMMA, ",")
current_line.append(new_comma)
yield current_line
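# Illustrative sketch (not part of the original module): when the highest-priority
# delimiter is the comma, delimiter_split explodes the bracketed content one
# element per line and adds a trailing comma when safe, e.g.
#
#   result = some_function(first_argument, second_argument, third_argument)
#
# ends up (after the surrounding right-hand split) as
#
#   result = some_function(
#       first_argument,
#       second_argument,
#       third_argument,
#   )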
@dont_increase_indentation
def standalone_comment_split(
line: Line, features: Collection[Feature] = ()
) -> Iterator[Line]:
"""Split standalone comments from the rest of the line."""
if not line.contains_standalone_comments(0):
raise CannotSplit("Line does not have any standalone comments")
current_line = Line(
mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
)
def append_to_line(leaf: Leaf) -> Iterator[Line]:
"""Append `leaf` to current line or to new line if appending impossible."""
nonlocal current_line
try:
current_line.append_safe(leaf, preformatted=True)
except ValueError:
yield current_line
current_line = Line(
line.mode, depth=line.depth, inside_brackets=line.inside_brackets
)
current_line.append(leaf)
for leaf in line.leaves:
yield from append_to_line(leaf)
for comment_after in line.comments_after(leaf):
yield from append_to_line(comment_after)
if current_line:
yield current_line
def normalize_prefix(leaf: Leaf, *, inside_brackets: bool) -> None:
"""Leave existing extra newlines if not `inside_brackets`. Remove everything
else.
Note: don't use backslashes for formatting or you'll lose your voting rights.
"""
if not inside_brackets:
spl = leaf.prefix.split("#")
if "\\" not in spl[0]:
nl_count = spl[-1].count("\n")
if len(spl) > 1:
nl_count -= 1
leaf.prefix = "\n" * nl_count
return
leaf.prefix = ""
def normalize_invisible_parens(
node: Node, parens_after: Set[str], *, preview: bool
) -> None:
"""Make existing optional parentheses invisible or create new ones.
`parens_after` is a set of string leaf values immediately after which parens
should be put.
Standardizes on visible parentheses for single-element tuples, and keeps
existing visible parentheses for other tuples and generator expressions.
"""
for pc in list_comments(node.prefix, is_endmarker=False, preview=preview):
if pc.value in FMT_OFF:
# This `node` has a prefix with `# fmt: off`, don't mess with parens.
return
check_lpar = False
for index, child in enumerate(list(node.children)):
# Fixes a bug where invisible parens are not properly stripped from
# assignment statements that contain type annotations.
if isinstance(child, Node) and child.type == syms.annassign:
normalize_invisible_parens(
child, parens_after=parens_after, preview=preview
)
# Add parentheses around long tuple unpacking in assignments.
if (
index == 0
and isinstance(child, Node)
and child.type == syms.testlist_star_expr
):
check_lpar = True
if check_lpar:
if child.type == syms.atom:
if maybe_make_parens_invisible_in_atom(child, parent=node):
wrap_in_parentheses(node, child, visible=False)
elif is_one_tuple(child):
wrap_in_parentheses(node, child, visible=True)
elif node.type == syms.import_from:
# "import from" nodes store parentheses directly as part of
# the statement
if is_lpar_token(child):
assert is_rpar_token(node.children[-1])
# make parentheses invisible
child.value = ""
node.children[-1].value = ""
elif child.type != token.STAR:
# insert invisible parentheses
node.insert_child(index, Leaf(token.LPAR, ""))
node.append_child(Leaf(token.RPAR, ""))
break
elif not (isinstance(child, Leaf) and is_multiline_string(child)):
wrap_in_parentheses(node, child, visible=False)
check_lpar = isinstance(child, Leaf) and child.value in parens_after
def maybe_make_parens_invisible_in_atom(node: LN, parent: LN) -> bool:
"""If it's safe, make the parens in the atom `node` invisible, recursively.
Additionally, remove repeated, adjacent invisible parens from the atom `node`
as they are redundant.
Returns whether the node should itself be wrapped in invisible parentheses.
"""
if (
node.type != syms.atom
or is_empty_tuple(node)
or is_one_tuple(node)
or (is_yield(node) and parent.type != syms.expr_stmt)
or max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY
):
return False
if is_walrus_assignment(node):
if parent.type in [
syms.annassign,
syms.expr_stmt,
syms.assert_stmt,
syms.return_stmt,
# these ones aren't useful to end users, but they do please fuzzers
syms.for_stmt,
syms.del_stmt,
]:
return False
first = node.children[0]
last = node.children[-1]
if is_lpar_token(first) and is_rpar_token(last):
middle = node.children[1]
# make parentheses invisible
first.value = ""
last.value = ""
maybe_make_parens_invisible_in_atom(middle, parent=parent)
if is_atom_with_invisible_parens(middle):
# Strip the invisible parens from `middle` by replacing
# it with the child in-between the invisible parens
middle.replace(middle.children[1])
return False
return True
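# Illustrative note (not part of the original module): "invisible" parentheses are
# real LPAR/RPAR leaves whose value is the empty string, so they still participate
# in bracket tracking and splitting but render as nothing, e.g.
#
#   return (x)    ->  return x       (redundant parens made invisible)
#   return (1,)   ->  return (1,)    (one-tuples keep visible parens)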
def should_split_line(line: Line, opening_bracket: Leaf) -> bool:
"""Should `line` be immediately split with `delimiter_split()` after RHS?"""
if not (opening_bracket.parent and opening_bracket.value in "[{("):
return False
# We're essentially checking if the body is delimited by commas and there's more
# than one of them (we're excluding the trailing comma and if the delimiter priority
# is still commas, that means there's more).
exclude = set()
trailing_comma = False
try:
last_leaf = line.leaves[-1]
if last_leaf.type == token.COMMA:
trailing_comma = True
exclude.add(id(last_leaf))
max_priority = line.bracket_tracker.max_delimiter_priority(exclude=exclude)
except (IndexError, ValueError):
return False
return max_priority == COMMA_PRIORITY and (
(line.mode.magic_trailing_comma and trailing_comma)
# always explode imports
or opening_bracket.parent.type in {syms.atom, syms.import_from}
)
def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[LeafID]]:
"""Generate sets of closing bracket IDs that should be omitted in a RHS.
Brackets can be omitted if the entire trailer up to and including
a preceding closing bracket fits in one line.
Yielded sets are cumulative (contain results of previous yields, too). First
set is empty, unless the line should explode, in which case bracket pairs until
the one that needs to explode are omitted.
"""
omit: Set[LeafID] = set()
if not line.magic_trailing_comma:
yield omit
length = 4 * line.depth
opening_bracket: Optional[Leaf] = None
closing_bracket: Optional[Leaf] = None
inner_brackets: Set[LeafID] = set()
for index, leaf, leaf_length in line.enumerate_with_length(reversed=True):
length += leaf_length
if length > line_length:
break
has_inline_comment = leaf_length > len(leaf.value) + len(leaf.prefix)
if leaf.type == STANDALONE_COMMENT or has_inline_comment:
break
if opening_bracket:
if leaf is opening_bracket:
opening_bracket = None
elif leaf.type in CLOSING_BRACKETS:
prev = line.leaves[index - 1] if index > 0 else None
if (
prev
and prev.type == token.COMMA
and leaf.opening_bracket is not None
and not is_one_tuple_between(
leaf.opening_bracket, leaf, line.leaves
)
):
# Never omit bracket pairs with trailing commas.
# We need to explode on those.
break
inner_brackets.add(id(leaf))
elif leaf.type in CLOSING_BRACKETS:
prev = line.leaves[index - 1] if index > 0 else None
if prev and prev.type in OPENING_BRACKETS:
# Empty brackets would fail a split so treat them as "inner"
# brackets (e.g. only add them to the `omit` set if another
                # pair of brackets was good enough).
inner_brackets.add(id(leaf))
continue
if closing_bracket:
omit.add(id(closing_bracket))
omit.update(inner_brackets)
inner_brackets.clear()
yield omit
if (
prev
and prev.type == token.COMMA
and leaf.opening_bracket is not None
and not is_one_tuple_between(leaf.opening_bracket, leaf, line.leaves)
):
# Never omit bracket pairs with trailing commas.
# We need to explode on those.
break
if leaf.value:
opening_bracket = leaf.opening_bracket
closing_bracket = leaf
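# Illustrative sketch (not part of the original module): for a chained call such as
#
#   value = obj.first_method().second_method(argument_one, argument_two)
#
# the first yielded set is empty (split at the last bracket pair); each later set
# additionally omits the next trailing pair, letting right_hand_split glue those
# trailers together and try an earlier bracket pair as the split point.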
def run_transformer(
line: Line,
transform: Transformer,
mode: Mode,
features: Collection[Feature],
*,
line_str: str = "",
) -> List[Line]:
if not line_str:
line_str = line_to_string(line)
result: List[Line] = []
for transformed_line in transform(line, features):
if str(transformed_line).strip("\n") == line_str:
raise CannotTransform("Line transformer returned an unchanged result")
result.extend(transform_line(transformed_line, mode=mode, features=features))
if (
transform.__class__.__name__ != "rhs"
or not line.bracket_tracker.invisible
or any(bracket.value for bracket in line.bracket_tracker.invisible)
or line.contains_multiline_strings()
or result[0].contains_uncollapsable_type_comments()
or result[0].contains_unsplittable_type_ignore()
or is_line_short_enough(result[0], line_length=mode.line_length)
# If any leaves have no parents (which _can_ occur since
# `transform(line)` potentially destroys the line's underlying node
# structure), then we can't proceed. Doing so would cause the below
# call to `append_leaves()` to fail.
or any(leaf.parent is None for leaf in line.leaves)
):
return result
line_copy = line.clone()
append_leaves(line_copy, line, line.leaves)
features_fop = set(features) | {Feature.FORCE_OPTIONAL_PARENTHESES}
second_opinion = run_transformer(
line_copy, transform, mode, features_fop, line_str=line_str
)
if all(
is_line_short_enough(ln, line_length=mode.line_length) for ln in second_opinion
):
result = second_opinion
return result
| 39.964117 | 88 | 0.624829 |
79475aec84918fd23d243efb7bb93b2b278317c4 | 8,056 | py | Python | host-software/keyplus/cdata_types.py | dpejcha/keyplus | 8d0d4b59e9175295fd5edf0d4fecfced8053460f | ["MIT"] | 226 | 2017-08-14T16:11:36.000Z | 2022-03-13T00:58:13.000Z | host-software/keyplus/cdata_types.py | dpejcha/keyplus | 8d0d4b59e9175295fd5edf0d4fecfced8053460f | ["MIT"] | 90 | 2017-09-12T02:07:39.000Z | 2022-01-27T20:58:19.000Z | host-software/keyplus/cdata_types.py | dpejcha/keyplus | 8d0d4b59e9175295fd5edf0d4fecfced8053460f | ["MIT"] | 44 | 2017-09-17T17:31:25.000Z | 2022-02-27T08:19:46.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cstruct
from cstruct import define, typedef, CStruct
from keyplus.constants import *
class CStructWithBytes(CStruct):
def to_bytes(self):
return bytearray(self.pack())
typedef('uint8', 'uint8_t')
typedef('uint16', 'uint16_t')
typedef('uint32', 'uint32_t')
typedef('int8', 'int8_t')
typedef('int16', 'int16_t')
typedef('int32', 'int32_t')
define('MAX_NUM_KEYBOARDS', 64)
define('MAX_NUM_DEVICES', 64)
define('AES_KEY_LEN', 16)
define('NRF_ADDR_LEN', 5)
def make_bit_field_variables(class_obj, field_list):
"""
This function uses the python built-in `property()` to create attributes
to access the bit fields of a function directly.
The field_list should contain a list of properties where each property is
defined as a tuple of the form:
(bit_mask_variable, bit_field_name, bit_field_mask)
After this function has been called, you should be able to get and set
the bit fields using the `bit_field_name`.
"""
for (bit_mask_variable, attribute_name, attribute_mask) in field_list:
# This function returns a getter function used to get a variable in
# a bitmask
def make_getter(bit_mask_variable, mask):
def getter(self):
return bool(getattr(self, bit_mask_variable) & mask)
return getter
# This function returns a setter function used to set a variable in
# a bitmask
def make_setter(bit_mask_variable, mask):
def setter(self, value):
# First find the new value for the bit mask variable
new_mask_value = getattr(self, bit_mask_variable)
if value:
new_mask_value |= mask
else:
new_mask_value &= ~mask
# then update the bitmask variable with this value
setattr(
self,
bit_mask_variable,
new_mask_value,
)
return setter
setattr(
class_obj ,
attribute_name,
property(
fget = make_getter(bit_mask_variable, attribute_mask),
fset = make_setter(bit_mask_variable, attribute_mask),
)
)
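# Illustrative sketch (not part of the original module): what the helper above
# generates, shown on a plain class with hypothetical masks. Here `flags` plays
# the role of the bit-mask variable named in each tuple.
class _DemoFlags(object):
    flags = 0
make_bit_field_variables(_DemoFlags, [
    ("flags", "usb_disabled", 0x01),
    ("flags", "i2c_disabled", 0x02),
])
# demo = _DemoFlags()
# demo.usb_disabled = True   # sets bit 0x01 inside demo.flags
# assert demo.flags == 0x01 and demo.usb_disabled and not demo.i2c_disabled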
class scan_plan_t(CStructWithBytes):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
uint8_t mode;
uint8_t rows;
uint8_t cols;
uint8_t debounce_time_press;
uint8_t debounce_time_release;
uint8_t trigger_time_press;
uint8_t trigger_time_release;
uint8_t parasitic_discharge_delay_idle;
uint8_t parasitic_discharge_delay_debouncing;
uint8_t max_col_pin_num;
uint8_t max_key_num;
"""
class settings_header_t(CStructWithBytes):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
uint8_t device_id;
uint8_t _device_name[50];
uint8_t timestamp[8];
uint8_t default_report_mode;
struct scan_plan_t scan_plan;
uint8_t _reserved0[8];
uint8_t feature_ctrl;
uint8_t _reserved1[14];
uint16_t crc; /* total size == 96 */
"""
class feature_ctrl_t(CStructWithBytes):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
uint8_t feature_ctrl;
"""
for class_name in [settings_header_t, feature_ctrl_t]:
make_bit_field_variables(class_name, [
("feature_ctrl" , "usb_disabled" , FEATURE_CTRL_USB_DISABLE) ,
("feature_ctrl" , "i2c_disabled" , FEATURE_CTRL_WIRED_DISABLE) ,
("feature_ctrl" , "nrf24_disabled" , FEATURE_CTRL_RF_DISABLE) ,
("feature_ctrl" , "unifying_disabled" , FEATURE_CTRL_RF_MOUSE_DISABLE) ,
("feature_ctrl" , "bluetooth_disabled" , FEATURE_CTRL_BT_DISABLE) ,
])
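# Illustrative note (not part of the original module): after the loop above the
# feature flags can be read and written by name on either class, e.g.
#   ctrl = feature_ctrl_t()
#   ctrl.feature_ctrl = 0
#   ctrl.usb_disabled = True    # sets FEATURE_CTRL_USB_DISABLE in the byte
#   ctrl.nrf24_disabled         # -> False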
class keyboard_info_t(CStructWithBytes):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
uint8_t matrix_size;
uint8_t layer_count;
"""
class device_info_t(CStructWithBytes):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
uint8_t layout_id;
uint8_t matrix_offset;
uint8_t matrix_size;
"""
class layout_settings_header_t(CStructWithBytes):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
uint8_t number_layouts;
uint8_t number_devices;
uint8_t default_layout_id;
uint8_t _reserved[29]; /* 32 */
"""
class layout_settings_t(CStructWithBytes):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
uint8_t number_layouts;
uint8_t number_devices;
uint8_t default_layout_id;
uint8_t _reserved[29]; /* 32 */
struct keyboard_info_t layouts[MAX_NUM_KEYBOARDS];
struct device_info_t devices[MAX_NUM_DEVICES]; /* 353 bytes */
"""
class firmware_info_t(CStructWithBytes):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
uint8_t version_major;
uint8_t version_minor;
uint8_t version_patch;
uint32_t layout_flash_size;
uint8_t timestamp[8];
uint8_t git_hash[8];
uint8_t connectivity;
uint8_t scan_support;
uint8_t keyboard_features;
uint8_t key_handlers;
uint8_t led_support;
uint16_t bootloader_vid;
uint16_t bootloader_pid;
uint32_t chip_id;
uint16_t board_id;
uint8_t internal_scan_method;
uint8_t page_size;
uint8_t max_rows;
uint8_t version_info;
uint8_t mouse_support;
uint8_t reserved[19];
"""
make_bit_field_variables(firmware_info_t, [
("scan_support" , "has_scanning" , SUPPORT_SCANNING_MASK) ,
("scan_support" , "has_scanning_col_row" , SUPPORT_SCANNING_COL_ROW_MASK) ,
("scan_support" , "has_scanning_row_col" , SUPPORT_SCANNING_ROW_COL_MASK) ,
("scan_support" , "has_scanning_pins" , SUPPORT_SCANNING_PINS_MASK) ,
("scan_support" , "has_scanning_arbitrary" , SUPPORT_SCANNING_ARBITRARY_MASK) ,
("scan_support" , "has_scanning_built_in" , SUPPORT_SCANNING_BUILT_IN_MASK) ,
("key_handlers" , "has_media_keys" , SUPPORT_KEY_MEDIA) ,
("key_handlers" , "has_mouse_keys" , SUPPORT_KEY_MOUSE) ,
("key_handlers" , "has_layer_keys" , SUPPORT_KEY_LAYERS) ,
("key_handlers" , "has_sticky_keys" , SUPPORT_KEY_STICKY) ,
("key_handlers" , "has_tap_keys" , SUPPORT_KEY_TAP) ,
("key_handlers" , "has_hold_keys" , SUPPORT_KEY_HOLD) ,
("keyboard_features" , "has_6kro" , SUPPORT_KRO_6) ,
("keyboard_features" , "has_nkro" , SUPPORT_KRO_N) ,
("led_support" , "has_led_indicators" , SUPPORT_LED_INDICATORS) ,
("led_support" , "has_led_backlighting" , SUPPORT_LED_BACKLIGHTING) ,
("led_support" , "has_led_ws2812" , SUPPORT_LED_WS2812) ,
("connectivity" , "has_nrf24" , SUPPORT_NRF24) ,
("connectivity" , "has_i2c" , SUPPORT_I2C) ,
("connectivity" , "has_unifying" , SUPPORT_UNIFYING) ,
("connectivity" , "has_usb" , SUPPORT_USB) ,
("connectivity" , "has_bluetooth" , SUPPORT_BT) ,
("version_info" , "is_stable_build" , VERSION_IS_STABLE) ,
("mouse_support" , "has_mouse " , SUPPORT_MOUSE) ,
("mouse_support" , "has_mouse_gesture" , SUPPORT_MOUSE_GESTURE) ,
])
class rf_settings_t(CStructWithBytes):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
uint8_t pipe_addr_0[NRF_ADDR_LEN];
uint8_t pipe_addr_1[NRF_ADDR_LEN];
uint8_t pipe_addr_2;
uint8_t pipe_addr_3;
uint8_t pipe_addr_4;
uint8_t pipe_addr_5;
uint8_t channel;
uint8_t arc;
uint8_t data_rate;
uint8_t power;
uint8_t _reserved[14];
uint8_t ekey[AES_KEY_LEN];
uint8_t dkey[AES_KEY_LEN];
"""
class settings_t(CStructWithBytes):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
struct settings_header_t header; /* size == 96 */
struct layout_settings_t layout; /* size == 352 */
struct rf_settings_t rf; /* size == 64 */
""" # size == 512 bytes
| 33.016393 | 83 | 0.659384 |
79475b38085f2476af2828a97d01f3ae26f34cf6 | 315 | py | Python | test.py | RapDoodle8080/mooli-milk-tea-management-system | 9b6e31664529ac70d180da2f219baf0eb902017b | ["MIT"] | 1 | 2021-05-29T09:40:42.000Z | 2021-05-29T09:40:42.000Z | test.py | RapDoodle/Mooli-Ordering-System | 9b6e31664529ac70d180da2f219baf0eb902017b | ["MIT"] | 1 | 2020-10-28T02:10:24.000Z | 2020-10-28T02:10:24.000Z | test.py | RapDoodle/Mooli-Ordering-System | 9b6e31664529ac70d180da2f219baf0eb902017b | ["MIT"] | 1 | 2021-05-29T09:40:41.000Z | 2021-05-29T09:40:42.000Z |
from setup import init_test_db
init_test_db()
import tests.test_category
import tests.test_product
import tests.test_user
import tests.test_redeem_card
import tests.test_coupon
import tests.test_cart_item
import tests.test_order
import tests.test_archive
import tests.test_role_permission
import tests.test_staff
| 21 | 33 | 0.87619 |
79475bac62fe1d9dc3a8b9f07b1c041f8d3b051c | 886 | py | Python | isi_sdk_8_1_1/test/test_providers_ldap.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | ["Unlicense"] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_1_1/test/test_providers_ldap.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | ["Unlicense"] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_1_1/test/test_providers_ldap.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | ["Unlicense"] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z |
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 6
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_1_1
from isi_sdk_8_1_1.models.providers_ldap import ProvidersLdap # noqa: E501
from isi_sdk_8_1_1.rest import ApiException
class TestProvidersLdap(unittest.TestCase):
"""ProvidersLdap unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testProvidersLdap(self):
"""Test ProvidersLdap"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_1_1.models.providers_ldap.ProvidersLdap() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 21.609756 | 83 | 0.699774 |
79475bd21aea84e2ce8294c56d924614cb998bbe | 12,960 | py | Python | niftynet/contrib/csv_reader/applications_maybe/label_driven_registration.py | tdml13/NiftyNet | b35fa19ca307e81d229e2fe8269a417724833da2 | ["Apache-2.0"] | 1,403 | 2017-08-30T11:49:45.000Z | 2022-03-31T11:44:05.000Z | niftynet/contrib/csv_reader/applications_maybe/label_driven_registration.py | tdml13/NiftyNet | b35fa19ca307e81d229e2fe8269a417724833da2 | ["Apache-2.0"] | 360 | 2017-10-03T15:33:53.000Z | 2021-03-17T06:27:38.000Z | niftynet/contrib/csv_reader/applications_maybe/label_driven_registration.py | tdml13/NiftyNet | b35fa19ca307e81d229e2fe8269a417724833da2 | ["Apache-2.0"] | 464 | 2017-09-13T20:56:32.000Z | 2022-02-11T20:33:47.000Z |
"""
A preliminary re-implementation of:
Hu et al., Weakly-Supervised Convolutional Neural Networks for
Multimodal Image Registration, Medical Image Analysis (2018)
https://doi.org/10.1016/j.media.2018.07.002
The original implementation and tutorial is available at:
https://github.com/YipengHu/label-reg
"""
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from niftynet.application.base_application import BaseApplication
from niftynet.io.image_reader import ImageReader
from niftynet.contrib.sampler_pairwise.sampler_pairwise_uniform import \
PairwiseUniformSampler
from niftynet.contrib.sampler_pairwise.sampler_pairwise_resize import \
PairwiseResizeSampler
from niftynet.contrib.csv_reader.csv_reader import CSVReader
from niftynet.engine.application_factory import \
OptimiserFactory, ApplicationNetFactory
from niftynet.engine.application_variables import \
NETWORK_OUTPUT, CONSOLE, TF_SUMMARIES
from niftynet.engine.windows_aggregator_resize import ResizeSamplesAggregator
from niftynet.layer.resampler import ResamplerLayer
from niftynet.layer.pad import PadLayer
from niftynet.layer.loss_segmentation import LossFunction
SUPPORTED_INPUT = {'moving_image', 'moving_label',
'fixed_image', 'fixed_label'}
class RegApp(BaseApplication):
REQUIRED_CONFIG_SECTION = "REGISTRATION"
def __init__(self, net_param, action_param, action):
BaseApplication.__init__(self)
tf.logging.info('starting label-driven registration')
self.action = action
self.net_param = net_param
self.action_param = action_param
self.registration_param = None
self.data_param = None
def initialise_dataset_loader(
self, data_param=None, task_param=None, data_partitioner=None):
self.data_param = data_param
self.registration_param = task_param
if self.is_evaluation:
            raise NotImplementedError('Evaluation is not yet '
                                      'supported in this application.')
try:
reader_phase = self.action_param.dataset_to_infer
except AttributeError:
reader_phase = None
file_lists = data_partitioner.get_file_lists_by(
phase=reader_phase, action=self.action)
self.readers = []
for file_list in file_lists:
fixed_reader = ImageReader({'fixed_image', 'fixed_label'})
fixed_reader.initialise(data_param, task_param, file_list)
self.readers.append(fixed_reader)
moving_reader = ImageReader({'moving_image', 'moving_label'})
moving_reader.initialise(data_param, task_param, file_list)
self.readers.append(moving_reader)
# pad the fixed target only
# moving image will be resampled to match the targets
#volume_padding_layer = []
#if self.net_param.volume_padding_size:
# volume_padding_layer.append(PadLayer(
# image_name=('fixed_image', 'fixed_label'),
# border=self.net_param.volume_padding_size))
#for reader in self.readers:
# reader.add_preprocessing_layers(volume_padding_layer)
def initialise_sampler(self):
if self.is_training:
self.sampler = []
assert len(self.readers) >= 2, 'at least two readers are required'
training_sampler = PairwiseUniformSampler(
reader_0=self.readers[0],
reader_1=self.readers[1],
data_param=self.data_param,
batch_size=self.net_param.batch_size)
self.sampler.append(training_sampler)
# adding validation readers if possible
if len(self.readers) >= 4:
validation_sampler = PairwiseUniformSampler(
reader_0=self.readers[2],
reader_1=self.readers[3],
data_param=self.data_param,
batch_size=self.net_param.batch_size)
self.sampler.append(validation_sampler)
else:
self.sampler = PairwiseResizeSampler(
reader_0=self.readers[0],
reader_1=self.readers[1],
data_param=self.data_param,
batch_size=self.net_param.batch_size)
def initialise_network(self):
decay = self.net_param.decay
self.net = ApplicationNetFactory.create(self.net_param.name)(decay)
def connect_data_and_network(self,
outputs_collector=None,
gradients_collector=None):
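        # switch_samplers picks the training (index 0) or validation (index -1)
        # sampler and draws one batch of image windows under a matching scope.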
def switch_samplers(for_training):
with tf.name_scope('train' if for_training else 'validation'):
sampler = self.get_sampler()[0 if for_training else -1]
return sampler() # returns image only
if self.is_training:
self.patience = self.action_param.patience
if self.action_param.validation_every_n > 0:
sampler_window = \
tf.cond(tf.logical_not(self.is_validation),
lambda: switch_samplers(True),
lambda: switch_samplers(False))
else:
sampler_window = switch_samplers(True)
image_windows, _ = sampler_window
# image_windows, locations = sampler_window
# decode channels for moving and fixed images
image_windows_list = [
tf.expand_dims(img, axis=-1)
for img in tf.unstack(image_windows, axis=-1)]
fixed_image, fixed_label, moving_image, moving_label = \
image_windows_list
# estimate ddf
dense_field = self.net(fixed_image, moving_image)
if isinstance(dense_field, tuple):
dense_field = dense_field[0]
# transform the moving labels
resampler = ResamplerLayer(
interpolation='linear', boundary='replicate')
resampled_moving_label = resampler(moving_label, dense_field)
# compute label loss (foreground only)
loss_func = LossFunction(
n_class=1,
loss_type=self.action_param.loss_type,
softmax=False)
label_loss = loss_func(prediction=resampled_moving_label,
ground_truth=fixed_label)
dice_fg = 1.0 - label_loss
# appending regularisation loss
total_loss = label_loss
reg_loss = tf.get_collection('bending_energy')
if reg_loss:
total_loss = total_loss + \
self.net_param.decay * tf.reduce_mean(reg_loss)
self.total_loss = total_loss
# compute training gradients
with tf.name_scope('Optimiser'):
optimiser_class = OptimiserFactory.create(
name=self.action_param.optimiser)
self.optimiser = optimiser_class.get_instance(
learning_rate=self.action_param.lr)
grads = self.optimiser.compute_gradients(
total_loss, colocate_gradients_with_ops=True)
gradients_collector.add_to_collection(grads)
metrics_dice = loss_func(
prediction=tf.to_float(resampled_moving_label >= 0.5),
ground_truth=tf.to_float(fixed_label >= 0.5))
metrics_dice = 1.0 - metrics_dice
# command line output
outputs_collector.add_to_collection(
var=dice_fg, name='one_minus_data_loss',
collection=CONSOLE)
outputs_collector.add_to_collection(
var=tf.reduce_mean(reg_loss), name='bending_energy',
collection=CONSOLE)
outputs_collector.add_to_collection(
var=total_loss, name='total_loss', collection=CONSOLE)
outputs_collector.add_to_collection(
var=metrics_dice, name='ave_fg_dice', collection=CONSOLE)
# for tensorboard
outputs_collector.add_to_collection(
var=dice_fg,
name='data_loss',
average_over_devices=True,
summary_type='scalar',
collection=TF_SUMMARIES)
outputs_collector.add_to_collection(
var=total_loss,
name='total_loss',
average_over_devices=True,
summary_type='scalar',
collection=TF_SUMMARIES)
outputs_collector.add_to_collection(
var=metrics_dice,
name='averaged_foreground_Dice',
average_over_devices=True,
summary_type='scalar',
collection=TF_SUMMARIES)
# for visualisation debugging
# resampled_moving_image = resampler(moving_image, dense_field)
# outputs_collector.add_to_collection(
# var=fixed_image, name='fixed_image',
# collection=NETWORK_OUTPUT)
# outputs_collector.add_to_collection(
# var=fixed_label, name='fixed_label',
# collection=NETWORK_OUTPUT)
# outputs_collector.add_to_collection(
# var=moving_image, name='moving_image',
# collection=NETWORK_OUTPUT)
# outputs_collector.add_to_collection(
# var=moving_label, name='moving_label',
# collection=NETWORK_OUTPUT)
# outputs_collector.add_to_collection(
# var=resampled_moving_image, name='resampled_image',
# collection=NETWORK_OUTPUT)
# outputs_collector.add_to_collection(
# var=resampled_moving_label, name='resampled_label',
# collection=NETWORK_OUTPUT)
# outputs_collector.add_to_collection(
# var=dense_field, name='ddf', collection=NETWORK_OUTPUT)
# outputs_collector.add_to_collection(
# var=locations, name='locations', collection=NETWORK_OUTPUT)
# outputs_collector.add_to_collection(
# var=shift[0], name='a', collection=CONSOLE)
# outputs_collector.add_to_collection(
# var=shift[1], name='b', collection=CONSOLE)
else:
image_windows, locations = self.sampler()
image_windows_list = [
tf.expand_dims(img, axis=-1)
for img in tf.unstack(image_windows, axis=-1)]
fixed_image, fixed_label, moving_image, moving_label = \
image_windows_list
dense_field = self.net(fixed_image, moving_image)
if isinstance(dense_field, tuple):
dense_field = dense_field[0]
# transform the moving labels
resampler = ResamplerLayer(
interpolation='linear', boundary='replicate')
resampled_moving_image = resampler(moving_image, dense_field)
resampled_moving_label = resampler(moving_label, dense_field)
outputs_collector.add_to_collection(
var=fixed_image, name='fixed_image',
collection=NETWORK_OUTPUT)
outputs_collector.add_to_collection(
var=moving_image, name='moving_image',
collection=NETWORK_OUTPUT)
outputs_collector.add_to_collection(
var=resampled_moving_image,
name='resampled_moving_image',
collection=NETWORK_OUTPUT)
outputs_collector.add_to_collection(
var=resampled_moving_label,
name='resampled_moving_label',
collection=NETWORK_OUTPUT)
outputs_collector.add_to_collection(
var=fixed_label, name='fixed_label',
collection=NETWORK_OUTPUT)
outputs_collector.add_to_collection(
var=moving_label, name='moving_label',
collection=NETWORK_OUTPUT)
#outputs_collector.add_to_collection(
# var=dense_field, name='field',
# collection=NETWORK_OUTPUT)
outputs_collector.add_to_collection(
var=locations, name='locations',
collection=NETWORK_OUTPUT)
self.output_decoder = ResizeSamplesAggregator(
image_reader=self.readers[0], # fixed image reader
name='fixed_image',
output_path=self.action_param.save_seg_dir,
interp_order=self.action_param.output_interp_order)
def interpret_output(self, batch_output):
if self.is_training:
return True
return self.output_decoder.decode_batch(
{'window_resampled':batch_output['resampled_moving_image']},
batch_output['locations'])
| 41.806452 | 78 | 0.617901 |
79475cd69a115695d9e8eea9aa59b3bf674563d9 | 7,286 | py | Python | jcld/tdir.py | clld/recipes | 947ad0514e08f89cad95cacb448b60c41231b7d6 | [
"Apache-2.0"
] | null | null | null | jcld/tdir.py | clld/recipes | 947ad0514e08f89cad95cacb448b60c41231b7d6 | [
"Apache-2.0"
] | 1 | 2019-05-02T10:14:26.000Z | 2019-05-06T09:16:30.000Z | jcld/tdir.py | clld/recipes | 947ad0514e08f89cad95cacb448b60c41231b7d6 | [
"Apache-2.0"
] | 2 | 2015-12-06T22:04:13.000Z | 2017-10-05T06:44:06.000Z | import sys
import re
from sqlalchemy import create_engine
from xlrd import open_workbook
from path import path
from bs4 import BeautifulSoup as bs
from clld.db.meta import DBSession
from clld.db.models import common
DATA_DIR = path('/home/robert/venvs/clld/data/tdir-data/TDIR')
def read(type_):
assert type_ in ['glosses', 'examples', 'languages', 'references']
wb = open_workbook(DATA_DIR.joinpath('tdir.%s.comp.xls' % type_))
for s in wb.sheets():
break
fields = [s.cell(0, i).value for i in range(s.ncols)]
for row in range(1, s.nrows):
values = []
for i in range(s.ncols):
value = s.cell(row, i).value
if value in ['NULL', '--', #'%'
]:
value = None
if isinstance(value, float):
value = unicode(int(value))
values.append(value)
yield dict(zip(fields, values))
EXAMPLE_MAP = {
u'idem': '', # 1
u'own': 'own',
u'adv inclusive': '', # 27
u'reflexive': 'refl',
u'scalar focus particle': 'sfp',
u'focus particle': 'sfp',
u'scalar focus particl': 'sfp',
None: '', # 13
u'adv exclusive': 'ave',
u'refllexive': 'refl',
u'refl': 'refl',
u'same': '', # 1
u'adnominal': 'adn',
u'negative polarity item': '', # 1
}
#<option value="adnom">adnominal intensifier</option>
# <option value="excl">adverbial-exclusive intensifier</option>
# <option value="incl">adverbial-inclusive intensifier</option>
# <option value="refl">reflexive</option>
# <option value="own">attributive intensifier ('own')</option>
# <option value="scal">scalar focus particle</option>
PARAMS = {
u'sort': u'Sortal restrictions of primary adnominal intensifier',
#u'otherint': None,
u'own': u"Attributive intensifier ('own')",
u'sfp': u"Scalar focus particle ('even')",
#u'otherrefl': u'-tʂə̀-',
u'lex': u'Lexical source of primary adnominal intensifier',
u'refl': u'Primary reflexive marker',
u'adn': u'Primary adnominal intensifier',
u'ave': u'Exclusive intensifier',
}
#Primary adnominal intensifier:
# Sortal restrictions:
# Lexical source:
#Other intensifiers:
#Exclusive intensifier:
#Primary reflexive marker
#Other reflexive markers
#Attributive intensifier ('own'):
#Scalar focus particle ('even'):
def fix_example(e, repl='\t'):
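    # Replace the HTML cell separators with `repl` and pass the result through
    # BeautifulSoup to decode entities and normalise the remaining markup.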
return unicode(bs(e.replace('</td><td>', repl)))
def load():
wals = create_engine('postgresql://robert@/wals3')
contributor = common.Contributor(id='gastvolker', name='Volker Gast')
contribution = common.Contribution(
id='tdir', name='Typological Database of Intensifiers and Reflexives')
cc = common.ContributionContributor(
contribution=contribution, contributor=contributor)
DBSession.add(cc)
for row in read('glosses'):
DBSession.add(common.GlossAbbreviation(id=row['gloss'], name=row['explanation']))
params = {}
for id_, name in PARAMS.items():
params[id_] = common.Parameter(id='tdir-' + id_, name=name)
DBSession.add(params[id_])
#
# TODO: domain for sortal restrictions!
#
values = {}
languages = {}
for row in read('languages'):
if row['adn'] and '<br>' in row['adn']:
row['adn'], other = row['adn'].split('<br>', 1)
if not row['otherint']:
row['otherint'] = ''
row['otherint'] = '\n'.join(filter(None, row['otherint'].split('<br>') + other.split('<br>')))
row['sil'] = row['sil'].lower()
row['sil'] = {
'arm': 'hye',
'vmn': 'mig',
'gli': 'gle',
'grk': 'ell',
'hbr': 'heb',
'ltn': 'lat',
'chn': 'cmn',
'ota': 'ote',
'pnj': 'pan',
'pba': 'rap',
'esg': 'kal',
'vla': 'zea',
'lat': 'lav',
}.get(row['sil'], row['sil'])
l = common.Language(id=row['sil'].lower(), name=row['language'])
languages[row['language']] = l
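        # Look up coordinates in the WALS database, first by ISO 639-3 code,
        # then falling back to a match on the language name.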
res = wals.execute("select l.latitude, l.longitude from language as l, languageidentifier as li, identifier as i where l.pk = li.language_pk and li.identifier_pk = i.pk and i.id = '%s' and i.type = 'iso639-3';" \
% row['sil']).fetchone()
if not res:
res = wals.execute("select latitude, longitude from language where name = '%s';" % row['language']).fetchone()
if res:
l.latitude, l.longitude = res
else:
print(row['language'], row['sil'])
#(u'Classical Nahuatl', u'nci') ???
#(u'Ancient Greek', u'gko')
for pid in params.keys():
value = row[pid]
if value:
value = common.Value(
id='tdir-%s-%s' % (pid, l.id),
name=unicode(bs(value)),
contribution=contribution,
parameter=params[pid],
language=l)
values['%s-%s' % (pid, row['language'])] = value
DBSession.add(value)
def normalize_ref(ref):
ref = re.sub('\s+', ' ', ref).strip()
return unicode(bs(ref)).replace('<i>', '"').replace('</i>', '"')
"""
Ogawa, A. (1998)
Wali, K. et al. (2000)
Lyutikova. -> Lyutikova,
se-Bertit -> se-Berit
missing refs:
Sengupta, G. (2000). Lexical anaphors and pronouns in Bangla. In Lust et al. (eds.), <i>Lexical Anaphors and Pronouns in Selected South Asian Languages</i>. Berlin: Mouton de Gruyter.
Davison, A. Mistry (2000). Lexical anaphors and pronouns in Hindi/Urdu. In Lust et al. (eds.), <i>Lexical Anaphors and Pronouns in Selected South Asian Languages</i>. Berlin: Mouton de Gruyter.
"""
refs = {}
for row in read('references'):
name = re.sub('\s+', ' ', row['entry'].split(').')[0].strip()) + ')'
src = common.Source(
id=row['ref'].strip(), name=name, description=normalize_ref(row['entry']))
refs[name] = src
DBSession.add(src)
for row in read('examples'):
if row['language'] not in languages:
print('example for unknown language "%s"' % row['language'])
continue
s = common.Sentence(
id=row['Nr'].strip(),
name=fix_example(row['original'], repl=' '),
language=languages[row['language']],
analyzed=fix_example(row['original']),
gloss=fix_example(row['gloss']),
description=row['translation'],
source=row['source'],
comment=row['comments'])
has_refs = False
for ref in refs:
if ref in row['source']:
if normalize_ref(row['source']) != refs[ref].description:
print('-->')
print(row['source'])
has_refs = True
common.SentenceReference(sentence=s, source=refs[ref])
if not has_refs:
print('+++++')
print(row['source'])
pid = EXAMPLE_MAP[row['pov']]
if pid:
# associate with value!
o = common.ValueSentence(value=values['%s-%s' % (pid, row['language'])], sentence=s)
DBSession.add(s)
| 33.118182 | 220 | 0.555998 |
79475edd45556a3febc6f5a70464ca8bd2eef3df | 2,030 | py | Python | rediscache/RedisConnector.py | Rizwan-Hasan/Flask-URL-Shortener-Application | 1d5f2209ff74ce14baf79f1757d71221e18b93d3 | [
"MIT"
] | 1 | 2021-11-03T20:46:25.000Z | 2021-11-03T20:46:25.000Z | rediscache/RedisConnector.py | Rizwan-Hasan/Flask-URL-Shortener-Application | 1d5f2209ff74ce14baf79f1757d71221e18b93d3 | [
"MIT"
] | null | null | null | rediscache/RedisConnector.py | Rizwan-Hasan/Flask-URL-Shortener-Application | 1d5f2209ff74ce14baf79f1757d71221e18b93d3 | [
"MIT"
] | null | null | null | import logging
import redis
from rediscache import host, port, password, dbNo
# Redis Connector ↓
class RedisConnect:
def __init__(self, counterName: str, counter: int):
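        # One connection pool is shared by all operations; individual commands
        # obtain a pooled connection through __getConnection().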
try:
self.__pool = redis.ConnectionPool(
decode_responses=True, host=host, port=port, password=password, db=dbNo
)
self.__counterName: str = counterName
self.__counter: int = counter
self.__urltable = None
except Exception as e:
logging.error(e)
def __getConnection(self):
return redis.Redis(connection_pool=self.__pool)
def setUrlTableObject(self, urltable):
self.__urltable = urltable
def getNewCounter(self):
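        # Retry until Redis responds: seed the counter from the URL-table row
        # count if it does not exist yet, then return the incremented value.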
while True:
try:
conn = self.__getConnection()
counter = conn.get(self.__counterName)
if counter is None:
conn.set(
self.__counterName,
self.__counter + self.__urltable.getRowCount(),
)
counter = conn.incr(self.__counterName)
conn.close()
return counter
except Exception as e:
logging.error(e)
def storeURL(self, url: str, short_url: str, expire: int):
while True:
try:
conn = self.__getConnection()
conn.set(f"{short_url}", f"{url}", ex=expire)
conn.close()
return True
except Exception as e:
logging.error(e)
def getLongUrl(self, short_url: str):
while True:
try:
conn = self.__getConnection()
tmp = conn.get(f"{short_url}")
conn.close()
if tmp is not None:
return tmp
else:
return None
except Exception as e:
logging.error(e)
if __name__ == "__main__":
print("Hello World")
| 29.42029 | 87 | 0.5133 |
79475f3196a3b160a2d27011119ff916db64ee2a | 3,131 | py | Python | alipay/aop/api/response/AlipayTradeCustomsDeclareResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/response/AlipayTradeCustomsDeclareResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/response/AlipayTradeCustomsDeclareResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayTradeCustomsDeclareResponse(AlipayResponse):
def __init__(self):
super(AlipayTradeCustomsDeclareResponse, self).__init__()
self._alipay_declare_no = None
self._currency = None
self._identity_check = None
self._out_trade_no = None
self._pay_code = None
self._pay_transaction_id = None
self._total_amount = None
self._trade_no = None
self._ver_dept = None
@property
def alipay_declare_no(self):
return self._alipay_declare_no
@alipay_declare_no.setter
def alipay_declare_no(self, value):
self._alipay_declare_no = value
@property
def currency(self):
return self._currency
@currency.setter
def currency(self, value):
self._currency = value
@property
def identity_check(self):
return self._identity_check
@identity_check.setter
def identity_check(self, value):
self._identity_check = value
@property
def out_trade_no(self):
return self._out_trade_no
@out_trade_no.setter
def out_trade_no(self, value):
self._out_trade_no = value
@property
def pay_code(self):
return self._pay_code
@pay_code.setter
def pay_code(self, value):
self._pay_code = value
@property
def pay_transaction_id(self):
return self._pay_transaction_id
@pay_transaction_id.setter
def pay_transaction_id(self, value):
self._pay_transaction_id = value
@property
def total_amount(self):
return self._total_amount
@total_amount.setter
def total_amount(self, value):
self._total_amount = value
@property
def trade_no(self):
return self._trade_no
@trade_no.setter
def trade_no(self, value):
self._trade_no = value
@property
def ver_dept(self):
return self._ver_dept
@ver_dept.setter
def ver_dept(self, value):
self._ver_dept = value
def parse_response_content(self, response_content):
response = super(AlipayTradeCustomsDeclareResponse, self).parse_response_content(response_content)
if 'alipay_declare_no' in response:
self.alipay_declare_no = response['alipay_declare_no']
if 'currency' in response:
self.currency = response['currency']
if 'identity_check' in response:
self.identity_check = response['identity_check']
if 'out_trade_no' in response:
self.out_trade_no = response['out_trade_no']
if 'pay_code' in response:
self.pay_code = response['pay_code']
if 'pay_transaction_id' in response:
self.pay_transaction_id = response['pay_transaction_id']
if 'total_amount' in response:
self.total_amount = response['total_amount']
if 'trade_no' in response:
self.trade_no = response['trade_no']
if 'ver_dept' in response:
self.ver_dept = response['ver_dept']
| 29.537736 | 106 | 0.66145 |
79475f92fda14c8694d599eb7c742393a726abf5 | 7,216 | py | Python | tools/project-creator/Python2.6.6/Lib/abc.py | gohopo/nineck.ca | 9601f5ae4c20f8a3ea27b06551556fa5e1eecce3 | [
"MIT"
] | 81 | 2017-03-13T08:24:01.000Z | 2021-04-02T09:48:38.000Z | tools/project-creator/Python2.6.6/Lib/abc.py | gohopo/nineck.ca | 9601f5ae4c20f8a3ea27b06551556fa5e1eecce3 | [
"MIT"
] | 6 | 2017-04-30T08:36:55.000Z | 2017-09-22T01:37:28.000Z | tools/project-creator/Python2.6.6/Lib/abc.py | gohopo/nineck.ca | 9601f5ae4c20f8a3ea27b06551556fa5e1eecce3 | [
"MIT"
] | 41 | 2017-03-18T14:11:58.000Z | 2021-04-14T05:06:09.000Z | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) according to PEP 3119."""
# Instance of old-style class
class _C: pass
_InstanceType = type(_C())
def abstractmethod(funcobj):
"""A decorator indicating abstract methods.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract methods are overridden.
The abstract methods can be called using any of the normal
'super' call mechanisms.
Usage:
class C:
__metaclass__ = ABCMeta
@abstractmethod
def my_abstract_method(self, ...):
...
"""
funcobj.__isabstractmethod__ = True
return funcobj
class abstractproperty(property):
"""A decorator indicating abstract properties.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract properties are overridden.
The abstract properties can be called using any of the normal
'super' call mechanisms.
Usage:
class C:
__metaclass__ = ABCMeta
@abstractproperty
def my_abstract_property(self):
...
This defines a read-only property; you can also define a read-write
abstract property using the 'long' form of property declaration:
class C:
__metaclass__ = ABCMeta
def getx(self): ...
def setx(self, value): ...
x = abstractproperty(getx, setx)
"""
__isabstractmethod__ = True
class ABCMeta(type):
"""Metaclass for defining Abstract Base Classes (ABCs).
Use this metaclass to create an ABC. An ABC can be subclassed
directly, and then acts as a mix-in class. You can also register
unrelated concrete classes (even built-in classes) and unrelated
ABCs as 'virtual subclasses' -- these and their descendants will
be considered subclasses of the registering ABC by the built-in
issubclass() function, but the registering ABC won't show up in
their MRO (Method Resolution Order) nor will method
implementations defined by the registering ABC be callable (not
even via super()).
"""
# A global counter that is incremented each time a class is
# registered as a virtual subclass of anything. It forces the
# negative cache to be cleared before its next use.
_abc_invalidation_counter = 0
def __new__(mcls, name, bases, namespace):
cls = super(ABCMeta, mcls).__new__(mcls, name, bases, namespace)
# Compute set of abstract method names
abstracts = set(name
for name, value in namespace.items()
if getattr(value, "__isabstractmethod__", False))
for base in bases:
for name in getattr(base, "__abstractmethods__", set()):
value = getattr(cls, name, None)
if getattr(value, "__isabstractmethod__", False):
abstracts.add(name)
cls.__abstractmethods__ = frozenset(abstracts)
# Set up inheritance registry
cls._abc_registry = set()
cls._abc_cache = set()
cls._abc_negative_cache = set()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
return cls
def register(cls, subclass):
"""Register a virtual subclass of an ABC."""
if not isinstance(cls, type):
raise TypeError("Can only register classes")
if issubclass(subclass, cls):
return # Already a subclass
# Subtle: test for cycles *after* testing for "already a subclass";
# this means we allow X.register(X) and interpret it as a no-op.
if issubclass(cls, subclass):
# This would create a cycle, which is bad for the algorithm below
raise RuntimeError("Refusing to create an inheritance cycle")
cls._abc_registry.add(subclass)
ABCMeta._abc_invalidation_counter += 1 # Invalidate negative cache
def _dump_registry(cls, file=None):
"""Debug helper to print the ABC registry."""
print >> file, "Class: %s.%s" % (cls.__module__, cls.__name__)
print >> file, "Inv.counter: %s" % ABCMeta._abc_invalidation_counter
for name in sorted(cls.__dict__.keys()):
if name.startswith("_abc_"):
value = getattr(cls, name)
print >> file, "%s: %r" % (name, value)
def __instancecheck__(cls, instance):
"""Override for isinstance(instance, cls)."""
# Inline the cache checking when it's simple.
subclass = getattr(instance, '__class__', None)
if subclass in cls._abc_cache:
return True
subtype = type(instance)
# Old-style instances
if subtype is _InstanceType:
subtype = subclass
if subtype is subclass or subclass is None:
if (cls._abc_negative_cache_version ==
ABCMeta._abc_invalidation_counter and
subtype in cls._abc_negative_cache):
return False
# Fall back to the subclass check.
return cls.__subclasscheck__(subtype)
return (cls.__subclasscheck__(subclass) or
cls.__subclasscheck__(subtype))
def __subclasscheck__(cls, subclass):
"""Override for issubclass(subclass, cls)."""
# Check cache
if subclass in cls._abc_cache:
return True
# Check negative cache; may have to invalidate
if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
# Invalidate the negative cache
cls._abc_negative_cache = set()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
elif subclass in cls._abc_negative_cache:
return False
# Check the subclass hook
ok = cls.__subclasshook__(subclass)
if ok is not NotImplemented:
assert isinstance(ok, bool)
if ok:
cls._abc_cache.add(subclass)
else:
cls._abc_negative_cache.add(subclass)
return ok
# Check if it's a direct subclass
if cls in getattr(subclass, '__mro__', ()):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a registered class (recursive)
for rcls in cls._abc_registry:
if issubclass(subclass, rcls):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a subclass (recursive)
for scls in cls.__subclasses__():
if issubclass(subclass, scls):
cls._abc_cache.add(subclass)
return True
# No dice; update negative cache
cls._abc_negative_cache.add(subclass)
return False
| 39.431694 | 80 | 0.617239 |
79476075e83e2165b0a87d4686dcf53e79e3c772 | 6,659 | py | Python | tests/cli/test_rasa_test.py | chaneyjd/rasa | 104a9591fc10b96eaa7fe402b6d64ca652b7ebe2 | [
"Apache-2.0"
] | null | null | null | tests/cli/test_rasa_test.py | chaneyjd/rasa | 104a9591fc10b96eaa7fe402b6d64ca652b7ebe2 | [
"Apache-2.0"
] | 46 | 2020-09-26T11:36:38.000Z | 2022-03-01T13:38:02.000Z | tests/cli/test_rasa_test.py | chaneyjd/rasa | 104a9591fc10b96eaa7fe402b6d64ca652b7ebe2 | [
"Apache-2.0"
] | null | null | null | import os
from shutil import copyfile
from rasa.core.test import CONFUSION_MATRIX_STORIES_FILE
from rasa.constants import RESULTS_FILE
from rasa.shared.constants import DEFAULT_RESULTS_PATH
from rasa.shared.utils.io import list_files, write_yaml
from typing import Callable
from _pytest.pytester import RunResult
def test_test_core(run_in_simple_project: Callable[..., RunResult]):
run_in_simple_project("test", "core", "--stories", "data")
assert os.path.exists("results")
def test_test_core_no_plot(run_in_simple_project: Callable[..., RunResult]):
run_in_simple_project("test", "core", "--no-plot")
assert not os.path.exists(f"results/{CONFUSION_MATRIX_STORIES_FILE}")
def test_test(run_in_simple_project_with_model: Callable[..., RunResult]):
write_yaml(
{
"pipeline": "KeywordIntentClassifier",
"policies": [{"name": "MemoizationPolicy"}],
},
"config2.yml",
)
run_in_simple_project_with_model("test")
assert os.path.exists("results")
assert os.path.exists("results/intent_histogram.png")
assert os.path.exists("results/intent_confusion_matrix.png")
def test_test_no_plot(run_in_simple_project: Callable[..., RunResult]):
run_in_simple_project("test", "--no-plot")
assert not os.path.exists("results/intent_histogram.png")
assert not os.path.exists("results/intent_confusion_matrix.png")
assert not os.path.exists("results/story_confmat.pdf")
def test_test_nlu(run_in_simple_project_with_model: Callable[..., RunResult]):
run_in_simple_project_with_model("test", "nlu", "--nlu", "data", "--successes")
assert os.path.exists("results/intent_histogram.png")
assert os.path.exists("results/intent_confusion_matrix.png")
assert os.path.exists("results/intent_successes.json")
def test_test_nlu_no_plot(run_in_simple_project: Callable[..., RunResult]):
run_in_simple_project("test", "nlu", "--no-plot")
assert not os.path.exists("results/intent_histogram.png")
assert not os.path.exists("results/intent_confusion_matrix.png")
def test_test_nlu_cross_validation(run_in_simple_project: Callable[..., RunResult]):
run_in_simple_project(
"test", "nlu", "--cross-validation", "-c", "config.yml", "-f", "2", "-r", "1"
)
assert os.path.exists("results/intent_histogram.png")
assert os.path.exists("results/intent_confusion_matrix.png")
def test_test_nlu_comparison(run_in_simple_project: Callable[..., RunResult]):
write_yaml({"pipeline": "KeywordIntentClassifier"}, "config.yml")
write_yaml({"pipeline": "KeywordIntentClassifier"}, "config2.yml")
run_in_simple_project(
"test",
"nlu",
"--config",
"config.yml",
"config2.yml",
"--run",
"2",
"--percentages",
"75",
"25",
)
assert os.path.exists("results/run_1")
assert os.path.exists("results/run_2")
def test_test_core_comparison(
run_in_simple_project_with_model: Callable[..., RunResult]
):
files = list_files("models")
copyfile(files[0], "models/copy-model.tar.gz")
run_in_simple_project_with_model(
"test",
"core",
"-m",
files[0],
"models/copy-model.tar.gz",
"--stories",
"data/stories.md",
)
assert os.path.exists(os.path.join(DEFAULT_RESULTS_PATH, RESULTS_FILE))
def test_test_core_comparison_after_train(
run_in_simple_project: Callable[..., RunResult]
):
write_yaml(
{"language": "en", "policies": [{"name": "MemoizationPolicy"}]}, "config_1.yml"
)
write_yaml(
{"language": "en", "policies": [{"name": "MemoizationPolicy"}]}, "config_2.yml"
)
run_in_simple_project(
"train",
"core",
"-c",
"config_1.yml",
"config_2.yml",
"--stories",
"data/stories.yml",
"--runs",
"2",
"--percentages",
"25",
"75",
"--out",
"comparison_models",
)
assert os.path.exists("comparison_models")
assert os.path.exists("comparison_models/run_1")
assert os.path.exists("comparison_models/run_2")
run_in_simple_project(
"test",
"core",
"-m",
"comparison_models",
"--stories",
"data/stories",
"--evaluate-model-directory",
)
assert os.path.exists(os.path.join(DEFAULT_RESULTS_PATH, RESULTS_FILE))
assert os.path.exists(
os.path.join(DEFAULT_RESULTS_PATH, "core_model_comparison_graph.pdf")
)
def test_test_help(run: Callable[..., RunResult]):
output = run("test", "--help")
help_text = """usage: rasa test [-h] [-v] [-vv] [--quiet] [-m MODEL] [-s STORIES]
[--max-stories MAX_STORIES] [--endpoints ENDPOINTS]
[--fail-on-prediction-errors] [--url URL]
[--evaluate-model-directory] [-u NLU]
[-c CONFIG [CONFIG ...]] [--cross-validation] [-f FOLDS]
[-r RUNS] [-p PERCENTAGES [PERCENTAGES ...]] [--no-plot]
[--successes] [--no-errors] [--out OUT]
{core,nlu} ..."""
lines = help_text.split("\n")
# expected help text lines should appear somewhere in the output
printed_help = set(output.outlines)
for line in lines:
assert line in printed_help
def test_test_nlu_help(run: Callable[..., RunResult]):
output = run("test", "nlu", "--help")
help_text = """usage: rasa test nlu [-h] [-v] [-vv] [--quiet] [-m MODEL] [-u NLU] [--out OUT]
[-c CONFIG [CONFIG ...]] [--cross-validation] [-f FOLDS]
[-r RUNS] [-p PERCENTAGES [PERCENTAGES ...]] [--no-plot]
[--successes] [--no-errors]"""
lines = help_text.split("\n")
# expected help text lines should appear somewhere in the output
printed_help = set(output.outlines)
for line in lines:
assert line in printed_help
def test_test_core_help(run: Callable[..., RunResult]):
output = run("test", "core", "--help")
help_text = """usage: rasa test core [-h] [-v] [-vv] [--quiet] [-m MODEL [MODEL ...]]
[-s STORIES] [--max-stories MAX_STORIES] [--out OUT]
[--e2e] [--endpoints ENDPOINTS]
[--fail-on-prediction-errors] [--url URL]
[--evaluate-model-directory] [--no-plot] [--successes]
[--no-errors]"""
lines = help_text.split("\n")
# expected help text lines should appear somewhere in the output
printed_help = set(output.outlines)
for line in lines:
assert line in printed_help
| 31.861244 | 97 | 0.620514 |
794762575f72eaa6e791bc573b8920ff92da10e6 | 19,720 | py | Python | sdk/python/pulumi_azure_native/apimanagement/v20210101preview/backend.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/apimanagement/v20210101preview/backend.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/apimanagement/v20210101preview/backend.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['BackendArgs', 'Backend']
@pulumi.input_type
class BackendArgs:
def __init__(__self__, *,
protocol: pulumi.Input[Union[str, 'BackendProtocol']],
resource_group_name: pulumi.Input[str],
service_name: pulumi.Input[str],
url: pulumi.Input[str],
backend_id: Optional[pulumi.Input[str]] = None,
credentials: Optional[pulumi.Input['BackendCredentialsContractArgs']] = None,
description: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['BackendPropertiesArgs']] = None,
proxy: Optional[pulumi.Input['BackendProxyContractArgs']] = None,
resource_id: Optional[pulumi.Input[str]] = None,
title: Optional[pulumi.Input[str]] = None,
tls: Optional[pulumi.Input['BackendTlsPropertiesArgs']] = None):
"""
The set of arguments for constructing a Backend resource.
:param pulumi.Input[Union[str, 'BackendProtocol']] protocol: Backend communication protocol.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] service_name: The name of the API Management service.
:param pulumi.Input[str] url: Runtime Url of the Backend.
:param pulumi.Input[str] backend_id: Identifier of the Backend entity. Must be unique in the current API Management service instance.
:param pulumi.Input['BackendCredentialsContractArgs'] credentials: Backend Credentials Contract Properties
:param pulumi.Input[str] description: Backend Description.
:param pulumi.Input['BackendPropertiesArgs'] properties: Backend Properties contract
:param pulumi.Input['BackendProxyContractArgs'] proxy: Backend Proxy Contract Properties
:param pulumi.Input[str] resource_id: Management Uri of the Resource in External System. This url can be the Arm Resource Id of Logic Apps, Function Apps or API Apps.
:param pulumi.Input[str] title: Backend Title.
:param pulumi.Input['BackendTlsPropertiesArgs'] tls: Backend TLS Properties
"""
pulumi.set(__self__, "protocol", protocol)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "service_name", service_name)
pulumi.set(__self__, "url", url)
if backend_id is not None:
pulumi.set(__self__, "backend_id", backend_id)
if credentials is not None:
pulumi.set(__self__, "credentials", credentials)
if description is not None:
pulumi.set(__self__, "description", description)
if properties is not None:
pulumi.set(__self__, "properties", properties)
if proxy is not None:
pulumi.set(__self__, "proxy", proxy)
if resource_id is not None:
pulumi.set(__self__, "resource_id", resource_id)
if title is not None:
pulumi.set(__self__, "title", title)
if tls is not None:
pulumi.set(__self__, "tls", tls)
@property
@pulumi.getter
def protocol(self) -> pulumi.Input[Union[str, 'BackendProtocol']]:
"""
Backend communication protocol.
"""
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: pulumi.Input[Union[str, 'BackendProtocol']]):
pulumi.set(self, "protocol", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> pulumi.Input[str]:
"""
The name of the API Management service.
"""
return pulumi.get(self, "service_name")
@service_name.setter
def service_name(self, value: pulumi.Input[str]):
pulumi.set(self, "service_name", value)
@property
@pulumi.getter
def url(self) -> pulumi.Input[str]:
"""
Runtime Url of the Backend.
"""
return pulumi.get(self, "url")
@url.setter
def url(self, value: pulumi.Input[str]):
pulumi.set(self, "url", value)
@property
@pulumi.getter(name="backendId")
def backend_id(self) -> Optional[pulumi.Input[str]]:
"""
Identifier of the Backend entity. Must be unique in the current API Management service instance.
"""
return pulumi.get(self, "backend_id")
@backend_id.setter
def backend_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "backend_id", value)
@property
@pulumi.getter
def credentials(self) -> Optional[pulumi.Input['BackendCredentialsContractArgs']]:
"""
Backend Credentials Contract Properties
"""
return pulumi.get(self, "credentials")
@credentials.setter
def credentials(self, value: Optional[pulumi.Input['BackendCredentialsContractArgs']]):
pulumi.set(self, "credentials", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Backend Description.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['BackendPropertiesArgs']]:
"""
Backend Properties contract
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['BackendPropertiesArgs']]):
pulumi.set(self, "properties", value)
@property
@pulumi.getter
def proxy(self) -> Optional[pulumi.Input['BackendProxyContractArgs']]:
"""
Backend Proxy Contract Properties
"""
return pulumi.get(self, "proxy")
@proxy.setter
def proxy(self, value: Optional[pulumi.Input['BackendProxyContractArgs']]):
pulumi.set(self, "proxy", value)
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> Optional[pulumi.Input[str]]:
"""
Management Uri of the Resource in External System. This url can be the Arm Resource Id of Logic Apps, Function Apps or API Apps.
"""
return pulumi.get(self, "resource_id")
@resource_id.setter
def resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_id", value)
@property
@pulumi.getter
def title(self) -> Optional[pulumi.Input[str]]:
"""
Backend Title.
"""
return pulumi.get(self, "title")
@title.setter
def title(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "title", value)
@property
@pulumi.getter
def tls(self) -> Optional[pulumi.Input['BackendTlsPropertiesArgs']]:
"""
Backend TLS Properties
"""
return pulumi.get(self, "tls")
@tls.setter
def tls(self, value: Optional[pulumi.Input['BackendTlsPropertiesArgs']]):
pulumi.set(self, "tls", value)
class Backend(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backend_id: Optional[pulumi.Input[str]] = None,
credentials: Optional[pulumi.Input[pulumi.InputType['BackendCredentialsContractArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[pulumi.InputType['BackendPropertiesArgs']]] = None,
protocol: Optional[pulumi.Input[Union[str, 'BackendProtocol']]] = None,
proxy: Optional[pulumi.Input[pulumi.InputType['BackendProxyContractArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
title: Optional[pulumi.Input[str]] = None,
tls: Optional[pulumi.Input[pulumi.InputType['BackendTlsPropertiesArgs']]] = None,
url: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Backend details.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] backend_id: Identifier of the Backend entity. Must be unique in the current API Management service instance.
:param pulumi.Input[pulumi.InputType['BackendCredentialsContractArgs']] credentials: Backend Credentials Contract Properties
:param pulumi.Input[str] description: Backend Description.
:param pulumi.Input[pulumi.InputType['BackendPropertiesArgs']] properties: Backend Properties contract
:param pulumi.Input[Union[str, 'BackendProtocol']] protocol: Backend communication protocol.
:param pulumi.Input[pulumi.InputType['BackendProxyContractArgs']] proxy: Backend Proxy Contract Properties
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] resource_id: Management Uri of the Resource in External System. This url can be the Arm Resource Id of Logic Apps, Function Apps or API Apps.
:param pulumi.Input[str] service_name: The name of the API Management service.
:param pulumi.Input[str] title: Backend Title.
:param pulumi.Input[pulumi.InputType['BackendTlsPropertiesArgs']] tls: Backend TLS Properties
:param pulumi.Input[str] url: Runtime Url of the Backend.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: BackendArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Backend details.
:param str resource_name: The name of the resource.
:param BackendArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(BackendArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backend_id: Optional[pulumi.Input[str]] = None,
credentials: Optional[pulumi.Input[pulumi.InputType['BackendCredentialsContractArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[pulumi.InputType['BackendPropertiesArgs']]] = None,
protocol: Optional[pulumi.Input[Union[str, 'BackendProtocol']]] = None,
proxy: Optional[pulumi.Input[pulumi.InputType['BackendProxyContractArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
title: Optional[pulumi.Input[str]] = None,
tls: Optional[pulumi.Input[pulumi.InputType['BackendTlsPropertiesArgs']]] = None,
url: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = BackendArgs.__new__(BackendArgs)
__props__.__dict__["backend_id"] = backend_id
__props__.__dict__["credentials"] = credentials
__props__.__dict__["description"] = description
__props__.__dict__["properties"] = properties
if protocol is None and not opts.urn:
raise TypeError("Missing required property 'protocol'")
__props__.__dict__["protocol"] = protocol
__props__.__dict__["proxy"] = proxy
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["resource_id"] = resource_id
if service_name is None and not opts.urn:
raise TypeError("Missing required property 'service_name'")
__props__.__dict__["service_name"] = service_name
__props__.__dict__["title"] = title
__props__.__dict__["tls"] = tls
if url is None and not opts.urn:
raise TypeError("Missing required property 'url'")
__props__.__dict__["url"] = url
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:apimanagement/v20210101preview:Backend"), pulumi.Alias(type_="azure-native:apimanagement:Backend"), pulumi.Alias(type_="azure-nextgen:apimanagement:Backend"), pulumi.Alias(type_="azure-native:apimanagement/v20160707:Backend"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20160707:Backend"), pulumi.Alias(type_="azure-native:apimanagement/v20161010:Backend"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20161010:Backend"), pulumi.Alias(type_="azure-native:apimanagement/v20170301:Backend"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20170301:Backend"), pulumi.Alias(type_="azure-native:apimanagement/v20180101:Backend"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180101:Backend"), pulumi.Alias(type_="azure-native:apimanagement/v20180601preview:Backend"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180601preview:Backend"), pulumi.Alias(type_="azure-native:apimanagement/v20190101:Backend"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20190101:Backend"), pulumi.Alias(type_="azure-native:apimanagement/v20191201:Backend"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201:Backend"), pulumi.Alias(type_="azure-native:apimanagement/v20191201preview:Backend"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201preview:Backend"), pulumi.Alias(type_="azure-native:apimanagement/v20200601preview:Backend"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20200601preview:Backend"), pulumi.Alias(type_="azure-native:apimanagement/v20201201:Backend"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20201201:Backend")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Backend, __self__).__init__(
'azure-native:apimanagement/v20210101preview:Backend',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Backend':
"""
Get an existing Backend resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = BackendArgs.__new__(BackendArgs)
__props__.__dict__["credentials"] = None
__props__.__dict__["description"] = None
__props__.__dict__["name"] = None
__props__.__dict__["properties"] = None
__props__.__dict__["protocol"] = None
__props__.__dict__["proxy"] = None
__props__.__dict__["resource_id"] = None
__props__.__dict__["title"] = None
__props__.__dict__["tls"] = None
__props__.__dict__["type"] = None
__props__.__dict__["url"] = None
return Backend(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def credentials(self) -> pulumi.Output[Optional['outputs.BackendCredentialsContractResponse']]:
"""
Backend Credentials Contract Properties
"""
return pulumi.get(self, "credentials")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Backend Description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output['outputs.BackendPropertiesResponse']:
"""
Backend Properties contract
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def protocol(self) -> pulumi.Output[str]:
"""
Backend communication protocol.
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter
def proxy(self) -> pulumi.Output[Optional['outputs.BackendProxyContractResponse']]:
"""
Backend Proxy Contract Properties
"""
return pulumi.get(self, "proxy")
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> pulumi.Output[Optional[str]]:
"""
Management Uri of the Resource in External System. This url can be the Arm Resource Id of Logic Apps, Function Apps or API Apps.
"""
return pulumi.get(self, "resource_id")
@property
@pulumi.getter
def title(self) -> pulumi.Output[Optional[str]]:
"""
Backend Title.
"""
return pulumi.get(self, "title")
@property
@pulumi.getter
def tls(self) -> pulumi.Output[Optional['outputs.BackendTlsPropertiesResponse']]:
"""
Backend TLS Properties
"""
return pulumi.get(self, "tls")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type for API Management resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def url(self) -> pulumi.Output[str]:
"""
Runtime Url of the Backend.
"""
return pulumi.get(self, "url")
| 44.514673 | 1,658 | 0.65071 |
794765247ae3c3418e4dd2a34c55ad54a8d44da3 | 7,861 | py | Python | lib_drl/nets/resnet_v1.py | chang010453/GRP-HAI | 60f7c7633e33dbdd852f5df3e0a3d1017b6b2a22 | [
"MIT"
] | null | null | null | lib_drl/nets/resnet_v1.py | chang010453/GRP-HAI | 60f7c7633e33dbdd852f5df3e0a3d1017b6b2a22 | [
"MIT"
] | null | null | null | lib_drl/nets/resnet_v1.py | chang010453/GRP-HAI | 60f7c7633e33dbdd852f5df3e0a3d1017b6b2a22 | [
"MIT"
] | null | null | null | # --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Zheqi He and Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim import losses
from tensorflow.contrib.slim import arg_scope
from tensorflow.contrib.slim.python.slim.nets import resnet_utils
from tensorflow.contrib.slim.python.slim.nets import resnet_v1
from tensorflow.contrib.slim.python.slim.nets.resnet_v1 import resnet_v1_block
import numpy as np
from nets.network import Network
from model.config import cfg
pyramid_maps = {
'resnet50': {'C1':'resnet_v1_50/pool1/Relu:0',
'C2':'resnet_v1_50/block1/unit_2/bottleneck_v1',
'C3':'resnet_v1_50/block2/unit_3/bottleneck_v1',
'C4':'resnet_v1_50/block3/unit_5/bottleneck_v1',
'C5':'resnet_v1_50/block4/unit_3/bottleneck_v1',
},
'resnet101': {'C1': '', 'C2': '',
'C3': '', 'C4': '',
'C5': '',
}
}
def resnet_arg_scope(is_training=True,
batch_norm_decay=0.997,
batch_norm_epsilon=1e-5,
batch_norm_scale=True):
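    # Batch-norm layers are kept frozen (is_training=False, trainable=False);
    # only the convolution weights follow the is_training flag.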
batch_norm_params = {
'is_training': False,
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'trainable': False,
'updates_collections': tf.GraphKeys.UPDATE_OPS
}
with arg_scope(
[slim.conv2d],
weights_regularizer=slim.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY),
weights_initializer=slim.variance_scaling_initializer(),
trainable=is_training,
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
with arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:
return arg_sc
class resnetv1(Network):
def __init__(self, num_layers=50):
Network.__init__(self)
self._feat_stride = cfg.ANCHOR_STRIDES
self._feat_compress = [1. / float(self._feat_stride[0]), ]
self._num_layers = num_layers
self._scope = 'resnet_v1_%d' % num_layers
self._decide_blocks()
def _crop_pool_layer(self, bottom, rois, _im_info, name):
with tf.variable_scope(name) as scope:
batch_ids = tf.squeeze(tf.slice(rois, [0, 0], [-1, 1], name="batch_id"), [1])
# Get the normalized coordinates of bounding boxes
img_height = _im_info[0]
img_width = _im_info[1]
x1 = tf.slice(rois, [0, 1], [-1, 1], name="x1") / img_width
y1 = tf.slice(rois, [0, 2], [-1, 1], name="y1") / img_height
x2 = tf.slice(rois, [0, 3], [-1, 1], name="x2") / img_width
y2 = tf.slice(rois, [0, 4], [-1, 1], name="y2") / img_height
# Won't be back-propagated to rois anyway, but to save time
bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], 1))
if cfg.RESNET.MAX_POOL:
pre_pool_size = cfg.POOLING_SIZE * 2
crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids), [pre_pool_size, pre_pool_size],
name="crops")
crops = slim.max_pool2d(crops, [2, 2], padding='SAME')
else:
crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids),
[cfg.POOLING_SIZE, cfg.POOLING_SIZE],
name="crops")
return crops
# Do the first few layers manually, because 'SAME' padding can behave inconsistently
# for images of different sizes: sometimes 0, sometimes 1
def _build_base(self):
with tf.variable_scope(self._scope, self._scope):
net = resnet_utils.conv2d_same(self._image, 64, 7, stride=2, scope='conv1')
net = tf.pad(net, [[0, 0], [1, 1], [1, 1], [0, 0]])
net = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID', scope='pool1')
return net
def _image_to_head(self, is_training, reuse=None):
assert (0 <= cfg.RESNET.FIXED_BLOCKS <= 3)
# Now the base is always fixed during training
with slim.arg_scope(resnet_arg_scope(is_training=False)):
net_conv = self._build_base()
if cfg.RESNET.FIXED_BLOCKS > 0:
with slim.arg_scope(resnet_arg_scope(is_training=False)):
net_conv, _ = resnet_v1.resnet_v1(net_conv,
self._blocks[0:cfg.RESNET.FIXED_BLOCKS],
global_pool=False,
include_root_block=False,
reuse=reuse,
scope=self._scope)
if cfg.RESNET.FIXED_BLOCKS < 3:
with slim.arg_scope(resnet_arg_scope(is_training=is_training)):
net_conv, _ = resnet_v1.resnet_v1(net_conv,
self._blocks[cfg.RESNET.FIXED_BLOCKS:-1],
global_pool=False,
include_root_block=False,
reuse=reuse,
scope=self._scope)
self._layers['head'] = net_conv
return net_conv
def _head_to_tail(self, pool5, is_training, reuse=None):
with slim.arg_scope(resnet_arg_scope(is_training=is_training)):
fc7, _ = resnet_v1.resnet_v1(pool5,
self._blocks[-1:],
global_pool=False,
include_root_block=False,
reuse=reuse,
scope=self._scope)
# average pooling done by reduce_mean
fc7 = tf.reduce_mean(fc7, axis=[1, 2])
return fc7
def _decide_blocks(self):
# choose different blocks for different number of layers
if self._num_layers == 50:
self._blocks = [resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v1_block('block2', base_depth=128, num_units=4, stride=2),
# use stride 1 for the last conv4 layer
resnet_v1_block('block3', base_depth=256, num_units=6, stride=1),
resnet_v1_block('block4', base_depth=512, num_units=3, stride=1)]
elif self._num_layers == 101:
self._blocks = [resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v1_block('block2', base_depth=128, num_units=4, stride=2),
# use stride 1 for the last conv4 layer
resnet_v1_block('block3', base_depth=256, num_units=23, stride=1),
resnet_v1_block('block4', base_depth=512, num_units=3, stride=1)]
else:
# other numbers are not supported
raise NotImplementedError
def get_variables_to_restore(self, variables, var_keep_dic):
variables_to_restore = []
for v in variables:
if v.name.split(':')[0] in var_keep_dic:
print('Variables restored: %s' % v.name)
variables_to_restore.append(v)
return variables_to_restore
| 45.97076 | 120 | 0.547767 |
794766c841fdc48765bf5b2025f60301d9413e6b | 1,411 | py | Python | flow_package_maths/arithmetic/add/add.py | theengineeringco/flow-package-maths | 50ca778a7a88aa0a58b951a6d7ab51950d8864c3 | [
"MIT"
] | null | null | null | flow_package_maths/arithmetic/add/add.py | theengineeringco/flow-package-maths | 50ca778a7a88aa0a58b951a6d7ab51950d8864c3 | [
"MIT"
] | null | null | null | flow_package_maths/arithmetic/add/add.py | theengineeringco/flow-package-maths | 50ca778a7a88aa0a58b951a6d7ab51950d8864c3 | [
"MIT"
] | null | null | null | from typing import List
from flow import Ports, Process, Settings, Setup
from flow_types import base, unions
# Define Settings
settings = Settings()
settings.add_int_setting(id="terms", default=2, minimum=2, maximum=20) # noqa: WPS432
# Define Ports
ports = Ports()
# Add Inports
ports.add_inport(id="value1", types=unions.Number)
ports.add_inport(id="value2", types=unions.Number)
# Add Outports
ports.add_outport(id="result", types=[base.Double])
def setup(component: Setup):
# Get Setting Values
terms: int = component.get_setting("terms")
# Add Dynamic Inports
inport_ids: List[str] = []
for idx in range(terms - 2):
inport_id = f"value{idx + 3}"
inport_ids.append(inport_id)
component.add_inport(name=f"Value {idx + 3}", id=inport_id, types=unions.Number)
# Set Instance Variables
component.set_variable("inport_ids", inport_ids)
def process(component: Process):
if not component.has_data():
return
# Get Instance Variables
inport_ids: List[str] = component.get_variable("inport_ids")
# Get Inport Data
value1 = float(component.get_data("value1"))
value2 = float(component.get_data("value2"))
# Add all values together
result = value1 + value2 + sum(float(component.get_data(inport_id)) for inport_id in inport_ids)
# Send Outport Data
component.send_data(base.Double(result), "result")
| 25.654545 | 100 | 0.700921 |
794766cbad7ca3f42703e203935a90fa0b87afa7 | 32,960 | py | Python | dask/dataframe/io/csv.py | SultanOrazbayev/dask | 56eeb06103efbf36cc73e9405bcec42a5b92515a | [
"BSD-3-Clause"
] | null | null | null | dask/dataframe/io/csv.py | SultanOrazbayev/dask | 56eeb06103efbf36cc73e9405bcec42a5b92515a | [
"BSD-3-Clause"
] | 1 | 2021-12-01T20:16:41.000Z | 2021-12-02T20:42:37.000Z | dask/dataframe/io/csv.py | scharlottej13/dask | fc1cea9cdb2ea31348204aa51e4f6f7327a2af33 | [
"BSD-3-Clause"
] | null | null | null | import os
from collections.abc import Mapping
from io import BytesIO
from warnings import catch_warnings, simplefilter, warn
try:
import psutil
except ImportError:
psutil = None
import fsspec.implementations.local
import numpy as np
import pandas as pd
from fsspec.compression import compr
from fsspec.core import get_fs_token_paths
from fsspec.core import open as open_file
from fsspec.core import open_files
from fsspec.utils import infer_compression
from pandas.api.types import (
CategoricalDtype,
is_datetime64_any_dtype,
is_float_dtype,
is_integer_dtype,
is_object_dtype,
)
from dask.base import tokenize
from dask.bytes import read_bytes
from dask.core import flatten
from dask.dataframe.io.io import from_map
from dask.dataframe.io.utils import DataFrameIOFunction
from dask.dataframe.utils import clear_known_categories
from dask.delayed import delayed
from dask.utils import asciitable, parse_bytes
class CSVFunctionWrapper(DataFrameIOFunction):
"""
CSV Function-Wrapper Class
Reads CSV data from disk to produce a partition (given a key).
"""
def __init__(
self,
full_columns,
columns,
colname,
head,
header,
reader,
dtypes,
enforce,
kwargs,
):
self.full_columns = full_columns
self._columns = columns
self.colname = colname
self.head = head
self.header = header
self.reader = reader
self.dtypes = dtypes
self.enforce = enforce
self.kwargs = kwargs
@property
def columns(self):
return self.full_columns if self._columns is None else self._columns
def project_columns(self, columns):
"""Return a new CSVFunctionWrapper object with
a sub-column projection.
"""
# Make sure columns is ordered correctly
columns = [c for c in self.head.columns if c in columns]
if columns == self.columns:
return self
return CSVFunctionWrapper(
self.full_columns,
columns,
self.colname,
self.head[columns],
self.header,
self.reader,
{c: self.dtypes[c] for c in columns},
self.enforce,
self.kwargs,
)
def __call__(self, part):
# Part will be a 4-element tuple: (block, path, is_first, is_last)
block, path, is_first, is_last = part
# Construct `path_info`
if path is not None:
path_info = (
self.colname,
path,
sorted(list(self.head[self.colname].cat.categories)),
)
else:
path_info = None
# Deal with arguments that are special
# for the first block of each file
write_header = False
rest_kwargs = self.kwargs.copy()
if not is_first:
write_header = True
rest_kwargs.pop("skiprows", None)
if rest_kwargs.get("header", 0) is not None:
rest_kwargs.pop("header", None)
if not is_last:
rest_kwargs.pop("skipfooter", None)
# Deal with column projection
columns = self.full_columns
project_after_read = False
if self._columns is not None:
if self.kwargs:
# To be safe, if any kwargs are defined, avoid
# changing `usecols` here. Instead, we can just
# select columns after the read
project_after_read = True
else:
columns = self._columns
rest_kwargs["usecols"] = columns
# Call `pandas_read_text`
df = pandas_read_text(
self.reader,
block,
self.header,
rest_kwargs,
self.dtypes,
columns,
write_header,
self.enforce,
path_info,
)
if project_after_read:
return df[self.columns]
return df
def pandas_read_text(
reader,
b,
header,
kwargs,
dtypes=None,
columns=None,
write_header=True,
enforce=False,
path=None,
):
"""Convert a block of bytes to a Pandas DataFrame
Parameters
----------
reader : callable
``pd.read_csv`` or ``pd.read_table``.
b : bytestring
The content to be parsed with ``reader``
header : bytestring
An optional header to prepend to ``b``
kwargs : dict
A dictionary of keyword arguments to be passed to ``reader``
dtypes : dict
dtypes to assign to columns
path : tuple
A tuple containing path column name, path to file, and an ordered list of paths.
See Also
--------
dask.dataframe.csv.read_pandas_from_bytes
"""
bio = BytesIO()
if write_header and not b.startswith(header.rstrip()):
bio.write(header)
bio.write(b)
bio.seek(0)
df = reader(bio, **kwargs)
if dtypes:
coerce_dtypes(df, dtypes)
if enforce and columns and (list(df.columns) != list(columns)):
raise ValueError("Columns do not match", df.columns, columns)
if path:
colname, path, paths = path
code = paths.index(path)
df = df.assign(
**{colname: pd.Categorical.from_codes(np.full(len(df), code), paths)}
)
return df
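# --- Editor's illustrative sketch, not part of the original dask source ---
# Minimal call to pandas_read_text on an in-memory block; the column names
# and values below are made up purely for demonstration.
def _pandas_read_text_example():  # pragma: no cover
    header = b"a,b\n"
    block = b"1,2\n3,4\n"
    # The header is prepended because the block does not already start with it.
    return pandas_read_text(pd.read_csv, block, header, {}, write_header=True)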
def coerce_dtypes(df, dtypes):
"""Coerce dataframe to dtypes safely
Operates in place
Parameters
----------
df: Pandas DataFrame
dtypes: dict like {'x': float}
"""
bad_dtypes = []
bad_dates = []
errors = []
for c in df.columns:
if c in dtypes and df.dtypes[c] != dtypes[c]:
actual = df.dtypes[c]
desired = dtypes[c]
if is_float_dtype(actual) and is_integer_dtype(desired):
bad_dtypes.append((c, actual, desired))
elif is_object_dtype(actual) and is_datetime64_any_dtype(desired):
# This can only occur when parse_dates is specified, but an
# invalid date is encountered. Pandas then silently falls back
# to object dtype. Since `object_array.astype(datetime)` will
# silently overflow, error here and report.
bad_dates.append(c)
else:
try:
df[c] = df[c].astype(dtypes[c])
except Exception as e:
bad_dtypes.append((c, actual, desired))
errors.append((c, e))
if bad_dtypes:
if errors:
ex = "\n".join(
f"- {c}\n {e!r}" for c, e in sorted(errors, key=lambda x: str(x[0]))
)
exceptions = (
"The following columns also raised exceptions on "
"conversion:\n\n%s\n\n"
) % ex
extra = ""
else:
exceptions = ""
# All mismatches are int->float, also suggest `assume_missing=True`
extra = (
"\n\nAlternatively, provide `assume_missing=True` "
"to interpret\n"
"all unspecified integer columns as floats."
)
bad_dtypes = sorted(bad_dtypes, key=lambda x: str(x[0]))
table = asciitable(["Column", "Found", "Expected"], bad_dtypes)
dtype_kw = "dtype={%s}" % ",\n ".join(
f"{k!r}: '{v}'" for (k, v, _) in bad_dtypes
)
dtype_msg = (
"{table}\n\n"
"{exceptions}"
"Usually this is due to dask's dtype inference failing, and\n"
"*may* be fixed by specifying dtypes manually by adding:\n\n"
"{dtype_kw}\n\n"
"to the call to `read_csv`/`read_table`."
"{extra}"
).format(table=table, exceptions=exceptions, dtype_kw=dtype_kw, extra=extra)
else:
dtype_msg = None
if bad_dates:
also = " also " if bad_dtypes else " "
cols = "\n".join("- %s" % c for c in bad_dates)
date_msg = (
"The following columns{also}failed to properly parse as dates:\n\n"
"{cols}\n\n"
"This is usually due to an invalid value in that column. To\n"
"diagnose and fix it's recommended to drop these columns from the\n"
"`parse_dates` keyword, and manually convert them to dates later\n"
"using `dd.to_datetime`."
).format(also=also, cols=cols)
else:
date_msg = None
if bad_dtypes or bad_dates:
rule = "\n\n%s\n\n" % ("-" * 61)
msg = "Mismatched dtypes found in `pd.read_csv`/`pd.read_table`.\n\n%s" % (
rule.join(filter(None, [dtype_msg, date_msg]))
)
raise ValueError(msg)
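# --- Editor's illustrative sketch, not part of the original dask source ---
# coerce_dtypes mutates the frame in place; the toy column names and target
# dtypes below are hypothetical.
def _coerce_dtypes_example():  # pragma: no cover
    df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
    coerce_dtypes(df, {"a": "float64"})
    assert is_float_dtype(df["a"].dtype)
    return df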
def text_blocks_to_pandas(
reader,
block_lists,
header,
head,
kwargs,
enforce=False,
specified_dtypes=None,
path=None,
blocksize=None,
urlpath=None,
):
"""Convert blocks of bytes to a dask.dataframe
This accepts a list of lists of values of bytes where each list corresponds
to one file, and the value of bytes concatenate to comprise the entire
file, in order.
Parameters
----------
reader : callable
``pd.read_csv`` or ``pd.read_table``.
block_lists : list of lists of delayed values of bytes
The lists of bytestrings where each list corresponds to one logical file
header : bytestring
The header, found at the front of the first file, to be prepended to
all blocks
head : pd.DataFrame
An example Pandas DataFrame to be used for metadata.
kwargs : dict
Keyword arguments to pass down to ``reader``
path : tuple, optional
A tuple containing column name for path and the path_converter if provided
Returns
-------
A dask.dataframe
"""
dtypes = head.dtypes.to_dict()
# dtypes contains only instances of CategoricalDtype, which causes issues
# in coerce_dtypes for non-uniform categories across partitions.
# We will modify `dtype` (which is inferred) to
# 1. contain instances of CategoricalDtypes for user-provided types
# 2. contain 'category' for data inferred types
categoricals = head.select_dtypes(include=["category"]).columns
if isinstance(specified_dtypes, Mapping):
known_categoricals = [
k
for k in categoricals
if isinstance(specified_dtypes.get(k), CategoricalDtype)
and specified_dtypes.get(k).categories is not None
]
unknown_categoricals = categoricals.difference(known_categoricals)
else:
unknown_categoricals = categoricals
# Fixup the dtypes
for k in unknown_categoricals:
dtypes[k] = "category"
columns = list(head.columns)
blocks = tuple(flatten(block_lists))
# Create mask of first blocks from nested block_lists
is_first = tuple(block_mask(block_lists))
is_last = tuple(block_mask_last(block_lists))
if path:
colname, path_converter = path
paths = [b[1].path for b in blocks]
if path_converter:
paths = [path_converter(p) for p in paths]
head = head.assign(
**{
colname: pd.Categorical.from_codes(
np.zeros(len(head), dtype=int), set(paths)
)
}
)
path = (colname, paths)
if len(unknown_categoricals):
head = clear_known_categories(head, cols=unknown_categoricals)
# Define parts
parts = []
colname, paths = path or (None, None)
for i in range(len(blocks)):
parts.append([blocks[i], paths[i] if paths else None, is_first[i], is_last[i]])
# Construct the output collection with from_map
return from_map(
CSVFunctionWrapper(
columns,
None,
colname,
head,
header,
reader,
dtypes,
enforce,
kwargs,
),
parts,
meta=head,
label="read-csv",
token=tokenize(reader, urlpath, columns, enforce, head, blocksize),
enforce_metadata=False,
produces_tasks=True,
)
def block_mask(block_lists):
"""
Yields a flat iterable of booleans to mark the zeroth elements of the
nested input ``block_lists`` in a flattened output.
>>> list(block_mask([[1, 2], [3, 4], [5]]))
[True, False, True, False, True]
"""
for block in block_lists:
if not block:
continue
yield True
yield from (False for _ in block[1:])
def block_mask_last(block_lists):
"""
Yields a flat iterable of booleans to mark the last element of the
nested input ``block_lists`` in a flattened output.
>>> list(block_mask_last([[1, 2], [3, 4], [5]]))
[False, True, False, True, True]
"""
for block in block_lists:
if not block:
continue
yield from (False for _ in block[:-1])
yield True
def auto_blocksize(total_memory, cpu_count):
memory_factor = 10
blocksize = int(total_memory // cpu_count / memory_factor)
return min(blocksize, int(64e6))
def _infer_block_size():
default = 2**25
if psutil is not None:
with catch_warnings():
simplefilter("ignore", RuntimeWarning)
mem = psutil.virtual_memory().total
cpu = psutil.cpu_count()
if mem and cpu:
return auto_blocksize(mem, cpu)
return default
# guess blocksize if psutil is installed or use acceptable default one if not
AUTO_BLOCKSIZE = _infer_block_size()
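# --- Editor's illustrative sketch, not part of the original dask source ---
# Worked example of the heuristic above: total memory is split per core,
# divided by memory_factor (10), and capped at 64 MB.
def _auto_blocksize_example():  # pragma: no cover
    assert auto_blocksize(int(4e9), 8) == 50_000_000  # 4 GB / 8 cores / 10
    assert auto_blocksize(int(16e9), 8) == int(64e6)  # hits the 64 MB cap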
def read_pandas(
reader,
urlpath,
blocksize="default",
lineterminator=None,
compression="infer",
sample=256000,
sample_rows=10,
enforce=False,
assume_missing=False,
storage_options=None,
include_path_column=False,
**kwargs,
):
reader_name = reader.__name__
if lineterminator is not None and len(lineterminator) == 1:
kwargs["lineterminator"] = lineterminator
else:
lineterminator = "\n"
if include_path_column and isinstance(include_path_column, bool):
include_path_column = "path"
if "index" in kwargs or "index_col" in kwargs:
raise ValueError(
"Keywords 'index' and 'index_col' not supported. "
f"Use dd.{reader_name}(...).set_index('my-index') instead"
)
for kw in ["iterator", "chunksize"]:
if kw in kwargs:
raise ValueError(f"{kw} not supported for dd.{reader_name}")
if kwargs.get("nrows", None):
raise ValueError(
"The 'nrows' keyword is not supported by "
"`dd.{0}`. To achieve the same behavior, it's "
"recommended to use `dd.{0}(...)."
"head(n=nrows)`".format(reader_name)
)
if isinstance(kwargs.get("skiprows"), int):
skiprows = lastskiprow = firstrow = kwargs.get("skiprows")
elif kwargs.get("skiprows") is None:
skiprows = lastskiprow = firstrow = 0
else:
# When skiprows is a list, we expect more than max(skiprows) to
# be included in the sample. This means that [0,2] will work well,
# but [0, 440] might not work.
skiprows = set(kwargs.get("skiprows"))
lastskiprow = max(skiprows)
# find the firstrow that is not skipped, for use as header
firstrow = min(set(range(len(skiprows) + 1)) - set(skiprows))
if isinstance(kwargs.get("header"), list):
raise TypeError(f"List of header rows not supported for dd.{reader_name}")
if isinstance(kwargs.get("converters"), dict) and include_path_column:
path_converter = kwargs.get("converters").get(include_path_column, None)
else:
path_converter = None
# If compression is "infer", inspect the (first) path suffix and
# set the proper compression option if the suffix is recognized.
if compression == "infer":
# Translate the input urlpath to a simple path list
paths = get_fs_token_paths(urlpath, mode="rb", storage_options=storage_options)[
2
]
# Check for at least one valid path
if len(paths) == 0:
raise OSError(f"{urlpath} resolved to no files")
# Infer compression from first path
compression = infer_compression(paths[0])
if blocksize == "default":
blocksize = AUTO_BLOCKSIZE
if isinstance(blocksize, str):
blocksize = parse_bytes(blocksize)
if blocksize and compression:
# NONE of the compressions should use chunking
warn(
"Warning %s compression does not support breaking apart files\n"
"Please ensure that each individual file can fit in memory and\n"
"use the keyword ``blocksize=None to remove this message``\n"
"Setting ``blocksize=None``" % compression
)
blocksize = None
if compression not in compr:
raise NotImplementedError("Compression format %s not installed" % compression)
if blocksize and sample and blocksize < sample and lastskiprow != 0:
warn(
"Unexpected behavior can result from passing skiprows when\n"
"blocksize is smaller than sample size.\n"
"Setting ``sample=blocksize``"
)
sample = blocksize
b_lineterminator = lineterminator.encode()
b_out = read_bytes(
urlpath,
delimiter=b_lineterminator,
blocksize=blocksize,
sample=sample,
compression=compression,
include_path=include_path_column,
**(storage_options or {}),
)
if include_path_column:
b_sample, values, paths = b_out
path = (include_path_column, path_converter)
else:
b_sample, values = b_out
path = None
if not isinstance(values[0], (tuple, list)):
values = [values]
# If we have not sampled, then use the first row of the first values
# as a representative sample.
if b_sample is False and len(values[0]):
b_sample = values[0][0].compute()
# Get header row, and check that sample is long enough. If the file
# contains a header row, we need at least 2 nonempty rows + the number of
# rows to skip.
names = kwargs.get("names", None)
header = kwargs.get("header", "infer" if names is None else None)
need = 1 if header is None else 2
if kwargs.get("comment"):
# if comment is provided, step through lines of b_sample and strip out comments
parts = []
for part in b_sample.split(b_lineterminator):
split_comment = part.decode().split(kwargs.get("comment"))
if len(split_comment) > 1:
# if line starts with comment, don't include that line in parts.
if len(split_comment[0]) > 0:
parts.append(split_comment[0].strip().encode())
else:
parts.append(part)
if len(parts) > need:
break
else:
parts = b_sample.split(b_lineterminator, lastskiprow + need)
# If the last partition is empty, don't count it
nparts = 0 if not parts else len(parts) - int(not parts[-1])
if sample is not False and nparts < lastskiprow + need and len(b_sample) >= sample:
raise ValueError(
"Sample is not large enough to include at least one "
"row of data. Please increase the number of bytes "
"in `sample` in the call to `read_csv`/`read_table`"
)
if isinstance(header, int):
firstrow += header
header = b"" if header is None else parts[firstrow] + b_lineterminator
# Use sample to infer dtypes and check for presence of include_path_column
head_kwargs = kwargs.copy()
head_kwargs.pop("skipfooter", None)
try:
head = reader(BytesIO(b_sample), nrows=sample_rows, **head_kwargs)
except pd.errors.ParserError as e:
if "EOF" in str(e):
raise ValueError(
"EOF encountered while reading header. \n"
"Pass argument `sample_rows` and make sure the value of `sample` "
"is large enough to accommodate that many rows of data"
) from e
raise
if include_path_column and (include_path_column in head.columns):
raise ValueError(
"Files already contain the column name: %s, so the "
"path column cannot use this name. Please set "
"`include_path_column` to a unique name." % include_path_column
)
specified_dtypes = kwargs.get("dtype", {})
if specified_dtypes is None:
specified_dtypes = {}
# If specified_dtypes is a single type, then all columns were specified
if assume_missing and isinstance(specified_dtypes, dict):
# Convert all non-specified integer columns to floats
for c in head.columns:
if is_integer_dtype(head[c].dtype) and c not in specified_dtypes:
head[c] = head[c].astype(float)
values = [[list(dsk.dask.values()) for dsk in block] for block in values]
return text_blocks_to_pandas(
reader,
values,
header,
head,
kwargs,
enforce=enforce,
specified_dtypes=specified_dtypes,
path=path,
blocksize=blocksize,
urlpath=urlpath,
)
READ_DOC_TEMPLATE = """
Read {file_type} files into a Dask.DataFrame
This parallelizes the :func:`pandas.{reader}` function in the following ways:
- It supports loading many files at once using globstrings:
>>> df = dd.{reader}('myfiles.*.csv') # doctest: +SKIP
- In some cases it can break up large files:
>>> df = dd.{reader}('largefile.csv', blocksize=25e6) # 25MB chunks # doctest: +SKIP
- It can read CSV files from external resources (e.g. S3, HDFS) by
providing a URL:
>>> df = dd.{reader}('s3://bucket/myfiles.*.csv') # doctest: +SKIP
>>> df = dd.{reader}('hdfs:///myfiles.*.csv') # doctest: +SKIP
>>> df = dd.{reader}('hdfs://namenode.example.com/myfiles.*.csv') # doctest: +SKIP
Internally ``dd.{reader}`` uses :func:`pandas.{reader}` and supports many of the
same keyword arguments with the same performance guarantees. See the docstring
for :func:`pandas.{reader}` for more information on available keyword arguments.
Parameters
----------
urlpath : string or list
Absolute or relative filepath(s). Prefix with a protocol like ``s3://``
to read from alternative filesystems. To read from multiple files you
can pass a globstring or a list of paths, with the caveat that they
must all have the same protocol.
blocksize : str, int or None, optional
Number of bytes by which to cut up larger files. Default value is computed
based on available physical memory and the number of cores, up to a maximum
of 64MB. Can be a number like ``64000000`` or a string like ``"64MB"``. If
``None``, a single block is used for each file.
sample : int, optional
Number of bytes to use when determining dtypes
assume_missing : bool, optional
If True, all integer columns that aren't specified in ``dtype`` are assumed
to contain missing values, and are converted to floats. Default is False.
storage_options : dict, optional
Extra options that make sense for a particular storage connection, e.g.
host, port, username, password, etc.
include_path_column : bool or str, optional
Whether or not to include the path to each particular file. If True a new
column is added to the dataframe called ``path``. If str, sets new column
name. Default is False.
**kwargs
Extra keyword arguments to forward to :func:`pandas.{reader}`.
Notes
-----
Dask dataframe tries to infer the ``dtype`` of each column by reading a sample
from the start of the file (or of the first file if it's a glob). Usually this
works fine, but if the ``dtype`` is different later in the file (or in other
files) this can cause issues. For example, if all the rows in the sample had
integer dtypes, but later on there was a ``NaN``, then this would error at
compute time. To fix this, you have a few options:
- Provide explicit dtypes for the offending columns using the ``dtype``
keyword. This is the recommended solution.
- Use the ``assume_missing`` keyword to assume that all columns inferred as
integers contain missing values, and convert them to floats.
- Increase the size of the sample using the ``sample`` keyword.
It should also be noted that this function may fail if a {file_type} file
includes quoted strings that contain the line terminator. To get around this
you can specify ``blocksize=None`` to not split files into multiple partitions,
at the cost of reduced parallelism.
"""
def make_reader(reader, reader_name, file_type):
def read(
urlpath,
blocksize="default",
lineterminator=None,
compression="infer",
sample=256000,
sample_rows=10,
enforce=False,
assume_missing=False,
storage_options=None,
include_path_column=False,
**kwargs,
):
return read_pandas(
reader,
urlpath,
blocksize=blocksize,
lineterminator=lineterminator,
compression=compression,
sample=sample,
sample_rows=sample_rows,
enforce=enforce,
assume_missing=assume_missing,
storage_options=storage_options,
include_path_column=include_path_column,
**kwargs,
)
read.__doc__ = READ_DOC_TEMPLATE.format(reader=reader_name, file_type=file_type)
read.__name__ = reader_name
return read
read_csv = make_reader(pd.read_csv, "read_csv", "CSV")
read_table = make_reader(pd.read_table, "read_table", "delimited")
read_fwf = make_reader(pd.read_fwf, "read_fwf", "fixed-width")
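# --- Editor's illustrative sketch, not part of the original dask source ---
# The readers built above mirror their pandas counterparts but return lazy
# Dask DataFrames; the glob below is a hypothetical path.
def _read_csv_example():  # pragma: no cover
    ddf = read_csv("data/events-*.csv", blocksize="16MB", assume_missing=True)
    return ddf.compute()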
def _write_csv(df, fil, *, depend_on=None, **kwargs):
with fil as f:
df.to_csv(f, **kwargs)
return os.path.normpath(fil.path)
def to_csv(
df,
filename,
single_file=False,
encoding="utf-8",
mode="wt",
name_function=None,
compression=None,
compute=True,
scheduler=None,
storage_options=None,
header_first_partition_only=None,
compute_kwargs=None,
**kwargs,
):
"""
Store Dask DataFrame to CSV files
One filename per partition will be created. You can specify the
filenames in a variety of ways.
Use a globstring::
>>> df.to_csv('/path/to/data/export-*.csv') # doctest: +SKIP
The * will be replaced by the increasing sequence 0, 1, 2, ...
::
/path/to/data/export-0.csv
/path/to/data/export-1.csv
Use a globstring and a ``name_function=`` keyword argument. The
name_function function should expect an integer and produce a string.
Strings produced by name_function must preserve the order of their
respective partition indices.
>>> from datetime import date, timedelta
>>> def name(i):
... return str(date(2015, 1, 1) + i * timedelta(days=1))
>>> name(0)
'2015-01-01'
>>> name(15)
'2015-01-16'
>>> df.to_csv('/path/to/data/export-*.csv', name_function=name) # doctest: +SKIP
::
/path/to/data/export-2015-01-01.csv
/path/to/data/export-2015-01-02.csv
...
You can also provide an explicit list of paths::
>>> paths = ['/path/to/data/alice.csv', '/path/to/data/bob.csv', ...] # doctest: +SKIP
>>> df.to_csv(paths) # doctest: +SKIP
Parameters
----------
df : dask.DataFrame
Data to save
filename : string
Path glob indicating the naming scheme for the output files
single_file : bool, default False
Whether to save everything into a single CSV file. Under the
single file mode, each partition is appended at the end of the
specified CSV file. Note that not all filesystems support the
append mode and thus the single file mode, especially on cloud
storage systems such as S3 or GCS. A warning will be issued when
writing to a file that is not backed by a local filesystem.
encoding : string, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'.
mode : str
Python write mode, default 'wt'
name_function : callable, default None
Function accepting an integer (partition index) and producing a
string to replace the asterisk in the given filename globstring.
Should preserve the lexicographic order of partitions. Not
supported when `single_file` is `True`.
compression : string, optional
a string representing the compression to use in the output file,
allowed values are 'gzip', 'bz2', 'xz',
only used when the first argument is a filename
compute : bool
If true, immediately executes. If False, returns a set of delayed
objects, which can be computed at a later time.
storage_options : dict
Parameters passed on to the backend filesystem class.
header_first_partition_only : boolean, default None
If set to `True`, only write the header row in the first output
file. By default, headers are written to all partitions under
the multiple file mode (`single_file` is `False`) and written
only once under the single file mode (`single_file` is `True`).
It must not be `False` under the single file mode.
compute_kwargs : dict, optional
Options to be passed in to the compute method
kwargs : dict, optional
Additional parameters to pass to `pd.DataFrame.to_csv()`
Returns
-------
The names of the file written if they were computed right away
If not, the delayed tasks associated to the writing of the files
Raises
------
ValueError
If `header_first_partition_only` is set to `False` or
`name_function` is specified when `single_file` is `True`.
"""
if single_file and name_function is not None:
raise ValueError("name_function is not supported under the single file mode")
if header_first_partition_only is None:
header_first_partition_only = single_file
elif not header_first_partition_only and single_file:
raise ValueError(
"header_first_partition_only cannot be False in the single file mode."
)
file_options = dict(
compression=compression,
encoding=encoding,
newline="",
**(storage_options or {}),
)
to_csv_chunk = delayed(_write_csv, pure=False)
dfs = df.to_delayed()
if single_file:
first_file = open_file(filename, mode=mode, **file_options)
if not isinstance(first_file.fs, fsspec.implementations.local.LocalFileSystem):
warn("Appending data to a network storage system may not work.")
value = to_csv_chunk(dfs[0], first_file, **kwargs)
append_mode = mode.replace("w", "") + "a"
append_file = open_file(filename, mode=append_mode, **file_options)
kwargs["header"] = False
for d in dfs[1:]:
value = to_csv_chunk(d, append_file, depend_on=value, **kwargs)
values = [value]
files = [first_file]
else:
files = open_files(
filename,
mode=mode,
name_function=name_function,
num=df.npartitions,
**file_options,
)
values = [to_csv_chunk(dfs[0], files[0], **kwargs)]
if header_first_partition_only:
kwargs["header"] = False
values.extend(
[to_csv_chunk(d, f, **kwargs) for d, f in zip(dfs[1:], files[1:])]
)
if compute:
if compute_kwargs is None:
compute_kwargs = dict()
if scheduler is not None:
warn(
"The 'scheduler' keyword argument for `to_csv()` is deprecated and"
"will be removed in a future version. "
"Please use the `compute_kwargs` argument instead. "
f"For example, df.to_csv(..., compute_kwargs={{scheduler: {scheduler}}})",
FutureWarning,
)
if (
scheduler is not None
and compute_kwargs.get("scheduler") is not None
and compute_kwargs.get("scheduler") != scheduler
):
raise ValueError(
f"Differing values for 'scheduler' have been passed in.\n"
f"scheduler argument: {scheduler}\n"
f"via compute_kwargs: {compute_kwargs.get('scheduler')}"
)
if scheduler is not None and compute_kwargs.get("scheduler") is None:
compute_kwargs["scheduler"] = scheduler
import dask
return list(dask.compute(*values, **compute_kwargs))
else:
return values
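# --- Editor's illustrative sketch, not part of the original dask source ---
# Two common to_csv patterns from the docstring above; paths are hypothetical.
def _to_csv_example(ddf):  # pragma: no cover
    # One file per partition: /tmp/export-0.csv, /tmp/export-1.csv, ...
    to_csv(ddf, "/tmp/export-*.csv", index=False)
    # Everything appended to a single file; the header is written only once.
    to_csv(ddf, "/tmp/export.csv", single_file=True, index=False)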
from dask.dataframe.core import _Frame
_Frame.to_csv.__doc__ = to_csv.__doc__
| 34.049587 | 91 | 0.620783 |
79476755617410d1d893aa08547ca5e5bbcf9955 | 29,733 | py | Python | bokeh/tests/test_properties.py | brian15co/bokeh | 6cecb7211277b9d838039d0eb15e50a10f9ac3d1 | [
"BSD-3-Clause"
] | 2 | 2021-09-01T12:36:06.000Z | 2021-11-17T10:48:36.000Z | bokeh/tests/test_properties.py | brian15co/bokeh | 6cecb7211277b9d838039d0eb15e50a10f9ac3d1 | [
"BSD-3-Clause"
] | null | null | null | bokeh/tests/test_properties.py | brian15co/bokeh | 6cecb7211277b9d838039d0eb15e50a10f9ac3d1 | [
"BSD-3-Clause"
] | null | null | null | import unittest
import numpy as np
from bokeh.properties import (
HasProps, Int, Array, String, Enum, Float, DataSpec, ColorSpec, DashPattern
)
class Basictest(unittest.TestCase):
def test_simple_class(self):
class Foo(HasProps):
x = Int(12)
y = String("hello")
z = Array(Int, [1, 2, 3])
s = String(None)
f = Foo()
self.assertEqual(f.x, 12)
self.assertEqual(f.y, "hello")
self.assert_(np.array_equal(np.array([1, 2, 3]), f.z))
self.assertEqual(f.s, None)
f.x = 18
self.assertEqual(f.x, 18)
f.y = "bar"
self.assertEqual(f.y, "bar")
def test_enum(self):
class Foo(HasProps):
x = Enum("blue", "red", "green") # the first item is the default
y = Enum("small", "medium", "large", default="large")
f = Foo()
self.assertEqual(f.x, "blue")
self.assertEqual(f.y, "large")
f.x = "red"
self.assertEqual(f.x, "red")
with self.assertRaises(ValueError):
f.x = "yellow"
f.y = "small"
self.assertEqual(f.y, "small")
with self.assertRaises(ValueError):
f.y = "yellow"
def test_inheritance(self):
class Base(HasProps):
x = Int(12)
y = String("hello")
class Child(Base):
z = Float(3.14)
c = Child()
self.assertEqual(frozenset(['x', 'y', 'z']), frozenset(c.properties()))
self.assertEqual(c.y, "hello")
def test_set(self):
class Foo(HasProps):
x = Int(12)
y = Enum("red", "blue", "green")
z = String("blah")
f = Foo()
self.assertEqual(f.x, 12)
self.assertEqual(f.y, "red")
self.assertEqual(f.z, "blah")
f.set(**dict(x=20, y="green", z="hello"))
self.assertEqual(f.x, 20)
self.assertEqual(f.y, "green")
self.assertEqual(f.z, "hello")
with self.assertRaises(ValueError):
f.set(y="orange")
def test_no_parens(self):
class Foo(HasProps):
x = Int
y = Int()
f = Foo()
self.assertEqual(f.x, f.y)
f.x = 13
self.assertEqual(f.x, 13)
# def test_kwargs_init(self):
# class Foo(HasProps):
# x = String
# y = Int
# z = Float
# f = Foo(x = "hello", y = 14)
# self.assertEqual(f.x, "hello")
# self.assertEqual(f.y, 14)
# with self.assertRaises(TypeError):
# # This should raise a TypeError: object.__init__() takes no parameters
# g = Foo(z = 3.14, q = "blah")
class TestDataSpec(unittest.TestCase):
def test_field(self):
class Foo(HasProps):
x = DataSpec("xfield")
f = Foo()
self.assertEqual(f.x, "xfield")
self.assertDictEqual(Foo.__dict__["x"].to_dict(f), {"field": "xfield", "units": "data"})
f.x = "my_x"
self.assertEqual(f.x, "my_x")
self.assertDictEqual(Foo.__dict__["x"].to_dict(f), {"field": "my_x", "units": "data"})
def test_value(self):
class Foo(HasProps):
x = DataSpec("xfield")
f = Foo()
self.assertEqual(f.x, "xfield")
f.x = 12
self.assertEqual(f.x, 12)
self.assertDictEqual(Foo.__dict__["x"].to_dict(f), {"value": 12, "units": "data"})
f.x = 15
self.assertEqual(f.x, 15)
self.assertDictEqual(Foo.__dict__["x"].to_dict(f), {"value": 15, "units": "data"})
f.x = dict(value=23, units="screen")
self.assertDictEqual(Foo.__dict__["x"].to_dict(f), {"value": 23, "units": "screen"})
f.x = dict(value=32)
self.assertDictEqual(Foo.__dict__["x"].to_dict(f), {"value": 32, "units": "data"})
def test_default(self):
class Foo(HasProps):
y = DataSpec(default=12)
f = Foo()
self.assertEqual(f.y, 12)
self.assertDictEqual(Foo.__dict__["y"].to_dict(f), {"value": 12, "units": "data"})
f.y = "y1"
self.assertEqual(f.y, "y1")
# Once we set a concrete value, the default is ignored, because it is unused
f.y = 32
self.assertEqual(f.y, 32)
self.assertDictEqual(Foo.__dict__["y"].to_dict(f), {"value": 32, "units": "data"})
def test_multiple_instances(self):
class Foo(HasProps):
x = DataSpec("xfield", default=12)
a = Foo()
b = Foo()
a.x = 13
b.x = 14
self.assertEqual(a.x, 13)
self.assertEqual(b.x, 14)
self.assertDictEqual(Foo.__dict__["x"].to_dict(a), {"value": 13, "units": "data"})
self.assertDictEqual(Foo.__dict__["x"].to_dict(b), {"value": 14, "units": "data"})
b.x = {"field": "x3", "units": "screen"}
self.assertDictEqual(Foo.__dict__["x"].to_dict(a), {"value": 13, "units": "data"})
self.assertDictEqual(Foo.__dict__["x"].to_dict(b), {"field": "x3", "units": "screen"})
class TestColorSpec(unittest.TestCase):
def test_field(self):
class Foo(HasProps):
col = ColorSpec("colorfield")
desc = Foo.__dict__["col"]
f = Foo()
self.assertEqual(f.col, "colorfield")
self.assertDictEqual(desc.to_dict(f), {"field": "colorfield"})
f.col = "myfield"
self.assertEqual(f.col, "myfield")
self.assertDictEqual(desc.to_dict(f), {"field": "myfield"})
def test_field_default(self):
class Foo(HasProps):
col = ColorSpec(default="red")
desc = Foo.__dict__["col"]
f = Foo()
self.assertEqual(f.col, "red")
self.assertDictEqual(desc.to_dict(f), {"value": "red"})
f.col = "myfield"
self.assertEqual(f.col, "myfield")
self.assertDictEqual(desc.to_dict(f), {"field": "myfield"})
def test_default_tuple(self):
class Foo(HasProps):
col = ColorSpec(default=(128, 255, 124))
desc = Foo.__dict__["col"]
f = Foo()
self.assertEqual(f.col, (128, 255, 124))
self.assertDictEqual(desc.to_dict(f), {"value": "rgb(128, 255, 124)"})
def test_fixed_value(self):
class Foo(HasProps):
col = ColorSpec("gray")
desc = Foo.__dict__["col"]
f = Foo()
self.assertEqual(f.col, "gray")
self.assertDictEqual(desc.to_dict(f), {"value": "gray"})
def test_named_value(self):
class Foo(HasProps):
col = ColorSpec("colorfield")
desc = Foo.__dict__["col"]
f = Foo()
f.col = "red"
self.assertEqual(f.col, "red")
self.assertDictEqual(desc.to_dict(f), {"value": "red"})
f.col = "forestgreen"
self.assertEqual(f.col, "forestgreen")
self.assertDictEqual(desc.to_dict(f), {"value": "forestgreen"})
def test_named_value_set_none(self):
class Foo(HasProps):
col = ColorSpec("colorfield")
desc = Foo.__dict__["col"]
f = Foo()
f.col = None
self.assertDictEqual(desc.to_dict(f), {"value": None})
def test_named_value_unset(self):
class Foo(HasProps):
col = ColorSpec("colorfield")
desc = Foo.__dict__["col"]
f = Foo()
self.assertDictEqual(desc.to_dict(f), {"field": "colorfield"})
def test_named_color_overriding_default(self):
class Foo(HasProps):
col = ColorSpec("colorfield", default="blue")
desc = Foo.__dict__["col"]
f = Foo()
f.col = "forestgreen"
self.assertEqual(f.col, "forestgreen")
self.assertDictEqual(desc.to_dict(f), {"value": "forestgreen"})
f.col = "myfield"
self.assertEqual(f.col, "myfield")
self.assertDictEqual(desc.to_dict(f), {"field": "myfield"})
def test_hex_value(self):
class Foo(HasProps):
col = ColorSpec("colorfield")
desc = Foo.__dict__["col"]
f = Foo()
f.col = "#FF004A"
self.assertEqual(f.col, "#FF004A")
self.assertDictEqual(desc.to_dict(f), {"value": "#FF004A"})
f.col = "myfield"
self.assertEqual(f.col, "myfield")
self.assertDictEqual(desc.to_dict(f), {"field": "myfield"})
def test_tuple_value(self):
class Foo(HasProps):
col = ColorSpec("colorfield")
desc = Foo.__dict__["col"]
f = Foo()
f.col = (128, 200, 255)
self.assertEqual(f.col, (128, 200, 255))
self.assertDictEqual(desc.to_dict(f), {"value": "rgb(128, 200, 255)"})
f.col = "myfield"
self.assertEqual(f.col, "myfield")
self.assertDictEqual(desc.to_dict(f), {"field": "myfield"})
f.col = (100, 150, 200, 0.5)
self.assertEqual(f.col, (100, 150, 200, 0.5))
self.assertDictEqual(desc.to_dict(f), {"value": "rgba(100, 150, 200, 0.5)"})
def test_set_dict(self):
class Foo(HasProps):
col = ColorSpec("colorfield")
desc = Foo.__dict__["col"]
f = Foo()
f.col = {"field": "myfield"}
self.assertDictEqual(f.col, {"field": "myfield"})
f.col = "field2"
self.assertEqual(f.col, "field2")
self.assertDictEqual(desc.to_dict(f), {"field": "field2"})
class TestDashPattern(unittest.TestCase):
def test_named(self):
class Foo(HasProps):
pat = DashPattern
f = Foo()
self.assertEqual(f.pat, [])
f.pat = "solid"
self.assertEqual(f.pat, [])
f.pat = "dashed"
self.assertEqual(f.pat, [6])
f.pat = "dotted"
self.assertEqual(f.pat, [2, 4])
f.pat = "dotdash"
self.assertEqual(f.pat, [2, 4, 6, 4])
f.pat = "dashdot"
self.assertEqual(f.pat, [6, 4, 2, 4])
def test_string(self):
class Foo(HasProps):
pat = DashPattern
f = Foo()
f.pat = ""
self.assertEqual(f.pat, [])
f.pat = "2"
self.assertEqual(f.pat, [2])
f.pat = "2 4"
self.assertEqual(f.pat, [2, 4])
f.pat = "2 4 6"
self.assertEqual(f.pat, [2, 4, 6])
with self.assertRaises(ValueError):
f.pat = "abc 6"
def test_list(self):
class Foo(HasProps):
pat = DashPattern
f = Foo()
f.pat = []
self.assertEqual(f.pat, [])
f.pat = [2]
self.assertEqual(f.pat, [2])
f.pat = [2, 4]
self.assertEqual(f.pat, [2, 4])
f.pat = [2, 4, 6]
self.assertEqual(f.pat, [2, 4, 6])
with self.assertRaises(ValueError):
f.pat = [2, 4.2]
with self.assertRaises(ValueError):
f.pat = [2, "a"]
def test_invalid(self):
class Foo(HasProps):
pat = DashPattern
f = Foo()
with self.assertRaises(ValueError):
f.pat = 10
with self.assertRaises(ValueError):
f.pat = 10.1
with self.assertRaises(ValueError):
f.pat = {}
from bokeh.properties import (Bool, Int, Float, Complex, String,
Regex, List, Dict, Tuple, Array, Instance, Any, Range, Either,
Enum, Color, Align, DashPattern, Size, Percent, Angle)
class Foo(HasProps):
pass
class Bar(HasProps):
pass
class Baz(HasProps):
pass
class TestProperties(unittest.TestCase):
def test_Any(self):
prop = Any()
self.assertTrue(prop.is_valid(None))
self.assertTrue(prop.is_valid(False))
self.assertTrue(prop.is_valid(True))
self.assertTrue(prop.is_valid(0))
self.assertTrue(prop.is_valid(1))
self.assertTrue(prop.is_valid(0.0))
self.assertTrue(prop.is_valid(1.0))
self.assertTrue(prop.is_valid(1.0+1.0j))
self.assertTrue(prop.is_valid(""))
self.assertTrue(prop.is_valid(()))
self.assertTrue(prop.is_valid([]))
self.assertTrue(prop.is_valid({}))
self.assertTrue(prop.is_valid(Foo()))
def test_Bool(self):
prop = Bool()
self.assertTrue(prop.is_valid(None))
self.assertTrue(prop.is_valid(False))
self.assertTrue(prop.is_valid(True))
self.assertFalse(prop.is_valid(0))
self.assertFalse(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
def test_Int(self):
prop = Int()
self.assertTrue(prop.is_valid(None))
# TODO: self.assertFalse(prop.is_valid(False))
# TODO: self.assertFalse(prop.is_valid(True))
self.assertTrue(prop.is_valid(0))
self.assertTrue(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
def test_Float(self):
prop = Float()
self.assertTrue(prop.is_valid(None))
# TODO: self.assertFalse(prop.is_valid(False))
# TODO: self.assertFalse(prop.is_valid(True))
self.assertTrue(prop.is_valid(0))
self.assertTrue(prop.is_valid(1))
self.assertTrue(prop.is_valid(0.0))
self.assertTrue(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
def test_Complex(self):
prop = Complex()
self.assertTrue(prop.is_valid(None))
# TODO: self.assertFalse(prop.is_valid(False))
# TODO: self.assertFalse(prop.is_valid(True))
self.assertTrue(prop.is_valid(0))
self.assertTrue(prop.is_valid(1))
self.assertTrue(prop.is_valid(0.0))
self.assertTrue(prop.is_valid(1.0))
self.assertTrue(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
def test_String(self):
prop = String()
self.assertTrue(prop.is_valid(None))
self.assertFalse(prop.is_valid(False))
self.assertFalse(prop.is_valid(True))
self.assertFalse(prop.is_valid(0))
self.assertFalse(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertTrue(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
def test_Regex(self):
with self.assertRaises(TypeError):
prop = Regex()
prop = Regex("^x*$")
self.assertTrue(prop.is_valid(None))
self.assertFalse(prop.is_valid(False))
self.assertFalse(prop.is_valid(True))
self.assertFalse(prop.is_valid(0))
self.assertFalse(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertTrue(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
def test_List(self):
with self.assertRaises(TypeError):
prop = List()
prop = List(Int)
self.assertTrue(prop.is_valid(None))
self.assertFalse(prop.is_valid(False))
self.assertFalse(prop.is_valid(True))
self.assertFalse(prop.is_valid(0))
self.assertFalse(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertTrue(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
def test_Dict(self):
with self.assertRaises(TypeError):
prop = Dict()
prop = Dict(String, List(Int))
self.assertTrue(prop.is_valid(None))
self.assertFalse(prop.is_valid(False))
self.assertFalse(prop.is_valid(True))
self.assertFalse(prop.is_valid(0))
self.assertFalse(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertTrue(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
def test_Tuple(self):
with self.assertRaises(TypeError):
prop = Tuple()
with self.assertRaises(TypeError):
prop = Tuple(Int)
prop = Tuple(Int, String, List(Int))
self.assertTrue(prop.is_valid(None))
self.assertFalse(prop.is_valid(False))
self.assertFalse(prop.is_valid(True))
self.assertFalse(prop.is_valid(0))
self.assertFalse(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
self.assertTrue(prop.is_valid((1, "", [1, 2, 3])))
self.assertFalse(prop.is_valid((1.0, "", [1, 2, 3])))
self.assertFalse(prop.is_valid((1, True, [1, 2, 3])))
self.assertFalse(prop.is_valid((1, "", (1, 2, 3))))
self.assertFalse(prop.is_valid((1, "", [1, 2, "xyz"])))
def test_Instance(self):
with self.assertRaises(TypeError):
prop = Instance()
prop = Instance(Foo)
self.assertTrue(prop.is_valid(None))
self.assertFalse(prop.is_valid(False))
self.assertFalse(prop.is_valid(True))
self.assertFalse(prop.is_valid(0))
self.assertFalse(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertTrue(prop.is_valid(Foo()))
self.assertFalse(prop.is_valid(Bar()))
self.assertFalse(prop.is_valid(Baz()))
def test_Range(self):
with self.assertRaises(TypeError):
prop = Range()
with self.assertRaises(ValueError):
prop = Range(Int, 0.0, 1.0)
prop = Range(Int, 0, 255)
self.assertTrue(prop.is_valid(None))
# TODO: self.assertFalse(prop.is_valid(False))
# TODO: self.assertFalse(prop.is_valid(True))
self.assertTrue(prop.is_valid(0))
self.assertTrue(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
self.assertTrue(prop.is_valid(127))
self.assertFalse(prop.is_valid(-1))
self.assertFalse(prop.is_valid(256))
prop = Range(Float, 0.0, 1.0)
self.assertTrue(prop.is_valid(None))
# TODO: self.assertFalse(prop.is_valid(False))
# TODO: self.assertFalse(prop.is_valid(True))
self.assertTrue(prop.is_valid(0))
self.assertTrue(prop.is_valid(1))
self.assertTrue(prop.is_valid(0.0))
self.assertTrue(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
self.assertTrue(prop.is_valid(0.5))
self.assertFalse(prop.is_valid(-0.001))
self.assertFalse(prop.is_valid( 1.001))
def test_Either(self):
with self.assertRaises(TypeError):
prop = Either()
prop = Either(Range(Int, 0, 100), Regex("^x*$"), List(Int))
self.assertTrue(prop.is_valid(None))
# TODO: self.assertFalse(prop.is_valid(False))
# TODO: self.assertFalse(prop.is_valid(True))
self.assertTrue(prop.is_valid(0))
self.assertTrue(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertTrue(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertTrue(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
self.assertTrue(prop.is_valid(100))
self.assertFalse(prop.is_valid(-100))
self.assertTrue(prop.is_valid("xxx"))
self.assertFalse(prop.is_valid("yyy"))
self.assertTrue(prop.is_valid([1, 2, 3]))
self.assertFalse(prop.is_valid([1, 2, ""]))
def test_Enum(self):
with self.assertRaises(TypeError):
prop = Enum()
with self.assertRaises(TypeError):
prop = Enum("red", "green", 1)
with self.assertRaises(TypeError):
prop = Enum("red", "green", "red")
prop = Enum("red", "green", "blue")
self.assertTrue(prop.is_valid(None))
self.assertFalse(prop.is_valid(False))
self.assertFalse(prop.is_valid(True))
self.assertFalse(prop.is_valid(0))
self.assertFalse(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
self.assertTrue(prop.is_valid("red"))
self.assertTrue(prop.is_valid("green"))
self.assertTrue(prop.is_valid("blue"))
self.assertFalse(prop.is_valid("RED"))
self.assertFalse(prop.is_valid("GREEN"))
self.assertFalse(prop.is_valid("BLUE"))
self.assertFalse(prop.is_valid(" red"))
self.assertFalse(prop.is_valid(" green"))
self.assertFalse(prop.is_valid(" blue"))
from bokeh.enums import LineJoin
prop = Enum(LineJoin)
self.assertTrue(prop.is_valid(None))
self.assertFalse(prop.is_valid(False))
self.assertFalse(prop.is_valid(True))
self.assertFalse(prop.is_valid(0))
self.assertFalse(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
self.assertTrue(prop.is_valid("miter"))
self.assertTrue(prop.is_valid("round"))
self.assertTrue(prop.is_valid("bevel"))
self.assertFalse(prop.is_valid("MITER"))
self.assertFalse(prop.is_valid("ROUND"))
self.assertFalse(prop.is_valid("BEVEL"))
self.assertFalse(prop.is_valid(" miter"))
self.assertFalse(prop.is_valid(" round"))
self.assertFalse(prop.is_valid(" bevel"))
def test_Color(self):
prop = Color()
self.assertTrue(prop.is_valid(None))
self.assertFalse(prop.is_valid(False))
self.assertFalse(prop.is_valid(True))
self.assertFalse(prop.is_valid(0))
self.assertFalse(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
self.assertTrue(prop.is_valid((0, 127, 255)))
self.assertFalse(prop.is_valid((0, -127, 255)))
self.assertFalse(prop.is_valid((0, 127)))
self.assertFalse(prop.is_valid((0, 127, 1.0)))
self.assertFalse(prop.is_valid((0, 127, 255, 255)))
self.assertTrue(prop.is_valid((0, 127, 255, 1.0)))
self.assertTrue(prop.is_valid("#00aaff"))
self.assertTrue(prop.is_valid("#00AAFF"))
self.assertTrue(prop.is_valid("#00AaFf"))
self.assertFalse(prop.is_valid("00aaff"))
self.assertFalse(prop.is_valid("00AAFF"))
self.assertFalse(prop.is_valid("00AaFf"))
self.assertFalse(prop.is_valid("#00AaFg"))
self.assertFalse(prop.is_valid("#00AaFff"))
self.assertTrue(prop.is_valid("blue"))
self.assertFalse(prop.is_valid("BLUE"))
self.assertFalse(prop.is_valid("foobar"))
def test_Align(self):
prop = Align() # TODO
def test_DashPattern(self):
prop = DashPattern()
self.assertTrue(prop.is_valid(None))
self.assertFalse(prop.is_valid(False))
self.assertFalse(prop.is_valid(True))
self.assertFalse(prop.is_valid(0))
self.assertFalse(prop.is_valid(1))
self.assertFalse(prop.is_valid(0.0))
self.assertFalse(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertTrue(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertTrue(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
self.assertTrue(prop.is_valid("solid"))
self.assertTrue(prop.is_valid("dashed"))
self.assertTrue(prop.is_valid("dotted"))
self.assertTrue(prop.is_valid("dotdash"))
self.assertTrue(prop.is_valid("dashdot"))
self.assertFalse(prop.is_valid("DASHDOT"))
self.assertTrue(prop.is_valid([1, 2, 3]))
self.assertFalse(prop.is_valid([1, 2, 3.0]))
self.assertTrue(prop.is_valid("1 2 3"))
self.assertFalse(prop.is_valid("1 2 x"))
def test_Size(self):
prop = Size()
self.assertTrue(prop.is_valid(None))
# TODO: self.assertFalse(prop.is_valid(False))
# TODO: self.assertFalse(prop.is_valid(True))
self.assertTrue(prop.is_valid(0))
self.assertTrue(prop.is_valid(1))
self.assertTrue(prop.is_valid(0.0))
self.assertTrue(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
self.assertTrue(prop.is_valid(100))
self.assertTrue(prop.is_valid(100.1))
self.assertFalse(prop.is_valid(-100))
self.assertFalse(prop.is_valid(-0.001))
def test_Percent(self):
prop = Percent()
self.assertTrue(prop.is_valid(None))
# TODO: self.assertFalse(prop.is_valid(False))
# TODO: self.assertFalse(prop.is_valid(True))
self.assertTrue(prop.is_valid(0))
self.assertTrue(prop.is_valid(1))
self.assertTrue(prop.is_valid(0.0))
self.assertTrue(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
self.assertTrue(prop.is_valid(0.5))
self.assertFalse(prop.is_valid(-0.001))
self.assertFalse(prop.is_valid( 1.001))
def test_Angle(self):
prop = Angle()
self.assertTrue(prop.is_valid(None))
# TODO: self.assertFalse(prop.is_valid(False))
# TODO: self.assertFalse(prop.is_valid(True))
self.assertTrue(prop.is_valid(0))
self.assertTrue(prop.is_valid(1))
self.assertTrue(prop.is_valid(0.0))
self.assertTrue(prop.is_valid(1.0))
self.assertFalse(prop.is_valid(1.0+1.0j))
self.assertFalse(prop.is_valid(""))
self.assertFalse(prop.is_valid(()))
self.assertFalse(prop.is_valid([]))
self.assertFalse(prop.is_valid({}))
self.assertFalse(prop.is_valid(Foo()))
def test_HasProps_clone():
from bokeh.models import Plot
p1 = Plot(plot_width=1000)
c1 = p1.changed_properties()
p2 = p1.clone()
c2 = p2.changed_properties()
assert c1 == c2
if __name__ == "__main__":
unittest.main()
| 34.775439 | 96 | 0.592069 |
794767c4371d7b8f6b9bf3ed7baf04e774914ac0 | 4,740 | py | Python | src/MiniMax.py | keivanipchihagh/Tic-Tac-Toe-ML-EXTENDED | 8849bb48d781b11e95d982c34f7bdf6d6998fd63 | [
"MIT"
] | null | null | null | src/MiniMax.py | keivanipchihagh/Tic-Tac-Toe-ML-EXTENDED | 8849bb48d781b11e95d982c34f7bdf6d6998fd63 | [
"MIT"
] | null | null | null | src/MiniMax.py | keivanipchihagh/Tic-Tac-Toe-ML-EXTENDED | 8849bb48d781b11e95d982c34f7bdf6d6998fd63 | [
"MIT"
] | null | null | null | '''
MiniMax implementation of Tic Tac Toe with Python
Author: Keivan Ipchi Hagh
Year: 2020
License: MIT License
Follow me for more (https://github.com/keivanipchihagh)
'''
# Imports
from math import inf as infinity
from random import choice
class MiniMax:
''' Implementation is somewhat object-oriented for reusability and supports bigger boards for fancy scenarios '''
def __init__(self):
''' Constructor '''
# Variables initialization
self.board = None
self.HUMAN = -1
self.HUMAN_score = -10
self.AI = +1
self.AI_score = 10
def evaluate(self):
''' Heuristic evaluation of game state. Rewards the AI if it wins '''
if self.wins() == +1:
return self.AI_score # Reward the AI if it outruns the opponent
elif self.wins() == -1:
return self.HUMAN_score # Punish the AI if it is outrun by the opponent
else:
return 0 # No winner (draw or game still in progress)
def wins(self):
''' Determines whether a player has won or not - Returns +1 if AI has won & -1 if HUMAN has won '''
# Check rows
for row in self.board:
if row.count(self.AI) == len(self.board):
return +1
elif row.count(self.HUMAN) == len(self.board):
return -1
# Check columns
for i in range(len(self.board)):
column = [row[i] for row in self.board]
if column.count(self.AI) == len(self.board):
return +1
elif column.count(self.HUMAN) == len(self.board):
return -1
# Check diagonals
diagonal_1 = [self.board[i][i] for i in range(len(self.board))]
diagonal_2 = [self.board[len(self.board) - i - 1][i] for i in range(len(self.board) - 1, -1, -1)]
if diagonal_1.count(self.AI) == len(self.board) or diagonal_2.count(self.AI) == len(self.board):
return +1
elif diagonal_1.count(self.HUMAN) == len(self.board) or diagonal_2.count(self.HUMAN) == len(self.board):
return -1
return None # No winner
def is_game_over(self):
''' Determines whether any of the players has won the game '''
return self.wins() == +1 or self.wins() == -1
def get_empty_cells(self):
''' Returns a list of empty cells (Available moves) '''
empty_cells = []
for x, row in enumerate(self.board):
for y, cell in enumerate(row):
if cell == 0:
empty_cells.append([x, y])
return empty_cells
def has_empty_cells(self):
''' Returns True when the board has no empty cells left (i.e. the board is full) '''
return len(self.get_empty_cells()) == 0
def minimax(self, depth, player):
''' The brain of the AI - recursively scores each possible move (MiniMax) '''
score = self.evaluate()
empty_cells = self.get_empty_cells()
# We have a winner, return its score
if score in [self.AI_score, self.HUMAN_score]:
return score
# Draw?
elif self.has_empty_cells():
return 0
# Set best score
best_score = -10 if (player == self.AI) else +10
for cell in empty_cells:
# Get cell coordinates
x, y = cell[0], cell[1]
# Temporarily set the move
self.board[x][y] = player
# Recalculate best move by recursively going down the tree - To make our AI smarter we want to win faster so Depth is important here!
if player == self.AI:
best_score = max(best_score, self.minimax(depth + 1, -player)) - depth
else:
best_score = min(best_score, self.minimax(depth + 1, -player)) + depth
# Reset the move back to its original state
self.board[x][y] = 0
return best_score
def get_move(self, board):
''' Gets the best possible move using the algorithm '''
# Set the board
self.board = board
best_score = -infinity
best_move = tuple()
empty_cells = self.get_empty_cells()
# Choose random if it's the first move
if len(empty_cells) == 9:
return choice([0, 1, 2]), choice([0, 1, 2])
for cell in empty_cells:
x, y = cell[0], cell[1]
board[x][y] = self.AI
move_score = self.minimax(0, self.HUMAN)
board[x][y] = 0
# Compare to see if current move is better
if move_score > best_score:
best_score, best_move = move_score, (x, y)
return best_move | 29.259259 | 145 | 0.558017 |
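# --- Editor's illustrative usage sketch, not part of the original MiniMax.py ---
# Assumes the MiniMax class defined above is in scope; board encoding follows
# its conventions: 0 = empty, +1 = AI, -1 = HUMAN.
def _minimax_example():  # pragma: no cover
    board = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
    ai = MiniMax()
    x, y = ai.get_move(board)  # the very first move is chosen at random
    board[x][y] = ai.AI        # place the AI's mark
    return board, ai.is_game_over()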
79476823d72f30f64c58615996b56f45ea235fc4 | 1,180 | py | Python | application/views.py | amarlearning/Footstep | 557beda097834a031fa2f114bad5de261c7daf95 | [
"MIT"
] | null | null | null | application/views.py | amarlearning/Footstep | 557beda097834a031fa2f114bad5de261c7daf95 | [
"MIT"
] | 2 | 2017-05-12T14:38:01.000Z | 2017-05-18T13:25:35.000Z | application/views.py | amarlearning/Footstep | 557beda097834a031fa2f114bad5de261c7daf95 | [
"MIT"
] | null | null | null | from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, redirect
from django import forms
import urllib2
import json
class NameForm(forms.Form):
username = forms.CharField(
widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder' : 'GitHub username'}),
label='',
max_length=200
)
# Create your views here.
def index(request):
if request.method == 'POST':
form = NameForm(request.POST)
if form.is_valid():
username = form.cleaned_data['username']
return HttpResponseRedirect('stats/'+ username)
else:
form = NameForm()
return render(request, 'application/index.html', {'form': form})
def stats(request):
return redirect("/")
def username(request, username):
url = 'https://api.github.com/users/' + username + '/events'
serialized_data = urllib2.urlopen(url).read().decode("utf-8")
data = json.loads(serialized_data)
tabs = {
'CommitCommentEvent' : 'Comments',
'IssueCommentEvent' : 'Comments',
'PullRequestReviewCommentEvent' : 'Comments'
}
context = {
'username' : username,
'json' : data,
'tabs' : tabs
}
return render(request, 'application/stats.html', context) | 27.44186 | 96 | 0.705085 |
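# --- Editor's illustrative sketch, not part of the original views.py ---
# Hypothetical URL wiring for the three views above; the project's actual
# urls.py, route names and regexes may differ.
# from django.conf.urls import url
# from application import views
# urlpatterns = [
#     url(r'^$', views.index, name='index'),
#     url(r'^stats/$', views.stats, name='stats'),
#     url(r'^stats/(?P<username>[-\w.]+)$', views.username, name='username'),
# ]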
79476a8e9a0e10d14e74731e17ac8213a1345b48 | 2,284 | py | Python | openeogeotrellis/cleaner.py | Open-EO/openeo-geopyspark-driver | afd5902f426d2aa456d70ed6f2d51b6907de1cab | [
"Apache-2.0"
] | 12 | 2018-03-22T15:02:24.000Z | 2022-03-30T20:13:29.000Z | openeogeotrellis/cleaner.py | Open-EO/openeo-geopyspark-driver | afd5902f426d2aa456d70ed6f2d51b6907de1cab | [
"Apache-2.0"
] | 116 | 2018-09-27T17:17:14.000Z | 2022-03-30T18:32:29.000Z | openeogeotrellis/cleaner.py | Open-EO/openeo-geopyspark-driver | afd5902f426d2aa456d70ed6f2d51b6907de1cab | [
"Apache-2.0"
] | 3 | 2019-06-28T15:44:32.000Z | 2021-10-30T07:05:54.000Z | import argparse
from datetime import datetime, timedelta
import logging
import kazoo.client
from py4j.java_gateway import JavaGateway, JVMView
import openeogeotrellis.backend
from openeogeotrellis.backend import GpsBatchJobs, GpsSecondaryServices
from openeogeotrellis.configparams import ConfigParams
from openeogeotrellis.service_registry import ZooKeeperServiceRegistry
logging.basicConfig(level=logging.INFO)
openeogeotrellis.backend.logger.setLevel(logging.DEBUG)
kazoo.client.log.setLevel(logging.WARNING)
_log = logging.getLogger(__name__)
def remove_batch_jobs_before(upper: datetime, jvm: JVMView) -> None:
_log.info("removing batch jobs before {d}...".format(d=upper))
# TODO: how to cope with unneeded arguments?
batch_jobs = GpsBatchJobs(catalog=None, jvm=jvm, principal="", key_tab="")
batch_jobs.delete_jobs_before(upper)
def remove_secondary_services_before(upper: datetime) -> None:
_log.info("removing secondary services before {d}...".format(d=upper))
secondary_services = GpsSecondaryServices(ZooKeeperServiceRegistry())
secondary_services.remove_services_before(upper)
def main():
_log.info("ConfigParams(): {c}".format(c=ConfigParams()))
parser = argparse.ArgumentParser(usage="OpenEO Cleaner", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--py4j-jarpath", default="venv/share/py4j/py4j0.10.9.2.jar", help='Path to the Py4J jar')
parser.add_argument("--py4j-classpath", default="geotrellis-extensions-2.2.0-SNAPSHOT.jar",
help='Classpath used to launch the Java Gateway')
args = parser.parse_args()
java_opts = [
"-client",
"-Dsoftware.amazon.awssdk.http.service.impl=software.amazon.awssdk.http.urlconnection.UrlConnectionSdkHttpService"
]
java_gateway = JavaGateway.launch_gateway(jarpath=args.py4j_jarpath,
classpath=args.py4j_classpath,
javaopts=java_opts,
die_on_exit=True)
max_date = datetime.today() - timedelta(days=60)
remove_batch_jobs_before(max_date, java_gateway.jvm)
remove_secondary_services_before(max_date)
if __name__ == '__main__':
main()
| 36.83871 | 122 | 0.721103 |
79476aeeda7b595219e7aa1c9df488b2c5cac563 | 1,567 | py | Python | auth_service/src/AuthService/manage.py | newgene/biothings_oauth | 363a30e43537b38988363d02e9fa7c9016064a72 | [
"Apache-2.0"
] | null | null | null | auth_service/src/AuthService/manage.py | newgene/biothings_oauth | 363a30e43537b38988363d02e9fa7c9016064a72 | [
"Apache-2.0"
] | null | null | null | auth_service/src/AuthService/manage.py | newgene/biothings_oauth | 363a30e43537b38988363d02e9fa7c9016064a72 | [
"Apache-2.0"
] | null | null | null | import sys
from alembic.config import Config
from alembic import command
from AuthService import settings
def drop_all(alembic_conf=None):
"""
Drops all tables in the database.
    :param alembic_conf: Alembic configuration to be used.
"""
if alembic_conf is None:
alembic_conf = initialize_alembic_conf()
print("Dropping all tables in 'auth.models'..")
from auth import models
command.downgrade(alembic_conf, "base")
print("SUCCESS")
def initialize_alembic_conf():
"""
Initializes alembic configuration.
"""
config = Config("alembic.ini")
config.set_main_option('script_location', "alembic")
config.set_main_option('sqlalchemy.url', settings.SQLALCHEMY_DB_URL)
return config
def flush_db():
"""
Clears the current database tables by dropping tables and creating new
empty ones.
"""
conf = initialize_alembic_conf()
drop_all(conf)
print("Upgrading migrations to head..")
command.upgrade(conf, "head")
print("SUCCESS")
def call_command():
"""
Parses the system arguments to call the appropriate command.
"""
commands = {
"drop_all": drop_all,
"flush_db": flush_db
}
if len(sys.argv) != 2:
raise Exception(
"Bad script usage. Example: python manage.py [command]"
)
command_name = sys.argv[1]
if command_name not in commands:
raise Exception(f"Unrecognized command '{command_name}'")
commands[command_name]()
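# A hedged shell-usage sketch for the commands registered above; the command
# names come from the dict in call_command(), everything else is illustrative:
#
#     python manage.py drop_all   # downgrade the Alembic migrations to "base"
#     python manage.py flush_db   # drop all tables, then upgrade back to "head"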
if __name__ == "__main__":
call_command()
| 21.175676 | 74 | 0.661774 |
79476b35a277ca92d2f619f586748ed591a9671a | 29,133 | py | Python | staircase_bw_new.py | lbeerendonk/blackwhite | 4ec0deced93da59218a90838bf60d10a981da37a | [
"MIT"
] | null | null | null | staircase_bw_new.py | lbeerendonk/blackwhite | 4ec0deced93da59218a90838bf60d10a981da37a | [
"MIT"
] | null | null | null | staircase_bw_new.py | lbeerendonk/blackwhite | 4ec0deced93da59218a90838bf60d10a981da37a | [
"MIT"
] | null | null | null | """
Staircase (3-up, 1-down) for auditory detection or discrimination.
Runs on Python 2.7 because some dependencies (pygaze) do not work with Python 3.
Becomes compatible with Python 3 by changing all instances of "raw_input" to "input".
Created by Lola Beerendonk 28-05-2019
Copyright (c) 2019; All rights reserved.
"""
import numpy as np
# import psychopy
# from psychopy import prefs
# prefs.general['audioLib'] = ['sounddevice']
from psychopy import sound
from psychopy import core, visual, event, monitors, data, tools, misc
from psychopy.tools import monitorunittools
#import numpy as np
from numpy.random import shuffle
import copy, time
import json
import glob
import csv
import sys
import datetime
import os
import matplotlib.pyplot as plt
#sys.path.append('D:\USERS\Stijn\exptools')
from IPython import embed as shell
screenSize = [1920,1080]
number_of_trials = 2
fullscreen = True
class staircase_interleaved():
def __init__(self,subject_initials, task): # self, parameters = {}, phase_durations=[], session=None, screen=None, tracker=None, ID=0,
self.subject_initials = subject_initials
self.task = task
self.run = 0
# Version for counterbalancing the mapping of the responses between subjects
if int(self.subject_initials) % 2 == 1:
self.version = 1
elif int(self.subject_initials) % 2 == 0:
self.version = 2
if len(event.getKeys(keyList=['escape'])):
print('Quit by user.')
core.quit()
self.setup_files()
self.info={}
self.info['startPoints']=[0.2,0.15] #initial guesses for threshold
self.info['nTrials']=number_of_trials # number of trials per staircase per block
screen = monitors.Monitor('testMonitor')
screen.setSizePix(screenSize)
screen.setWidth(52.1)
screen.setDistance(80)
self.win = visual.Window(size = screenSize, units='deg', monitor=screen, fullscr=fullscreen, color=(.5,.5,.5), colorSpace='rgb')
#### Does not work in Py2.7
# #to escape the experiment at all times
# def escape_exp():
# self.win.close()
# core.quit()
# event.globalKeys.clear()
# event.globalKeys.add(key='escape',func=escape_exp)
self.setup_stimuli()
#----------- Run staircase first run (= run 0)
self.run_staircase()
# wait for participant to respond
event.waitKeys()
#----------- Second run (= run 1)
self.run = 1
# Use approximate thresholds (or final points) of first run as startpoints for second run
self.info['startPoints']=[self.newstart1,self.newstart2]
#self.setup_files()
self.stairs = []
self.feedback1.text = 'This was the second block. Press the spacebar to continue with the third and final block.'
self.run_staircase()
# event.waitKeys()
# self.run = 2
# # Use approximate thresholds (or final points) of first run as startpoints for second run
# self.info['startPoints']=[self.newstart1,self.newstart2]
# #self.setup_files()
# self.stairs = []
# self.feedback1.text = 'This is the end of the staircase procedure. \n\n\n This window will close automatically.'
# self.run_staircase()
self.feedback1.text = 'This is the end of the staircase procedure.\n\n\nThis window will close automatically.'
self.feedback1.draw()
self.win.flip()
# calculate performance per tone/condition. Performance is here calculated as the percentage of correct X responses of all X responses (i.e. controlling for bias)
if self.task == 'discrim':
self.answered_low = self.disc_low_correct_count + self.disc_high_count - self.disc_high_correct_count
self.answered_high = self.disc_high_correct_count + self.disc_low_count - self.disc_low_correct_count
self.perf_low = float(self.disc_low_correct_count)/float(self.answered_low)
self.perf_high = float(self.disc_high_correct_count)/float(self.answered_high)
self.disc_perf_all = (float(self.disc_low_correct_count)+float(self.disc_high_correct_count))/(float(number_of_trials)*4.0)
print("Performance on low tone: ", self.disc_low_correct_count, "of ", self.answered_low , "correct: ", self.perf_low, "%")
print("Performance on high tone: ", self.disc_high_correct_count, "of ", self.answered_high, "correct: ", self.perf_high, "%")
print("Performance of run 2 and 3: ", self.disc_perf_all)
self.perfFile.write('%.3f,%.3f,%.3f' %(self.perf_low, self.perf_high,self.disc_perf_all))
elif self.task == 'detect':
self.answered_absent = self.det_absent_correct_count + self.det_present_count - self.det_present_correct_count
self.answered_present = self.det_present_correct_count + self.det_absent_count - self.det_absent_correct_count
self.perf_absent = float(self.det_absent_correct_count)/float(self.answered_absent)
self.perf_present = float(self.det_present_correct_count)/float(self.answered_present)
self.det_perf_all = (float(self.det_present_correct_count)+float(self.det_absent_correct_count))/(float(number_of_trials)*4.0)
print("Performance on absent target: ", self.det_absent_correct_count, "of ", self.answered_absent, "correct: ", self.perf_absent, "%")
print("Performance on present target: ", self.det_present_correct_count, "of ",self.answered_present, "correct: ", self.perf_present, "%")
print("Performance of run 2 and 3: ", self.det_perf_all)
self.perfFile.write('%.3f,%.3f,%.3f' %(self.perf_absent, self.perf_present,self.det_perf_all))
self.fig.savefig(self.fig_fileName + '.png')
core.wait(3)
self.win.close()
#core.quit()
def run_staircase(self):
if self.run == 0:
self.first_text.draw()
self.win.flip()
event.waitKeys('spacebar')
#start experiment
self.message1.draw()
self.intro_text.draw()
self.draw_fixation()
self.win.flip()
event.waitKeys()
self.draw_fixation()
self.win.flip()
# Play separate tones and noise (only before run 0)
if self.run == 0:
if self.task == 'detect':
self.target_tone.draw()
elif self.task == 'discrim':
self.low_tone.draw()
self.draw_fixation()
self.win.flip()
self.target1.setVolume(0.5)
self.target2.setVolume(0.5)
for i in range(0,3):
self.target1.play()
core.wait(0.5)
self.target1.stop()
core.wait(1)
core.wait(1)
if self.task == 'discrim':
self.high_tone.draw()
self.draw_fixation()
self.win.flip()
for i in range(0,3):
self.target2.play()
core.wait(0.5)
self.target2.stop()
core.wait(1)
core.wait(1)
self.noise_tone.draw()
self.draw_fixation()
self.win.flip()
for i in range(0,3):
self.noise.play()
core.wait(0.5)
self.noise.stop()
core.wait(1)
core.wait(1)
# After run 0, only present tones + noise before each run.
self.target_tone.text = 'This is the target tone with noise.'
self.low_tone.text = 'This is the low tone with noise.'
self.high_tone.text = 'This is the high tone with noise.'
# Example tones so participants know what to expect.
if self.task == 'detect':
self.target_tone.draw()
elif self.task == 'discrim':
self.low_tone.draw()
self.draw_fixation()
self.win.flip()
self.target1.setVolume(0.2)
self.target2.setVolume(0.2)
for i in range(0,3):
self.noise.play()
self.target1.play()
core.wait(0.5)
self.noise.stop()
self.target1.stop()
core.wait(1)
core.wait(1)
if self.task == 'discrim':
self.high_tone.draw()
self.draw_fixation()
self.win.flip()
for i in range(0,3):
self.noise.play()
self.target2.play()
core.wait(0.5)
self.noise.stop()
self.target2.stop()
core.wait(1)
self.message1.draw()
self.message2.draw()
self.draw_fixation()
self.win.flip()
event.waitKeys()
self.stairs1 = []
self.stairs2 = []
self.trial_counter_1 = 0
self.trial_counter_2 = 0 #reset trial counter for this run
self.trial_counter = 0
self.run_order_1 = np.argsort(np.random.rand(number_of_trials)) #create new run order for each run
self.run_order_2 = np.argsort(np.random.rand(number_of_trials))
if self.run == 0:
step_sizes = [2,1]
else:
step_sizes = 1
for thisStart in self.info['startPoints']:
#we need a COPY of the info for each staircase (or the changes here will be made to all the other staircases)
thisInfo = copy.copy(self.info)
#now add any specific info for this staircase
thisInfo['thisStart']=thisStart
thisInfo['observer']='jwp' #we might want to keep track of this
thisInfo['nTrials']=self.info['nTrials']
thisStair = data.StairHandler(startVal=thisStart, nReversals=5, stepSizes=step_sizes, nTrials=100, nUp=1, nDown=3, extraInfo=thisInfo,
method='2AFC',stepType='db',minVal=0.01,maxVal=0.3)
#applyInitialRule=True,
self.stairs.append(thisStair)
for trialN in range(self.info['nTrials']):
shuffle(self.stairs)
for thisStair in self.stairs:
thisIntensity = next(thisStair)
too_early = []
if thisStair.extraInfo['thisStart'] == self.info['startPoints'][0]:
print('Stair 1, trial: ', self.trial_counter_1)
thisStaircase = 'A' #for the data file
self.stairs1.append(thisIntensity)
print('New intensity stairs 1: ',thisIntensity)
this_disc_stim = self.disc_stim[self.run_order_1[self.trial_counter_1]]
this_present = self.present[self.run_order_1[self.trial_counter_1]]
elif thisStair.extraInfo['thisStart'] == self.info['startPoints'][1]:
print('Stair 2, trial: ', self.trial_counter_2)
thisStaircase = 'B' #for the data file
self.stairs2.append(thisIntensity)
print('New intensity stairs 2: ',thisIntensity)
this_disc_stim = self.disc_stim[self.run_order_2[self.trial_counter_2]]
this_present = self.present[self.run_order_2[self.trial_counter_2]]
# update the difficulty (the thisIntensity)
self.target1.setVolume(thisIntensity)
self.target2.setVolume(thisIntensity)
self.draw_fixation()
self.win.flip()
core.wait(1)
try:
self.noise.play(loops=None)
except:
print('Fail noise')
self.noise.play(loops=None)
if self.task == 'discrim':
if this_disc_stim == 1:
try:
self.target1.play(loops=None)
except:
print('Fail')
self.target1.play(loops=None)
elif this_disc_stim == 2:
try:
self.target2.play(loops=None)
except:
print('Fail')
self.target2.play(loops=None)
elif self.task == 'detect':
if this_present == 1:
try:
self.target1.play()
except:
print('Fail')
self.target1.play()
core.wait(0.5)
self.noise.stop()
self.target1.stop()
self.target2.stop()
#turn red if pressed during stim
too_early = event.getKeys(keyList=self.responses)
if len(too_early) > 0:
self.draw_red_fixation()
self.win.flip()
else:
self.draw_fixation()
self.win.flip()
# get response
response=None
while response==None:
allKeys=event.waitKeys(keyList = self.responses)
for thisKey in allKeys:
if self.task == 'discrim':
if self.responses[self.disc_stim_check.index(this_disc_stim)] in thisKey:
response = 1
else:
response = 0
elif self.task == 'detect':
if self.responses[self.present_check.index(this_present)] in thisKey:
response = 1
else:
response = 0
if thisKey in ['q', 'escape']:
core.quit() # abort experiment
# for checking performance per tone
if self.run > 0:
if self.task == 'discrim':
if this_disc_stim == 1:
self.disc_low_count += 1
if response == 1:
self.disc_low_correct_count += 1
elif this_disc_stim == 2:
self.disc_high_count += 1
if response == 1:
self.disc_high_correct_count += 1
elif self.task == 'detect':
if this_present == 0:
self.det_absent_count += 1
if response == 1:
self.det_absent_correct_count += 1
elif this_present == 1:
self.det_present_count += 1
if response == 1:
self.det_present_correct_count += 1
self.trial_counter += 1
if thisStair.extraInfo['thisStart'] == self.info['startPoints'][0]:
# self.mean_stairs1 = thisStair.mean()
self.trial_counter_1 += 1
elif thisStair.extraInfo['thisStart'] == self.info['startPoints'][1]:
# self.mean_stairs2 = thisStair.mean()
self.trial_counter_2 += 1
# inform staircase of the response
thisStair.addResponse(response, thisIntensity)
self.draw_fixation()
self.win.flip()
if self.task == 'discrim':
self.dataFile.write('%i,%s,%i,%.3f,%i\n' %(self.trial_counter, thisStaircase, this_disc_stim, thisIntensity, response))
elif self.task == 'detect':
self.dataFile.write('%i,%s,%i,%.3f,%i\n' %(self.trial_counter, thisStaircase, this_present, thisIntensity, response))
self.previous_correct = response #update
event.clearEvents()
if len(event.getKeys(keyList=['escape'])):
print('Quit by user.')
core.quit()
self.newstart1 = float(np.mean(self.stairs1[-5:]))
print('New start stair 1: ', self.newstart1)
self.newstart2 = float(np.mean(self.stairs2[-5:]))
print('New start stair 2: ', self.newstart2)
#if there are more two or more reversals, use the average as the new startpoint
if len(self.stairs[0].reversalIntensities) > 1:
self.approxThresh1 = float(np.mean(self.stairs[0].reversalIntensities[1:]))
print('Average of ', len(self.stairs[0].reversalIntensities)-1, ' reversals: ', self.approxThresh1)
if self.run > 0:
self.approxThresh1_all = np.concatenate((self.approxThresh1_all,self.stairs[0].reversalIntensities[1:]),axis=None)
else:
self.approxThresh1 = self.newstart1
print('Not enough reversals. Average of last five intensities: ', self.approxThresh1)
if len(self.stairs[1].reversalIntensities) > 1:
self.approxThresh2 = float(np.mean(self.stairs[1].reversalIntensities[1:]))
print('Average of ', len(self.stairs[1].reversalIntensities)-1, ' reversals: ', self.approxThresh2)
if self.run > 0:
self.approxThresh2_all = np.concatenate((self.approxThresh2_all,self.stairs[1].reversalIntensities[1:]),axis=None)
else:
self.approxThresh2 = self.newstart2
print('Not enough reversals. Average of last five intensities: ', self.approxThresh2)
self.mean_threshold_run = float(np.mean(np.array([self.approxThresh1,self.approxThresh2])))
print('Approximate threshold of this run: ', self.mean_threshold_run)
#calculate the average of the reversals (except for the first one) of all runs so far
if len(self.approxThresh1_all) > 0 and len(self.approxThresh2_all) > 0:
print('Average of all reversals of stair 1 so far: ', float(np.mean(np.array(self.approxThresh1_all))))
print('Average of all reversals of stair 2 so far: ', float(np.mean(np.array(self.approxThresh2_all))))
self.mean_threshold = float(np.mean(np.concatenate((self.approxThresh1_all,self.approxThresh2_all),axis=None)))
print('Average threshold of all runs combined so far: ',self.mean_threshold)
else:
print('Not enough reversals to calculate a threshold. Please redo the staircase procedure.')
self.mean_threshold = 0.0
# Save threshold
self.file.write('%i,%.4f,%.4f,%.4f,%f\n' %(self.run, self.approxThresh1, self.approxThresh2, self.mean_threshold_run, self.mean_threshold))
#overwrites the file on every run so the last threshold estimate is saved
file = open(self.fileName+'.txt','w')
file.write(str(self.mean_threshold))
file.close()
xvals = range(len(self.stairs1))
if self.run == 0:
self.fig,self.axs = plt.subplots(nrows=3,ncols=1,sharex=True,sharey=True,figsize =(5,15))
#self.axs[self.run].set_xticks(np.arange(0,len(self.stairs),float(len(self.stairs))/7))
self.axs[self.run].set_yticks(np.arange(0,0.35,0.05))
self.axs[self.run].plot(self.stairs1,'r',self.stairs2,'b')
self.axs[self.run].set_ylabel('Stimulus intensity')
self.axs[self.run].set_xlabel('Trial number')
self.axs[self.run].set_title('Run ' + str(self.run))
self.fig.savefig(self.fig_fileName + '.png') #save intermediate?
self.feedback1.draw()
self.draw_fixation()
self.win.flip()
def draw_fixation(self): #For the circle + cross fixation (see Thaler, Shutz, Goodale & Gegenfurtner (2013))
self.fixation1.fillColor = 'black'
self.fixation2.lineColor = 'black'
self.fixation2.fillColor = 'black'
self.fixation1.draw()
self.line1.draw()
self.line2.draw()
self.fixation2.draw()
def draw_red_fixation(self): #Red version of the circle + cross fixation
self.fixation1.fillColor = 'red'
self.fixation2.lineColor = 'red'
self.fixation2.fillColor = 'red'
self.fixation1.draw()
self.line1.draw()
self.line2.draw()
self.fixation2.draw()
def setup_stimuli(self): #Including some variables
# ### For windows:
# self.target1 = sound.backend_sounddevice.SoundDeviceSound('A', octave=3.5, sampleRate=44100, secs=0.5, stereo=True)
# self.target2 = sound.backend_sounddevice.SoundDeviceSound('B', octave=3.5, sampleRate=44100, secs=0.5, stereo=True)
# self.noise = sound.backend_sounddevice.SoundDeviceSound('TORC_424_02_h501.wav',stereo=True)
### For mac:
self.target1 = sound.Sound('A', octave=3.5, sampleRate=44100, secs=0.5)
self.target2 = sound.Sound('B', octave=3.5, sampleRate=44100, secs=0.5)
self.noise = sound.Sound('TORC_424_02_h501.wav')
self.noise.setVolume(1)
# For checking if responses are correct
self.disc_stim_check = [1,2]
self.present_check = [0,1]
#Create equal amount of trials for each condition
self.present = np.ones(number_of_trials,dtype=int)
self.present[0:int(number_of_trials/2)] = 0
self.disc_stim = np.ones(number_of_trials,dtype=int)
self.disc_stim[0:int(number_of_trials/2)] = 2
#print(self.disc_stim)
if self.version == 1:
self.responses = ['a','l','q','escape']
elif self.version == 2:
self.responses = ['l','a','q','escape']
#for the improved fixation dot
self.d1 = 0.7 #diameter outer circle. larger option: 1.5, 0.15, 7
self.d2 = 0.05 #diameter inner circle
self.w1 = 4 #linewidth
self.backgroundColor = (.5,.5,.5) #Set according to the backgroundcolor of the experiment
self.fixation1 = visual.Circle(self.win, lineColor =self.backgroundColor, lineColorSpace = 'rgb', fillColor = 'black', fillColorSpace='rgb', size=self.d1)
self.line1 = visual.Line(self.win,lineWidth=self.w1,start=(self.d1/2,0),end=(-self.d1/2,0),lineColor=self.backgroundColor,lineColorSpace='rgb')
self.line2 = visual.Line(self.win,lineWidth=self.w1,start=(0,self.d1/2),end=(0,-self.d1/2),lineColor=self.backgroundColor,lineColorSpace='rgb')
self.fixation2 = visual.Circle(self.win, lineColor ='black', lineColorSpace = 'rgb', fillColor ='black', fillColorSpace='rgb', size=self.d2)
self.fixation = visual.GratingStim(self.win, color=-1, colorSpace='rgb',tex=None, mask='circle', size=0.2)
self.stairs=[]
# create all text stimuli
self.message1 = visual.TextStim(self.win,height=.5,pos=[0,-3],text='Hit the spacebar when you are ready.', color=(-1,-1,-1), font = 'avenir',wrapWidth=20)
self.message2 = visual.TextStim(self.win,height=.5,pos=[0,+3],text='X',wrapWidth=30, color=(-1,-1,-1), font = 'avenir') #Empty to adapt below
if self.task == 'discrim':
self.first_text = visual.TextStim(self.win, pos = [0,0], wrapWidth=20, height=.5,font='avenir',color=(-1,-1,-1),text='Your task is to discriminate between two target tones embedded in noise.\n\nNow, we will estimate the appropriate difficulty for you. To do so, we will increase the difficulty until you make mistakes. Then we will decrease the difficulty again until you are performing well. This process will go on for a while in order to get a good estimate.\n\nAt some point, you will probably not hear any target anymore. Just continue, the difficulty will be adjusted.\n\nPress the spacebar to continue.')
self.intro_text = visual.TextStim(self.win, pos=[0,+3],height=.5,text='You will now get to hear the target tones.',wrapWidth=50, color=(-1,-1,-1), font = 'avenir')
if self.version ==1:
self.message2.text = 'Press LEFT (green) when you hear the low tone and RIGHT (green) when you hear the high tone.'
elif self.version==2:
self.message2.text = 'Press LEFT (green) when you hear the high tone and RIGHT (green) when you hear the low tone.'
elif self.task == 'detect':
self.first_text = visual.TextStim(self.win, pos = [0,0], height=.5, wrapWidth=30, font='avenir',color=(-1,-1,-1),text='Your task is to detect the target tone embedded in noise.\n\nNow, we will estimate the appropriate difficulty for you. To do so, we will increase the difficulty until you make mistakes. Then we will decrease the difficulty again until you are performing well. This process will go on for a while in order to get a good estimate.\n\nAt some point, you will probably not hear the target anymore. Just continue, the difficulty will be adjusted.\n\nPress the spacebar to continue.')
self.intro_text = visual.TextStim(self.win, pos=[0,+3],height=.5,text='You will now get to hear the target tone.',wrapWidth=20, color=(-1,-1,-1), font = 'avenir')
if self.version==1:
self.message2.text = 'Press LEFT (green) when the target tone is absent and RIGHT (green) when the target tone is present.'
elif self.version==2:
self.message2.text = 'Press LEFT (green) when the target tone is present and RIGHT (green) when the target tone is absent.'
self.feedback1 = visual.TextStim(self.win, height=.5,pos=[0,+3], text='This was the first run. Press the spacebar to continue.',wrapWidth=20, color=(-1,-1,-1), font = 'avenir')
self.target_tone = visual.TextStim(self.win,height=.5, pos=[0,+3], text='This is the target tone.', color=(-1,-1,-1), font = 'avenir')
self.high_tone = visual.TextStim(self.win, height=.5,pos=[0,+3], text='This is the high tone.', color=(-1,-1,-1), font = 'avenir')
self.low_tone = visual.TextStim(self.win, height=.5,pos=[0,+3], text='This is the low tone.', color=(-1,-1,-1), font = 'avenir')
self.noise_tone = visual.TextStim(self.win, height=.5,pos=[0,+3], text='This is the noise sound.', color=(-1,-1,-1), font = 'avenir')
self.newstart1 = self.newstart2 = self.approxThresh1 = self.approxThresh2 = self.mean_threshold_run = self.mean_threshold = None
self.approxThresh1_all = []
self.approxThresh2_all = []
self.disc_perf_all = None
self.det_perf_all = None
# for collecting the performance for each condition
self.disc_low_count = self.disc_low_correct_count = self.disc_high_count = self.disc_high_correct_count = self.det_absent_count = self.det_absent_correct_count = self.det_present_count = self.det_present_correct_count = 0
def setup_files(self):
if not os.path.exists('data/' + self.task + '_staircase/participant_' + self.subject_initials):
os.makedirs('data/' + self.task + '_staircase/participant_' + self.subject_initials + '/')
self.fileName = os.path.join('data/' + self.task + '_staircase/participant_' + str(self.subject_initials) + '/' + str(self.subject_initials) + '_' + self.task + '_threshold')
self.fig_fileName = os.path.join('data/' + self.task + '_staircase/participant_' + str(self.subject_initials) + '/' + str(self.subject_initials) +'_' + self.task + '_threshold')
self.perf_fileName = os.path.join('data/' + self.task + '_staircase/participant_' + str(self.subject_initials) + '/' + str(self.subject_initials) +'_' + self.task + '_performance')
try: # try to get a previous parameters file
            self.expInfo = misc.fromFile('lastParams.pickle')
except: # if not there then use a default set
self.expInfo = {'observer':'jwp', 'refOrientation':0}
self.expInfo['dateStr'] = data.getDateStr() # add the current time
# make a text file to save data
self.dataFile = open(self.fileName+'_trials.txt', 'w') # a simple text file
self.perfFile = open(self.perf_fileName+'.txt','w')
self.file = open(self.fileName+'_runs.txt', 'w')
self.file.write('run,th1,th2,mean_run,mean_all\n')
if self.task == 'discrim':
self.dataFile.write('trial,stair_nr,disc_stim,intensity,correct\n')
self.perfFile.write('low tone,high tone,ALL\n')
elif self.task == 'detect':
self.dataFile.write('trial,stair_nr,present,intensity,correct\n')
self.perfFile.write('absent,present,ALL\n')
def main(subject_initials, task):
ts = staircase_interleaved(subject_initials = subject_initials, task = task)
ts.run_staircase()
# if __name__ == '__main__':
# subject_initials = raw_input("Participant: ")
# task = raw_input("detect or discrim: ")
#main(subject_initials=subject_initials, task = task)
| 48.963025 | 623 | 0.589503 |
79476bf75ad334e7501831c8a945f13d4f3c38a9 | 53 | py | Python | spyder_unittesting/__init__.py | Nodd/spyder.unittesting | 364925f5a029a1ad9fc3ed21380f724a45805a8a | [
"MIT"
] | null | null | null | spyder_unittesting/__init__.py | Nodd/spyder.unittesting | 364925f5a029a1ad9fc3ed21380f724a45805a8a | [
"MIT"
] | null | null | null | spyder_unittesting/__init__.py | Nodd/spyder.unittesting | 364925f5a029a1ad9fc3ed21380f724a45805a8a | [
"MIT"
] | null | null | null | from .unittesting import UnitTesting as PLUGIN_CLASS
| 26.5 | 52 | 0.867925 |
79476ef07a27d73cc740ab6ca2ec97653f0814df | 953 | py | Python | httprunner_x/builtin/functions.py | xinxi1990/httprunner_x | 785e5e6e14a3bfc3cb2b5458e13f478fb2449abb | [
"Apache-2.0"
] | null | null | null | httprunner_x/builtin/functions.py | xinxi1990/httprunner_x | 785e5e6e14a3bfc3cb2b5458e13f478fb2449abb | [
"Apache-2.0"
] | null | null | null | httprunner_x/builtin/functions.py | xinxi1990/httprunner_x | 785e5e6e14a3bfc3cb2b5458e13f478fb2449abb | [
"Apache-2.0"
] | null | null | null | """
Built-in functions used in YAML/JSON testcases.
"""
import datetime
import random
import string
import time
from httprunner_x.compat import builtin_str, integer_types
from httprunner_x.exceptions import ParamsError
def gen_random_string(str_len):
""" generate random string with specified length
"""
return ''.join(
random.choice(string.ascii_letters + string.digits) for _ in range(str_len))
def get_timestamp(str_len=13):
""" get timestamp string, length can only between 0 and 16
"""
if isinstance(str_len, integer_types) and 0 < str_len < 17:
return builtin_str(time.time()).replace(".", "")[:str_len]
    raise ParamsError("timestamp length can only be between 0 and 16.")
def get_current_date(fmt="%Y-%m-%d"):
""" get current date, default format is %Y-%m-%d
"""
return datetime.datetime.now().strftime(fmt)
def sleep(n_secs):
""" sleep n seconds
"""
time.sleep(n_secs)
| 23.243902 | 84 | 0.690451 |
79476ef94c3633f780eebd926d354a810310b24a | 849 | py | Python | tests/read_tail_test.py | aescwork/sqliteminor | 83aa4f76a28141b085bbfc7f725531e0b67aeed5 | [
"BSD-2-Clause"
] | 1 | 2020-01-31T11:38:47.000Z | 2020-01-31T11:38:47.000Z | tests/read_tail_test.py | aescwork/sqliteminor | 83aa4f76a28141b085bbfc7f725531e0b67aeed5 | [
"BSD-2-Clause"
] | null | null | null | tests/read_tail_test.py | aescwork/sqliteminor | 83aa4f76a28141b085bbfc7f725531e0b67aeed5 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest
import sqlite3
import sys
sys.path.append("../sqliteminor/")
sys.path.append("../temp/")
import sqliteminor
import setup_db
class ReadTailTest(unittest.TestCase):
def setUp(self):
self.conn = setup_db.setup_db()
self.read_tail_comp = [(13, u'Alder Buckthorn'), (14, u'Common Hawthorn'), (15, u'Common Hazel'), (16, u'Midland Hawthorn'), \
(17, u'Redwood (Cupressaceae)'), (18, u'Guelder Rose')]
self.sm = sqliteminor.SQLiteMinor(self.conn, "trees")
self.read_tail_result = self.sm.read_tail(6, "tree_id", "tree_id, name")
def test_some_method(self):
self.assertEqual(self.read_tail_result, self.read_tail_comp)
def test_result(self):
self.assertEqual(self.sm.result, "OK")
def tearDown(self):
self.sm.__del__()
if __name__ == '__main__':
unittest.main()
| 16.98 | 128 | 0.68669 |
79476f21bcd01ba4dda4c843311ca97e5e2de343 | 317 | py | Python | string-permutations.py | leaen/Codeeval-solutions | fa83cb4fba3e56f79c0a6b00361c18cd3092c3f0 | [
"MIT"
] | null | null | null | string-permutations.py | leaen/Codeeval-solutions | fa83cb4fba3e56f79c0a6b00361c18cd3092c3f0 | [
"MIT"
] | null | null | null | string-permutations.py | leaen/Codeeval-solutions | fa83cb4fba3e56f79c0a6b00361c18cd3092c3f0 | [
"MIT"
] | null | null | null | import sys
from itertools import permutations
def find_permutations(s):
return sorted([''.join(p) for p in permutations(s)])
def main():
with open(sys.argv[1]) as input_file:
for line in input_file:
print(','.join(find_permutations(line.strip())))
if __name__ == '__main__':
main()
| 22.642857 | 60 | 0.656151 |
794770a5d9f82714d2710b963ca2c4cb76c7063e | 3,779 | py | Python | src/scripts/lgb_benchmark_kfold.py | arnabbiswas1/k_tab_aug_muticlass_rmse_logloss_weightedf1_stratified_tsfresh_cesium | 13db3cb9d0b2f25181ccf4b1316e12425abfc276 | [
"Apache-2.0"
] | null | null | null | src/scripts/lgb_benchmark_kfold.py | arnabbiswas1/k_tab_aug_muticlass_rmse_logloss_weightedf1_stratified_tsfresh_cesium | 13db3cb9d0b2f25181ccf4b1316e12425abfc276 | [
"Apache-2.0"
] | null | null | null | src/scripts/lgb_benchmark_kfold.py | arnabbiswas1/k_tab_aug_muticlass_rmse_logloss_weightedf1_stratified_tsfresh_cesium | 13db3cb9d0b2f25181ccf4b1316e12425abfc276 | [
"Apache-2.0"
] | null | null | null | """
LGB Benchamrk with KFold
"""
import os
from datetime import datetime
from timeit import default_timer as timer
from sklearn.model_selection import KFold
import src.common as common
import src.config.constants as constants
import src.modeling.train_util as model
import src.munging.process_data_util as process_data
if __name__ == "__main__":
common.set_timezone()
start = timer()
# Create RUN_ID
RUN_ID = datetime.now().strftime("%m%d_%H%M")
MODEL_NAME = os.path.basename(__file__).split(".")[0]
SEED = 42
EXP_DETAILS = "LGB Benchamrk with KFold"
IS_TEST = False
PLOT_FEATURE_IMPORTANCE = False
TARGET = "loss"
MODEL_TYPE = "lgb"
OBJECTIVE = "root_mean_squared_error"
METRIC = "RMSE"
BOOSTING_TYPE = "gbdt"
VERBOSE = 100
N_THREADS = -1
NUM_LEAVES = 31
MAX_DEPTH = -1
N_ESTIMATORS = 1000
LEARNING_RATE = 0.1
EARLY_STOPPING_ROUNDS = 100
lgb_params = {
"objective": OBJECTIVE,
"boosting_type": BOOSTING_TYPE,
"learning_rate": LEARNING_RATE,
"num_leaves": NUM_LEAVES,
"tree_learner": "serial",
"n_jobs": N_THREADS,
"seed": SEED,
"max_depth": MAX_DEPTH,
"max_bin": 255,
"metric": METRIC,
"verbose": -1,
}
LOGGER_NAME = "sub_1"
logger = common.get_logger(LOGGER_NAME, MODEL_NAME, RUN_ID, constants.LOG_DIR)
common.set_seed(SEED)
logger.info(f"Running for Model Number [{MODEL_NAME}] & [{RUN_ID}]")
common.update_tracking(
RUN_ID, "model_number", MODEL_NAME, drop_incomplete_rows=True
)
common.update_tracking(RUN_ID, "model_type", MODEL_TYPE)
common.update_tracking(RUN_ID, "is_test", IS_TEST)
common.update_tracking(RUN_ID, "n_estimators", N_ESTIMATORS)
common.update_tracking(RUN_ID, "learning_rate", LEARNING_RATE)
common.update_tracking(RUN_ID, "num_leaves", NUM_LEAVES)
common.update_tracking(RUN_ID, "early_stopping_rounds", EARLY_STOPPING_ROUNDS)
train_df, test_df, sample_submission_df = process_data.read_processed_data(
logger,
constants.PROCESSED_DATA_DIR,
train=True,
test=True,
sample_submission=True,
)
logger.info(train_df.columns)
train_X = train_df.drop([TARGET], axis=1)
train_Y = train_df[TARGET]
test_X = test_df
logger.info(
f"Shape of train_X : {train_X.shape}, test_X: {test_X.shape}, train_Y: {train_Y.shape}"
)
predictors = list(train_X.columns)
sk = KFold(n_splits=10, shuffle=False)
common.update_tracking(RUN_ID, "no_of_features", len(predictors), is_integer=True)
common.update_tracking(RUN_ID, "cv_method", "KFold")
results_dict = model.lgb_train_validate_on_cv(
logger=logger,
run_id=RUN_ID,
train_X=train_X,
train_Y=train_Y,
test_X=test_X,
num_class=None,
kf=sk,
features=predictors,
params=lgb_params,
n_estimators=N_ESTIMATORS,
early_stopping_rounds=EARLY_STOPPING_ROUNDS,
cat_features="auto",
is_test=False,
verbose_eval=100,
)
train_index = train_df.index
common.save_artifacts(
logger,
is_test=False,
is_plot_fi=True,
result_dict=results_dict,
submission_df=sample_submission_df,
train_index=train_index,
model_number=MODEL_NAME,
run_id=RUN_ID,
sub_dir=constants.SUBMISSION_DIR,
oof_dir=constants.OOF_DIR,
fi_dir=constants.FI_DIR,
fi_fig_dir=constants.FI_FIG_DIR,
)
end = timer()
common.update_tracking(RUN_ID, "training_time", end - start, is_integer=True)
common.update_tracking(RUN_ID, "comments", EXP_DETAILS)
logger.info("Execution Complete") | 28.413534 | 95 | 0.669754 |
7947717b579a25396a03c87e28eaa2ce361dbaf4 | 213 | py | Python | pytest_suit/routes/uploads/__init__.py | HotMaps/Toolbox | ba1e287dbc63e34bf9feb80b65b02c1db93ce91c | [
"Apache-2.0"
] | 4 | 2020-10-01T10:38:06.000Z | 2021-12-28T03:11:18.000Z | pytest_suit/routes/uploads/__init__.py | HotMaps/Toolbox | ba1e287dbc63e34bf9feb80b65b02c1db93ce91c | [
"Apache-2.0"
] | 9 | 2017-11-08T17:29:10.000Z | 2020-08-31T15:28:31.000Z | pytest_suit/routes/uploads/__init__.py | HotMaps/Toolbox | ba1e287dbc63e34bf9feb80b65b02c1db93ce91c | [
"Apache-2.0"
] | 4 | 2019-03-25T13:24:14.000Z | 2021-07-16T20:52:51.000Z | from .. import BASE_URL, test_csv_file, test_tif_file
from ..user import test_token
import uuid
import os
test_upload_name = 'pytest_upload_csv'
test_export_cm_layer_uuid = '3ef057c3-b65a-448f-9718-d46c33b9ec3b'
| 26.625 | 66 | 0.826291 |
794772e5b8c7b59f8be35d867e5c420464ce94ce | 524 | py | Python | my_shapes.py | goncalo-leal/tetris_ai_agent | ceebf9331bf4e4cafe25a61e1b53ffa20f104825 | [
"MIT"
] | null | null | null | my_shapes.py | goncalo-leal/tetris_ai_agent | ceebf9331bf4e4cafe25a61e1b53ffa20f104825 | [
"MIT"
] | null | null | null | my_shapes.py | goncalo-leal/tetris_ai_agent | ceebf9331bf4e4cafe25a61e1b53ffa20f104825 | [
"MIT"
] | 1 | 2022-02-16T17:53:55.000Z | 2022-02-16T17:53:55.000Z | """
Cada forma deve ter uma classe, no entanto essa classe não deve extender shape.
As classes individuais servirão para definir o número de rotações que cada peça pode
dar e para avaliar se um movimento é possível ou não.
"""
from constants import *
from shape import *
class MyShape:
def __init__(self, shape):
self.shape = shape
def get_min_x(self):
return min([x for x, y in self.shape.positions])
def get_max_x(self):
return max([x for x, y in self.shape.positions]) | 29.111111 | 89 | 0.685115 |
794773918c3da58b661bd4e442d7e1f68a61572e | 15,149 | py | Python | zabbix_script.py | bmcrosen/zabbix_script | bb846bd2622f219a823ad1a5b291dc54578fa927 | [
"MIT"
] | null | null | null | zabbix_script.py | bmcrosen/zabbix_script | bb846bd2622f219a823ad1a5b291dc54578fa927 | [
"MIT"
] | null | null | null | zabbix_script.py | bmcrosen/zabbix_script | bb846bd2622f219a823ad1a5b291dc54578fa927 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
"""
@Create on: 2016-06-29 12:59
@Author: Rosen
"""
from __future__ import print_function
import argparse
import sys
from pyzabbix.api import ZabbixAPI
class Zabbix_Api:
def __init__(self, idc='qc'):
qc_url = ''
xg_url = ''
qc_auth = ''
xg_auth = ''
self.url = qc_url if idc == 'qc' else xg_url
self.auth = qc_auth if idc == 'qc' else xg_auth
self.z = ZabbixAPI(url=self.url, use_auth=True, auth=self.auth)
def Get_Token(self, user=None, password=None):
try:
token = self.z._login(user=user, password=password)
return token
except Exception as e:
print(e)
    # Get IDs; can be extended to fetch all matching IDs
def Get_ID(self,
HostName=None,
Template=None,
ScreenName=None,
Action='filter',
Macros_Flag=False,
Filter_Flag=False):
if HostName and len(HostName) <= 6:
            exit('Warning: hostname too short')
if HostName and '*' in HostName:
HostName = ''.join(HostName.split('*'))
Filter_Flag = True
Get_Input = HostName or Template or ScreenName
Host_List = []
Host_ID = []
Template_List = []
Template_ID = []
Screen_ID = []
Screen_List = []
try:
for l in Get_Input.split(','):
if HostName:
Host_List.append(l)
elif Template:
Template_List.append(l)
elif ScreenName:
Screen_List.append(l)
if Host_List:
                # fuzzy match (search) vs. exact match (filter)
Action = 'search' if Filter_Flag else Action
# Group_Flag = True if GroupName else Group_Flag
for h in Host_List:
host_id = self.z.do_request('host.get',
params={'output': ['host', 'hostid'], "%s" % Action: {'host': h}})
res = sorted(host_id['result'], key=lambda x: int(x['host'].split('-')[-1]))
for i in res:
del i['host']
Host_ID.append(i)
return Host_ID
elif Template_List:
for t in Template_List:
if Macros_Flag:
re = self.z.do_request('template.get',
params={'selectMacros': ['macro', 'value'], 'filter': {'host': t}})
macros = re['result'][0]['macros']
for i in macros:
i.pop('hosts')
data = re['result']
Template_ID.extend(data)
re = self.z.do_request('template.get', params={'output': 'templateid', 'filter': {'host': t}})
data = re['result']
Template_ID.extend(data)
return Template_ID
elif Screen_List:
for s in Screen_List:
re = self.z.do_request('screen.get', params={'output': 'screenid', 'filter': {'name': s}})
Screen_ID.append(re['result'][0]['screenid'])
return Screen_ID
except Exception as e:
print(e)
def Get_GroupID(self, GroupName=None):
try:
Group_ID = self.z.do_request('hostgroup.get', params={'output': 'extend', 'filter': {'name': GroupName}})
return Group_ID['result'][0]['groupid']
except Exception as e:
print(e)
def Get_GraphID(self, HostName=None, GraphName=None, Columns=3):
Graph_ID = []
Graph_List = []
x = 0
y = 0
try:
Host_ID = self.Get_ID(HostName=HostName)
Only_Host_ID = map(lambda x: x.values()[0], Host_ID)
for hostid in Only_Host_ID:
re_graphid = self.z.do_request('graph.get',
params={'output': ['graphid'],
'hostids': hostid,
'softfield': 'graphid', 'search': {'name': GraphName}})
if re_graphid['result']:
Graph_ID.append(re_graphid['result'][0]['graphid'])
else:
                    exit('Some hosts do not have the graph: "%s"!' % GraphName)
for graph in Graph_ID:
Graph_List.append({
'resourcetype': '0',
'resourceid': graph,
'width': '500',
'height': '200',
'x': str(x),
'y': str(y),
'colspan': '0',
'rowspan': '0',
'elements': '0',
'valign': '0',
'halign': '0',
'style': '0',
'url': '',
'dynamic': '0'
})
x += 1
if x == int(Columns):
x = 0
y += 1
return Graph_ID, Graph_List
except Exception as e:
print(e)
def Screen_Create(self, HostName=None, GraphName=None, ScreenName=None, Columns=3):
try:
Graph_ID, Graph_List = self.Get_GraphID(HostName=HostName, GraphName=GraphName)
if len(Graph_ID) % Columns == 0:
vsize = len(Graph_ID) / Columns
else:
vsize = (len(Graph_ID) / Columns) + 1
Screen_ID = self.Get_ID(ScreenName=ScreenName)[0]
if Screen_ID:
re = self.z.do_request('screen.update', params={'screenid': Screen_ID,
'name': ScreenName,
'screenitems': Graph_List,
'hsize': Columns,
'vsize': vsize})
if re['result']['screenids']:
                    print('The screen : "%s" has been updated!' % ScreenName)
else:
re = self.z.do_request('screen.create',
params={'name': ScreenName, 'hsize': Columns, 'vsize': vsize,
'screenitems': Graph_List})
if re['result']['screenids']:
                    print('The screen name: "%s" created successfully!' % ScreenName)
sys.exit(0)
exit('Screen create failed')
except Exception as e:
print(e)
def Create_Template(self, TemplateName=None, LinkTemplate=None, Template_ID=None, Macros=None):
try:
if LinkTemplate:
Template_Info = self.Get_ID(Template=LinkTemplate, Macros_Flag=True)[0]
Template_ID = Template_Info['templateid']
Macros = Template_Info['macros']
re = self.z.do_request('template.create',
params={'host': TemplateName, 'groups': {'groupid': 1},
'templates': Template_ID,
'macros': Macros})
if re['result']['templateids']:
                print('Template "%s" created successfully!' % TemplateName)
except Exception as e:
print(e)
def Delete_Template(self, TemplateName=None):
Template_List = []
try:
Template_ID = self.Get_ID(Template=TemplateName)[0]['templateid']
Template_List.append(Template_ID)
re = self.z.do_request('template.delete', params=Template_List)
if re['result']['templateids']:
                print('Template "%s" has been deleted!' % TemplateName)
except Exception as e:
print(e)
def Mass_Remove_Templates(self, HostName=None, Templates=None):
data = []
try:
Host_ID = self.Get_ID(HostName=HostName)
Only_Host_ID = map(lambda x: x.values()[0], Host_ID)
for t in Templates.split(','):
Template_ID = self.Get_ID(Template=t)[0]['templateid']
re = self.z.do_request('host.massremove',
params={'hostids': Only_Host_ID, 'templateids_clear': Template_ID})
data.append(re['result'])
if data:
                print('template has been unlinked!')
sys.exit(0)
exit('template unlink failure!')
except Exception as e:
print(e)
def Mass_Add_Templates(self, HostName=None, Templates=None):
Templates_List = []
data = []
try:
Host_ID = self.Get_ID(HostName=HostName)
for t in Templates.split(','):
Templates_ID = self.Get_ID(Template=t)
Templates_List.extend(Templates_ID)
re = self.z.do_request('host.massadd', params={'hosts': Host_ID, 'templates': Templates_List})
data.append(re['result'])
if data:
                print('Template has been linked!')
sys.exit(0)
exit('Template link failure!')
except Exception as e:
print(e)
def Mass_Groups(self, HostName=None, GroupName=None, Method=None):
Group_ID = self.Get_GroupID(GroupName=GroupName)
Hosts_ID = self.Get_ID(HostName=HostName)
Only_Host_ID = map(lambda x: x.values()[0], Hosts_ID)
Mass = 'host.mass'
try:
if Method == 'replace':
Method = Mass + 'update'
elif Method == 'add':
Method = Mass + 'add'
re = self.z.do_request(Method, params={'hosts': Hosts_ID, 'groups': [{'groupid': Group_ID}]})
if re['result']['hostids']:
print('hosts information has been updated!')
elif Method == 'remove':
re = self.z.do_request('host.massremove', params={'hostids': Only_Host_ID, 'groupids': Group_ID})
if re['result']['hostids']:
print('hosts information has been updated!')
except Exception as e:
print(e)
def Method(self, ScreenName=None):
try:
Screen_ID = self.Get_ID(ScreenName=ScreenName)
re = self.z.do_request('screen.delete', params=Screen_ID)['result']['screenids']
if re:
                print('%s has been deleted' % ScreenName)
sys.exit(0)
            print('The given screen name: "%s" does not exist' % ScreenName)
except Exception as e:
print(e)
def Disable_Host(self, HostName=None, Method=None):
status = 0
data = []
try:
status = 1 if Method == 'disable' else status
Hostids = self.Get_ID(HostName=HostName)
if not Hostids:
                exit('"%s" does not exist!' % HostName)
for h in Hostids:
re = self.z.do_request('host.massupdate', params={'hosts': h, 'status': status})
data.append(re['result']['hostids'])
if not data:
exit('"%s" failed!' % Method)
            print('hosts have been "%s"!' % Method)
except Exception as e:
print(e)
def main(self):
if len(sys.argv) == 1:
parse.print_help()
else:
args = parse.parse_args()
Method = ['delete', 'disable', 'enable', 'replace', 'remove', 'add', 'create']
# print(args)
if args.idc == 'xg':
self.__init__(idc='xg')
if args.method_link == 'unlink' and args.template and args.hostname:
self.Mass_Remove_Templates(HostName=args.hostname, Templates=args.template)
elif args.method_link == 'link' and args.template and args.hostname:
self.Mass_Add_Templates(HostName=args.hostname, Templates=args.template)
elif args.screen and args.hostname and args.graph:
self.Screen_Create(HostName=args.hostname, GraphName=args.graph, ScreenName=args.screen)
elif args.graph and args.hostname:
self.Get_GraphID(HostName=args.hostname, GraphName=args.graph)
elif args.method:
if args.screen and args.method in Method:
self.Method(ScreenName=args.screen)
elif args.group and args.hostname and args.method in Method:
self.Mass_Groups(HostName=args.hostname, GroupName=args.group, Method=args.method)
elif args.method == 'create' and args.template or args.link_template:
self.Create_Template(TemplateName=args.template, LinkTemplate=args.link_template)
elif args.method == 'delete' and args.template:
self.Delete_Template(TemplateName=args.template)
elif args.hostname and args.method in Method:
self.Disable_Host(HostName=args.hostname, Method=args.method)
elif args.hostname or args.template:
re = self.Get_ID(HostName=args.hostname, Template=args.template)
print(re)
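# Hedged command-line sketches assembled from the argparse help strings below;
# the host, template, graph and screen names are illustrative only:
#
#     python zabbix_script.py -I qc -H "qc-moses-async*"                                  # look up host IDs
#     python zabbix_script.py -H "qc-moses-async*" -L link -T "Template OS Linux"         # link a template
#     python zabbix_script.py -H "qc-moses-async*" -G "CPU utilization" -S "async screen" # build a screen
#     python zabbix_script.py -M disable -H "qc-moses-async*"                             # disable hosts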
if __name__ == '__main__':
parse = argparse.ArgumentParser(description='Zabbix API', usage='%(prog)s [options]')
parse.add_argument('-I,', '--idc', dest='idc', type=str, help='Specify IDC name; Example: -I "xg" or -I "qc"')
parse.add_argument('-H,', dest='hostname', type=str,
help='Support that match the given wildcard search..'
' Example: -H "qc-moses-async*"',
metavar='hostname')
parse.add_argument('-T,', dest='template', help='zabbix template; Example: -T "Template OS Linux"',
metavar='template')
parse.add_argument('-L,', dest='method_link', help='Unlink a templates and clear form the given hosts. '
'Link a template from the hosts. '
'Example: -L "link" or -L "unlink"', metavar='link or unlink')
parse.add_argument('-G,', dest='graph', help='get graph name from the given name', metavar='graph')
parse.add_argument('-S,', dest='screen', help='create screen from the given hosts', metavar='screen')
parse.add_argument('-M,', dest='method',
help='support "delete screen", "disable, enable hosts", "replace, remove, add group",'
' "create template", "delete template" .'
' Example: -M "delete" -S "test screen"',
metavar='method')
parse.add_argument('-g,', dest='group', help='groups information', metavar='group')
parse.add_argument('-l,', dest='link_template', help='Templates to be linked to the template',
metavar='link template')
zabbix = Zabbix_Api()
zabbix.main()
| 44.166181 | 117 | 0.499703 |
794773a0d1c3ba8e292f6f840d46ce911df4def2 | 17,473 | py | Python | elastipy/aggregation/converter.py | defgsus/elastipy | c1144ab39fa70571ba0e02ccf41d380a8a1bd730 | [
"Apache-2.0"
] | 1 | 2021-02-17T17:50:28.000Z | 2021-02-17T17:50:28.000Z | elastipy/aggregation/converter.py | defgsus/elastipy | c1144ab39fa70571ba0e02ccf41d380a8a1bd730 | [
"Apache-2.0"
] | 2 | 2021-03-29T02:09:41.000Z | 2022-03-01T20:09:48.000Z | elastipy/aggregation/converter.py | netzkolchose/elastipy | c1144ab39fa70571ba0e02ccf41d380a8a1bd730 | [
"Apache-2.0"
] | null | null | null | from copy import copy, deepcopy
from itertools import chain
import fnmatch
from typing import Sequence, Union, Optional, Iterable, Tuple, TextIO, Any, Mapping, List
from .helper import dict_rows_to_list_rows, create_matrix, remove_matrix_axis
class ConverterMixin:
"""
Interface that uses Visitor(self) to
convert the aggregation keys and values into various objects.
Must be bound into an Aggregation or compatible class
to work properly. Especially:
- must be compatible for Visitor(self)
- needs access to self.root.name
"""
def keys(
self,
key_separator: Optional[str] = None,
tuple_key: bool = False,
):
"""
Iterates through all keys of this aggregation.
For example, a top-level terms aggregation would return all bucketed field values.
For a nested bucket aggregation each key is a tuple of all parent keys as well.
:param key_separator: ``str``
Optional separator to concat multiple keys into one string
:param tuple_key: ``bool``
If True, the key is always a tuple
If False, the key is a string if there is only one key
:return: generator
"""
for key, value in self.items(key_separator=key_separator, tuple_key=tuple_key):
yield key
def values(self, default=None):
"""
Iterates through all values of this aggregation.
:param default: If not None any None-value will be replaced by this.
:return: generator
"""
for key, value in self.items(default=default):
yield value
def items(
self,
key_separator: str = None,
tuple_key: bool = False,
default=None,
) -> Iterable[Tuple]:
"""
Iterates through all key, value tuples.
:param key_separator: ``str``
Optional separator to concat multiple keys into one string.
:param tuple_key: ``bool``
If True, the key is always a tuple.
If False, the key is a string if there is only one key.
:param default:
If not None any None-value will be replaced by this.
:return: generator
"""
from .visitor import Visitor
v = Visitor(self, default_value=default, key_separator=key_separator, tuple_key=tuple_key)
yield from v.items()
def rows(
self,
header: bool = True,
include: Union[str, Sequence[str]] = None,
exclude: Union[str, Sequence[str]] = None,
flat: Union[bool, str, Sequence[str]] = False,
default = None,
) -> Iterable[list]:
"""
Iterates through all result values from this aggregation branch.
Each row is a list. The first row contains the names if 'header' == True.
This will include all parent aggregations (up to the root) and all children
aggregations (including metrics).
:param header: ``bool``
If True, the first row contains the names of the columns
:param include: ``str`` or ``sequence of str``
Can be one or more (OR-combined) wildcard patterns.
If used, any column that does not fit a pattern is removed.
:param exclude: ``str`` or ``sequence of str``
Can be one or more (OR-combined) wildcard patterns.
If used, any column that fits a pattern is removed.
:param flat: ``bool``, ``str`` or ``sequence of str``
Can be one or more aggregation names that should be *flattened out*,
meaning that each key of the aggregation creates a new column
instead of a new row. If ``True``, all bucket aggregations are
*flattened*.
Only supported for bucket aggregations!
.. NOTE::
Currently not supported for the root aggregation!
:param default:
This value will be used wherever a value is undefined.
:return: generator of list
"""
yield from dict_rows_to_list_rows(
self.dict_rows(include=include, exclude=exclude, flat=flat),
header=header,
default=default,
)
def dict_rows(
self,
include: Union[str, Sequence[str]] = None,
exclude: Union[str, Sequence[str]] = None,
flat: Union[bool, str, Sequence[str]] = False,
) -> Iterable[dict]:
"""
Iterates through all result values from this aggregation branch.
This will include all parent aggregations (up to the root) and all children
aggregations (including metrics and pipelines).
:param include: ``str`` or ``sequence of str``
Can be one or more (OR-combined) wildcard patterns.
If used, any column that does not fit a pattern is removed.
:param exclude: ``str`` or ``sequence of str``
Can be one or more (OR-combined) wildcard patterns.
If used, any column that fits a pattern is removed.
:param flat: ``bool``, ``str`` or ``sequence of str``
Can be one or more aggregation names that should be *flattened out*,
meaning that each key of the aggregation creates a new column
instead of a new row. If ``True``, all bucket aggregations are
*flattened*.
Only supported for bucket aggregations!
.. NOTE::
Currently not supported for the root aggregation!
:return: generator of dict
"""
from .visitor import Visitor
return Visitor(self).dict_rows(include=include, exclude=exclude, flat=flat)
def to_dict(self, key_separator=None, default=None) -> dict:
"""
Create a dictionary from all key/value pairs.
:param key_separator: str, optional separator to concat multiple keys into one string
:param default: If not None any None-value will be replaced by this.
:return: dict
"""
return {
key: value
for key, value in self.items(key_separator=key_separator, default=default)
}
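    # A hedged usage sketch for the key/value interface above, reusing the
    # "color"/"shape" terms aggregations from the to_matrix() docstring further
    # below; the concrete keys and counts are illustrative only:
    #
    #     agg = Search().agg_terms("color", field="color").agg_terms("shape", field="shape")
    #     # ... execute the search ...
    #     agg.to_dict(key_separator="|")
    #     # -> {"red|circle": 23, "red|triangle": 42, ...}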
def to_pandas(
self,
index: Union[bool, str] = False,
to_index: Union[bool, str] = False,
include: Union[str, Sequence[str]] = None,
exclude: Union[str, Sequence[str]] = None,
flat: Union[bool, str, Sequence[str]] = False,
dtype=None,
default=None,
):
"""
Converts the results of ``dict_rows()`` to a pandas DataFrame.
This will include all parent aggregations (up to the root) and all children
aggregations (including metrics).
Any columns containing dates will be automatically converted to pandas.Timestamp.
This method has a synonym: ``df``
:param index: ``bool`` or ``str``
Sets a specific column as the index of the DataFrame.
- If ``False`` no explicit index is set.
- If ``True`` the root aggregation's keys will be the index.
- if ``str`` explicitly set a certain column as the DataFrame index.
.. NOTE::
                The column is kept in the DataFrame. If you want to set a
column as index and remove it from the columns, use ``to_index``.
:param to_index: ``bool`` or ``str``
Same as ``index`` but the column is removed from DataFrame.
- If ``False`` no explicit index is set.
- If ``True`` the root aggregation's keys will be the index.
- if ``str`` explicitly set a certain column as the DataFrame index.
:param include: ``str or list of str``
Can be one or more (OR-combined) wildcard patterns.
If used, any column that does not fit a pattern is removed
:param exclude: ``str or list of str``
Can be one or more (OR-combined) wildcard patterns.
If used, any column that fits a pattern is removed
:param flat: ``bool``, ``str`` or ``sequence of str``
Can be one or more aggregation names that should be *flattened out*,
meaning that each key of the aggregation creates a new column
instead of a new row. If ``True``, all bucket aggregations are
*flattened*.
Only supported for bucket aggregations!
.. NOTE::
Currently not supported for the root aggregation!
:param dtype:
Numpy data type to force. Only a single dtype is allowed. If None, infer.
:param default:
This value will be used wherever a value is undefined.
:return: pandas ``DataFrame`` instance
"""
import pandas as pd
if index and to_index:
raise ValueError(
"Can not use 'index' and 'to_index' together, settle for one please."
)
rows = list(dict_rows_to_list_rows(
self.dict_rows(include=include, exclude=exclude, flat=flat),
default=default,
header=True,
))
if rows:
df = pd.DataFrame(rows[1:], columns=rows[0], dtype=dtype)
else:
df = pd.DataFrame(dtype=dtype)
for key in df:
series = pd_series_to_datetime(df[key])
if series is not None:
df[key] = series
index = index or to_index
if index and len(df):
if index is True:
index = self.root.name
df.index = df[index]
if to_index:
df.pop(index)
return df
# synonym
df = to_pandas
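    # A hedged sketch for the DataFrame conversion above; the "color"
    # aggregation name is an assumption reused from the docstrings in this
    # class, not a required field:
    #
    #     df = agg.to_pandas(to_index=True)   # root aggregation keys become the index
    #     df = agg.df(include="color*")       # same conversion via the df() synonym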
def to_matrix(
self,
sort: Optional[Union[bool, str, int, Sequence[Union[str, int]]]] = None,
default: Optional[Any] = None,
include: Optional[Union[str, Sequence[str]]] = None,
exclude: Optional[Union[str, Sequence[str]]] = None,
) -> Tuple[List[str], List, List]:
"""
Generate an N-dimensional matrix from the values of this aggregation.
Each dimension corresponds to one of the parent bucket keys that lead
to this aggregation.
The values are gathered through the :link:`Aggregation.items` method.
So the matrix values are either the ``doc_count`` of the bucket
aggregation or the result of a ``metric`` or ``pipeline`` aggregation
that is inside one of the bucket aggregations.
.. CODE::
a = Search().agg_terms("color", field="color")
a = a.agg_terms("shape", field="shape")
...
names, keys, matrix = a.to_matrix()
names == ["color", "shape"]
keys == [["red", "green", "blue"], ["circle", "triangle"]]
matrix == [[23, 42], [84, 69], [4, 10]]
:param sort:
Can sort one or several keys/axes.
- ``True`` sorts all keys ascending
- ``"-"`` sorts all keys descending
- The **name of an aggregation** sorts its keys ascending.
A "-" prefix sorts descending.
- An **integer** defines the aggregation by index.
Negative integers sort descending.
- A **sequence** of strings or integers can sort multiple keys
For example, ``agg.to_matrix(sort=("color", "-shape", -4))`` would
sort the ``color`` keys ascending, the ``shape`` keys descending and the
4th aggregation *-whatever that is-* descending.
:param default:
If not None any None-value will be replaced by this value
:param include: ``str | seq[str]``
One or more wildcard patterns that include matching keys.
All other keys are removed from the output.
:param exclude: ``str | seq[str]``
One or more wildcard patterns that exclude matching keys.
:return:
A tuple of **names**, **keys** and **matrix data**, each as list.
The **names** are the names of each aggregation that generates keys.
The **keys** are a list of lists, each corresponding to all the keys
of each parent aggregation.
**Data** is a list, with other nested lists for each further dimension,
containing the values of this aggregation.
Returns three empty lists if no data is available.
"""
from .visitor import Visitor
names = Visitor(self).key_names()
if isinstance(include, str):
include = [include]
if isinstance(exclude, str):
exclude = [exclude]
data_items = list(self.items(tuple_key=True, default=default))
if not data_items:
return [], [], []
data_keys, data_values = zip(*data_items)
num_dim = len(data_keys[0])
# collect keys for each dimension in the order of appearance
keys = [[] for _ in range(num_dim)]
for key in data_keys:
for i, k in enumerate(key):
if k not in keys[i]:
keys[i].append(k)
if sort:
if sort is True:
names_to_sort = names
elif isinstance(sort, str):
if sort == "-":
names_to_sort = [f"-{n}" for n in names]
else:
names_to_sort = [sort]
elif isinstance(sort, Iterable):
names_to_sort = sort
else:
raise TypeError(f"Invalid type {type(sort).__name__} for sort")
for n in reversed(names_to_sort):
if isinstance(n, str):
n, reverse = n.lstrip("-"), n.startswith("-")
try:
idx = names.index(n)
except ValueError:
raise ValueError(
f"Column '{n}' not found, available: {', '.join(names)}"
)
else:
idx, reverse = abs(n), n < 0
keys[idx].sort(reverse=reverse)
matrix = create_matrix(*(len(k) for k in keys), scalar=default)
for key, value in zip(data_keys, data_values):
m = matrix
for i in range(num_dim):
idx = keys[i].index(key[i])
if i == num_dim - 1:
m[idx] = value
else:
m = m[idx]
if include or exclude:
repeat = True
while repeat:
repeat = False
for dim, dim_keys in enumerate(keys):
for i, key in enumerate(dim_keys):
if not is_key_match(key, include, exclude):
dim_keys.pop(i)
remove_matrix_axis(matrix, dim, i)
repeat = True
break
if repeat:
break
return names, keys, matrix
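# Worked example of the sort parameter (a sketch reusing the docstring data
# above; not part of the original source):
#
#   names  == ["color", "shape"]
#   keys   == [["red", "green", "blue"], ["circle", "triangle"]]
#   matrix == [[23, 42], [84, 69], [4, 10]]
#
#   names, keys, matrix = a.to_matrix(sort=("color", "-shape"))
#   keys   == [["blue", "green", "red"], ["triangle", "circle"]]
#   matrix == [[10, 4], [69, 84], [42, 23]]
#
# The matrix is rebuilt after the keys are sorted, so the values follow their
# sorted keys along every axis.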
def df_matrix(
self,
sort: Optional[Union[bool, str, int, Sequence[Union[str, int]]]] = None,
default: Optional[Any] = None,
include: Optional[Union[str, Sequence[str]]] = None,
exclude: Optional[Union[str, Sequence[str]]] = None,
):
"""
Returns a pandas DataFrame containing the matrix.
See `to_matrix` for details.
Only one- and two-dimensional matrices are supported.
:return:
pandas.DataFrame instance
:raises ValueError: If the number of dimensions is 0 or above 2
"""
import pandas as pd
names, keys, matrix = self.to_matrix(
sort=sort,
default=default,
include=include,
exclude=exclude,
)
if len(keys) == 1:
df = pd.DataFrame(matrix, index=keys[0])
elif len(keys) == 2:
df = pd.DataFrame(matrix, index=keys[0], columns=keys[1])
else:
raise ValueError(
f"Can not convert matrix of dimension {len(keys)} to pandas DataFrame"
)
series = pd_series_to_datetime(df.index)
if series is not None:
df.index = series
if len(keys) == 2:
series = pd_series_to_datetime(df.columns)
if series is not None:
df.columns = series
return df
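# Sketch of the resulting frame for the two-dimensional docstring example above
# (illustrative, not part of the original source):
#
#   df = a.df_matrix()
#   #           circle  triangle
#   # red           23        42
#   # green         84        69
#   # blue           4        10
#
# One-dimensional matrices become a single-column DataFrame indexed by the keys;
# anything above two dimensions raises ValueError.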
def is_key_match(key: str, include: Optional[Sequence], exclude: Optional[Sequence]):
if not include and not exclude:
return True
key = str(key)
if exclude:
for pattern in exclude:
if fnmatch.fnmatch(key, pattern):
return False
if include:
for pattern in include:
if fnmatch.fnmatch(key, pattern):
return True
return False
return True
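# A few concrete cases of the wildcard matching above (illustrative only):
#
#   is_key_match("red", include=["r*"], exclude=None)       # True
#   is_key_match("red", include=["b*"], exclude=None)       # False
#   is_key_match("red", include=None, exclude=["r*"])       # False
#   is_key_match("color.doc_count", None, ["*doc_count"])   # False
#
# exclude wins over include because it is evaluated first.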
def pd_series_to_datetime(series):
import pandas as pd
from pandas._libs.tslibs import OutOfBoundsDatetime
import numpy as np
from dateutil.parser import ParserError
if series.dtype == np.dtype("O"):
try:
return pd.to_datetime(series, format="%Y-%m-%dT%H:%M:%S.%fZ")
except (ValueError, TypeError, ParserError, OutOfBoundsDatetime):
pass
return None
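# Behaviour sketch (an assumption based on the format string above): only
# object-dtype series whose values look like "2021-01-01T12:00:00.000Z" are
# converted; a series that fails to parse, or overflows pandas' Timestamp range,
# is left alone because the callers ignore a None return value.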
| 34.531621 | 98 | 0.560522 |
794774412f0511a7497860f26e3a10fbf7a4eb28 | 662 | py | Python | old/find_solution.py | SPRACE/track-ml | 3af95fd014e98a5b11261dc5d618f34f82fdf84d | [
"MIT"
] | null | null | null | old/find_solution.py | SPRACE/track-ml | 3af95fd014e98a5b11261dc5d618f34f82fdf84d | [
"MIT"
] | 10 | 2019-04-15T21:44:31.000Z | 2020-08-26T21:05:00.000Z | old/find_solution.py | SPRACE/track-ml | 3af95fd014e98a5b11261dc5d618f34f82fdf84d | [
"MIT"
] | 4 | 2019-04-12T19:04:16.000Z | 2020-01-14T13:30:44.000Z | import random
import sys
sys.path.append('/home/silvio/github/track-ml-1/utils/')
from explorer_fake_tracks import *
err=0.3
for i in range(3):
err=err*0.1
err_string = "%.10f" % err
print(err_string)
with open('/data/ds') as f:
lines = random.sample(f.readlines(),1500)
with open('/data/dsRandom', 'w') as f:
for item in lines:
f.write("%s" % item)
filenames = ['/data/dsHeader', '/data/dsRandom']
with open('/data/dsEval', 'w') as outfile:
for fname in filenames:
with open(fname) as infile:
outfile.write(infile.read())
call_create_fake("/data/dsEval",err)
| 24.518519 | 56 | 0.601208 |
7947744e364c92791793423764444d3e20e4afc2 | 9,444 | py | Python | OPTIMAQS/controller/controller/luigsneumann_SM5_modified.py | jeremyforest/whole_optic_gui | 7af257e739da90b8ce8b2531aa1c520959fa7bff | [
"MIT"
] | null | null | null | OPTIMAQS/controller/controller/luigsneumann_SM5_modified.py | jeremyforest/whole_optic_gui | 7af257e739da90b8ce8b2531aa1c520959fa7bff | [
"MIT"
] | 6 | 2021-02-02T23:09:14.000Z | 2022-03-12T00:52:01.000Z | OPTIMAQS/controller/controller/luigsneumann_SM5_modified.py | jeremyforest/whole_optic_gui | 7af257e739da90b8ce8b2531aa1c520959fa7bff | [
"MIT"
] | null | null | null | """
Luigs And Neumann 5 controller from https://github.com/romainbrette/manipulator
Modified for Python 3 compatibility and personal tweaks as needed.
Device class for the Luigs and Neumann SM-5 manipulator controller.
Adapted from Michael Graupner's LandNSM5 class.
"""
from serialdevice import SerialDevice
import serial
import binascii
import time
import struct
import warnings
from numpy import zeros
__all__ = ['LuigsNeumann_SM5']
verbose = True
class LuigsNeumann_SM5(SerialDevice):
def __init__(self, name=None):
# Note that the port name is arbitrary, it should be set or found out
SerialDevice.__init__(self, name)
# Open the serial port; 1 second time out
self.port.baudrate = 38400
self.port.bytesize = serial.EIGHTBITS
self.port.parity=serial.PARITY_NONE
self.port.stopbits=serial.STOPBITS_ONE
self.port.timeout=1 #None is blocking; 0 is non blocking
self.port.rts = True
self.port.dtr = True
self.port.xonxoff = False
self.port.rtscts = False
self.port.dsrdtr = False
#self.port.writeTimeout = 2
#self.port.inter_byte_timeout = 1
self.port.open()
self.port.flushInput()
self.port.flushOutput()
#import pdb; pdb.set_trace()
self.established_time = time.time()
self.establish_connection()
def send_command(self, ID, data, nbytes_answer, ack_ID=''):
'''
Send a command to the controller
'''
now = time.time()
if now - self.established_time > 3:
self.establish_connection()
self.established_time = now
high, low = self.CRC_16(data,len(data))
# Create hex-string to be sent
# <syn><ID><byte number>
send = '16' + ID + '%0.2X' % len(data)
print('hex string:' + send)
# <data>
# Loop over length of data to be sent
for i in range(len(data)):
send += '%0.2X' % data[i]
print('send data:' + send)
# <CRC>
send += '%0.2X%0.2X' % (high,low)
print('send data with crc:' + send)
# Convert hex string to bytes
sendbytes = binascii.unhexlify(send)
print('sendbytes:')
print(sendbytes)
expected = binascii.unhexlify('06' + ack_ID)
print('expected:')
print(expected)
self.port.write(sendbytes)
time.sleep(0.1)
answer = self.port.read(nbytes_answer+6)
# answer = self.port.readlines()
print('answer:')
print(answer)
if answer[:len(expected)] != expected :
warnings.warn('Did not get expected response for command with ID ' + ID +' ; resending')
# Resend
return self.send_command(ID, data, nbytes_answer, ack_ID)
return answer[4:4+nbytes_answer]
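# Frame layout sketch (derived from the code above; CRC values are placeholders):
# for establish_connection() below, send_command('0400', [], 0, ack_ID='040b')
# builds the hex string
#
#   '16'   +  '0400'  +  '00'   +  '<hh><ll>'
#   <syn>     <ID>       <len>     <CRC-16 from SerialDevice.CRC_16>
#
# and expects the acknowledgement bytes unhexlify('06' + '040b') at the start of
# the controller's reply.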
def establish_connection(self):
if verbose:
print ("establishing connection")
self.established_time = time.time()
self.send_command('0400', [], 0, ack_ID='040b')
if verbose:
print ("connection established")
def position(self, axis):
'''
Current position along an axis.
Parameters
----------
axis : axis number (starting at 1)
Returns
-------
The current position of the device axis in um.
'''
res = self.send_command('0101', [axis], 4)
return struct.unpack('f', res)[0]
def position_second_counter(self, axis):
'''
Current position along an axis.
Parameters
----------
axis : axis number (starting at 1)
Returns
-------
The current position of the device axis in um.
'''
res = self.send_command('0131', [axis], 4)
return struct.unpack('f', res)[0]
def absolute_move(self, x, axis):
'''
Moves the device axis to position x.
It uses the fast movement command.
Parameters
----------
axis: axis number (starting at 1)
x : target position in um.
speed : optional speed in um/s.
'''
x_hex = binascii.hexlify(struct.pack('>f', x))
data = [axis, int(x_hex[6:], 16), int(x_hex[4:6], 16), int(x_hex[2:4], 16), int(x_hex[:2], 16)]
# TODO: always goes fast (use 0049 for slow)
self.send_command('0048', data, 0)
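# Worked example of the byte packing above (illustrative): for x = 1000.0,
# struct.pack('>f', 1000.0) is b'\x44\x7a\x00\x00', so x_hex == b'447a0000' and
# data == [axis, 0x00, 0x00, 0x7a, 0x44] -- i.e. the float is sent to the
# controller least-significant byte first.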
def absolute_move_group(self, x, axes):
for i in range(len(x)):
self.absolute_move(x[i], axes[i])
time.sleep(0.05)
def relative_move(self, x, axis):
'''
Moves the device axis by relative amount x in um.
It uses the fast command.
Parameters
----------
axis: axis number
x : position shift in um.
'''
x_hex = binascii.hexlify(struct.pack('>f', x))
data = [axis, int(x_hex[6:], 16), int(x_hex[4:6], 16), int(x_hex[2:4], 16), int(x_hex[:2], 16)]
self.send_command('004A', data, 0)
def stop(self, axis):
"""
Stop current movements.
"""
self.send_command('00FF', [axis], 0)
def set_to_zero(self, axis):
"""
Set the current position of the axis as the zero position
:param axis:
:return:
"""
for axes in axis:
self.send_command('00f0', [axes], 0)
def set_to_zero_second_counter(self, axes):
"""
Set the current position of the axes as the zero position
:param axes:
:return:
"""
# # collection command does not seem to work...
# ID = 'A0F0'
# address = group_address(axes)
# self.send_command(ID, address, -1)
ID = '0132'
for axis in axes:
self.send_command(ID, [axis, 2], 0)
def go_to_zero(self, axis):
"""
Make axis go to zero position
:return:
"""
ID = '0024'
for axes in axis:
self.send_command(ID, [axes], 0)
def single_step(self, axis, steps):
'''
Moves the given axis using the StepIncrement or StepDecrement command.
Using a steps argument different from 1 (or -1) simply sends multiple
StepIncrement/StepDecrement commands.
Uses distance and velocity set by `set_single_step_distance` resp.
`set_single_step_velocity`.
'''
if steps > 0:
ID = '0140'
else:
ID = '0141'
for _ in range(int(abs(steps))):
self.send_command(ID, [axis], 0)
self.wait_motor_stop([axis])
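# Usage sketch (axis number assumed, not part of the original source):
#
#   sm5.set_single_step_distance(1, 2.5)   # 2.5 um per step on axis 1
#   sm5.single_step(1, -4)                 # four StepDecrement commands on axis 1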
def set_single_step_distance(self, axis, distance):
'''
Distance (in um) for `single_step`.
'''
if distance > 255:
print('Step distance too long, setting distance at 255um')
distance = 255
ID = '013a'
data = [axis] + list(bytearray(struct.pack('f', distance)))
self.send_command(ID, data, 0)
def set_ramp_length(self, axis, length):
"""
Set the ramp length for the chosen axis
:param axis: axis which ramp shall be changed
:param length: 0<length<=16
:return:
"""
self.send_command('003a', [axis, length], 0)
pass
def wait_motor_stop(self, axis):
"""
Wait for the motor to stop
:param axis:
:return:
"""
res = 1
while res:
res = self.send_command('0120', axis, 7)
res = int(binascii.hexlify(res[6:7])[1:2])  # Python 3 equivalent of the original byte/char indexing
if __name__ == '__main__':
sm5 = LuigsNeumann_SM5('COM4')
print ('getting positions:')
for ax in range(1, 9):
print(ax, sm5.position(axis=ax))
time.sleep(2)
"""
print 'moving first manipulator (3 axes)'
sm5.relative_move_group([50, 50, 50], [1, 2, 3])
time.sleep(2)
print 'moving second manipulator (3 axes)'
sm5.relative_move_group([50, 50, 50], [4, 5, 6])
time.sleep(2)
print 'moving stage (2 axes)'
sm5.relative_move_group([50, 50], [7, 8])
"""
"""
Apparently: with two successive absolute moves, the second
cancels the first. With two successive relative moves, a sort of random
result is obtained, probably because the second cancels the first at midcourse.
"""
# for i in range(5):
# print (sm5.position(1))
# sm5.absolute_move(1000,1)
# time.sleep(1)
# print (sm5.position(1))
# sm5.absolute_move(1128,1)
# print (sm5.position(1))
# time.sleep(1)
#print ('getting positions')
#for ax in range(1, 9):
# print (ax, sm5.position(axis=ax))
#import keyboard # using module keyboard
#while True: # making a loop
# try: # used try so that if user pressed other than the given key error will not be shown
# if keyboard.is_pressed('a'):
# sm5.relative_move(10.,2)
# if keyboard.is_pressed('d'):
# sm5.relative_move(-10.,2)
# if keyboard.is_pressed('w'):
# sm5.relative_move(-10.,1)
# if keyboard.is_pressed('s'):
# sm5.relative_move(10.,1)
# if keyboard.is_pressed('r'):
# sm5.relative_move(10.,3)
# if keyboard.is_pressed('f'):
# sm5.relative_move(-10.,3)
# except:
# break # if user pressed a key other than the given key the loop break
| 29.148148 | 103 | 0.567238 |
7947746d6277d3e7b346273a345b775b5093ef54 | 1,024 | py | Python | prova/dashboard/views.py | mary023010/prova_django | a69a37a4f26f21018cef48e5d637dd630ca68877 | [
"MIT"
] | null | null | null | prova/dashboard/views.py | mary023010/prova_django | a69a37a4f26f21018cef48e5d637dd630ca68877 | [
"MIT"
] | null | null | null | prova/dashboard/views.py | mary023010/prova_django | a69a37a4f26f21018cef48e5d637dd630ca68877 | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login
from django.contrib import messages
from django.template import context
from relazioni.models import *
# Create your views here.
'''def login_user(request):
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None:
login(request,user)
messages.success(request, 'Login corretto!')
return redirect('selection_airport')
else:
messages.info(request, 'Username o Password errati!')
context = {}
return render(request,'dashboard/login.html',context)'''
def dashboard(request):
return render(request, 'dashboard/dashboard.html')
def visualizza_voli(request):
obj_ls = Fly.objects.all()
context = {'voli':obj_ls}
return render(request, 'dashboard/voli.html', context=context)
| 32 | 74 | 0.685547 |
794774c266428ad142b3455da8173b54a0414f2f | 69,667 | py | Python | tests/sonic_xcvr/test_cmis.py | sujinmkang/sonic-platform-common | ab5973504c7858759cfd4976b3995f0bc198f56d | [
"Apache-2.0"
] | null | null | null | tests/sonic_xcvr/test_cmis.py | sujinmkang/sonic-platform-common | ab5973504c7858759cfd4976b3995f0bc198f56d | [
"Apache-2.0"
] | null | null | null | tests/sonic_xcvr/test_cmis.py | sujinmkang/sonic-platform-common | ab5973504c7858759cfd4976b3995f0bc198f56d | [
"Apache-2.0"
] | 1 | 2021-06-30T20:53:15.000Z | 2021-06-30T20:53:15.000Z | from mock import MagicMock
import pytest
from sonic_platform_base.sonic_xcvr.api.public.cmis import CmisApi
from sonic_platform_base.sonic_xcvr.mem_maps.public.cmis import CmisMemMap
from sonic_platform_base.sonic_xcvr.xcvr_eeprom import XcvrEeprom
from sonic_platform_base.sonic_xcvr.codes.public.cmis import CmisCodes
from sonic_platform_base.sonic_xcvr.fields.consts import LENGTH_ASSEMBLY_FIELD, LEN_MULT_FIELD
class TestCmis(object):
codes = CmisCodes
mem_map = CmisMemMap(codes)
reader = MagicMock(return_value=None)
writer = MagicMock()
eeprom = XcvrEeprom(reader, writer, mem_map)
api = CmisApi(eeprom)
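# The fixtures above stub the transceiver EEPROM entirely: each test below patches
# api.xcvr_eeprom.read (or a higher-level getter) and asserts on the decoded value.
# A minimal sketch of the pattern (illustrative, mirrors test_get_model below):
#
#   api.xcvr_eeprom.read = MagicMock(return_value="1234567890")
#   assert api.get_model() == "1234567890"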
@pytest.mark.parametrize("mock_response, expected", [
("1234567890", "1234567890"),
("ABCD", "ABCD")
])
def test_get_model(self, mock_response, expected):
"""
Verify all api access valid fields
"""
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_model()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
("0.0", "0.0"),
("1.2", "1.2")
])
def test_get_vendor_rev(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_vendor_rev()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
("100000000", "100000000")
])
def test_get_serial(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_serial()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
(
'QSFP-DD Double Density 8X Pluggable Transceiver',
'QSFP-DD Double Density 8X Pluggable Transceiver'
)
])
def test_get_module_type(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_module_type()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
("LC", "LC")
])
def test_get_connector_type(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_connector_type()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
([0, 1], '0.1')
])
def test_get_module_hardware_revision(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.side_effect = mock_response
result = self.api.get_module_hardware_revision()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
([5,0], '5.0')
])
def test_get_cmis_rev(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.side_effect = mock_response
result = self.api.get_cmis_rev()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
("ModuleReady", "ModuleReady")
])
def test_get_module_state(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_module_state()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
("No Fault detected", "No Fault detected")
])
def test_get_module_fault_cause(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_module_fault_cause()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
([0, 1], '0.1')
])
def test_get_module_active_firmware(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.side_effect = mock_response
result = self.api.get_module_active_firmware()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
([0, 1], '0.1')
])
def test_get_module_inactive_firmware(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.side_effect = mock_response
result = self.api.get_module_inactive_firmware()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
([True, 55.0], 55.0),
([False, 55.0], 'N/A'),
([True, None], None),
])
def test_get_module_temperature(self, mock_response, expected):
self.api.get_temperature_support = MagicMock()
self.api.get_temperature_support.return_value = mock_response[0]
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response[1]
result = self.api.get_module_temperature()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
([True, 3.0], 3.0),
([False, 3.0], 'N/A'),
([True, None], None),
])
def test_get_voltage(self, mock_response, expected):
self.api.get_voltage_support = MagicMock()
self.api.get_voltage_support.return_value = mock_response[0]
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response[1]
result = self.api.get_voltage()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
(False, False)
])
def test_is_flat_memory(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.is_flat_memory()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
(False, True)
])
def test_get_temperature_support(self, mock_response, expected):
self.api.is_flat_memory = MagicMock()
self.api.is_flat_memory.return_value = mock_response
result = self.api.get_temperature_support()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
(False, True)
])
def test_get_voltage_support(self, mock_response, expected):
self.api.is_flat_memory = MagicMock()
self.api.is_flat_memory.return_value = mock_response
result = self.api.get_voltage_support()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
(False, True)
])
def test_get_rx_los_support(self, mock_response, expected):
self.api.is_flat_memory = MagicMock()
self.api.is_flat_memory.return_value = mock_response
result = self.api.get_rx_los_support()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
(False, True)
])
def test_get_tx_cdr_lol_support(self, mock_response, expected):
self.api.is_flat_memory = MagicMock()
self.api.is_flat_memory.return_value = mock_response
result = self.api.get_tx_cdr_lol_support()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
([True, {'TxCDRLOL1': 0}], {'TxCDRLOL1': False}),
([False, {'TxCDRLOL1': 0}], ['N/A','N/A','N/A','N/A','N/A','N/A','N/A','N/A']),
([None, None], None),
([True, None], None)
])
def test_get_tx_cdr_lol(self, mock_response, expected):
self.api.get_tx_cdr_lol_support = MagicMock()
self.api.get_tx_cdr_lol_support.return_value = mock_response[0]
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response[1]
result = self.api.get_tx_cdr_lol()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
([True, {'RxLOS1': 0}], {'RxLOS1': False}),
([False, {'RxLOS1': 0}], ['N/A','N/A','N/A','N/A','N/A','N/A','N/A','N/A']),
([None, None], None),
([True, None], None)
])
def test_get_rx_los(self, mock_response, expected):
self.api.get_rx_los_support = MagicMock()
self.api.get_rx_los_support.return_value = mock_response[0]
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response[1]
result = self.api.get_rx_los()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
(False, True)
])
def test_get_rx_cdr_lol_support(self, mock_response, expected):
self.api.is_flat_memory = MagicMock()
self.api.is_flat_memory.return_value = mock_response
result = self.api.get_rx_cdr_lol_support()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
([True, {'RxCDRLOL1': 0}], {'RxCDRLOL1': False}),
([False, {'RxCDRLOL1': 0}], ['N/A','N/A','N/A','N/A','N/A','N/A','N/A','N/A']),
([None, None], None),
([True, None], None)
])
def test_get_rx_cdr_lol(self, mock_response, expected):
self.api.get_rx_cdr_lol_support = MagicMock()
self.api.get_rx_cdr_lol_support.return_value = mock_response[0]
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response[1]
result = self.api.get_rx_cdr_lol()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
([{'TxPowerHighAlarmFlag1':0}, {'TxPowerLowAlarmFlag1':0}, {'TxPowerHighWarnFlag1':0}, {'TxPowerLowWarnFlag1':0}],
{
'tx_power_high_alarm':{
'TxPowerHighAlarmFlag1': False
},
'tx_power_low_alarm':{
'TxPowerLowAlarmFlag1': False
},
'tx_power_high_warn':{
'TxPowerHighWarnFlag1': False,
},
'tx_power_low_warn':{
'TxPowerLowWarnFlag1': False
}
}),
([None, None, None, None], None)
])
def test_get_tx_power_flag(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.side_effect = mock_response
result = self.api.get_tx_power_flag()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
([{'TxBiasHighAlarmFlag1':0}, {'TxBiasLowAlarmFlag1':0}, {'TxBiasHighWarnFlag1':0}, {'TxBiasLowWarnFlag1':0}],
{
'tx_bias_high_alarm':{
'TxBiasHighAlarmFlag1': False
},
'tx_bias_low_alarm':{
'TxBiasLowAlarmFlag1': False
},
'tx_bias_high_warn':{
'TxBiasHighWarnFlag1': False,
},
'tx_bias_low_warn':{
'TxBiasLowWarnFlag1': False
}
}),
([None, None, None, None], None)
])
def test_get_tx_bias_flag(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.side_effect = mock_response
result = self.api.get_tx_bias_flag()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
([{'RxPowerHighAlarmFlag1':0}, {'RxPowerLowAlarmFlag1':0}, {'RxPowerHighWarnFlag1':0}, {'RxPowerLowWarnFlag1':0}],
{
'rx_power_high_alarm':{
'RxPowerHighAlarmFlag1': False
},
'rx_power_low_alarm':{
'RxPowerLowAlarmFlag1': False
},
'rx_power_high_warn':{
'RxPowerHighWarnFlag1': False,
},
'rx_power_low_warn':{
'RxPowerLowWarnFlag1': False
}
}),
([None, None, None, None], None)
])
def test_get_rx_power_flag(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.side_effect = mock_response
result = self.api.get_rx_power_flag()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
({'TxOutputStatus1': 1}, {'TxOutputStatus1': True}),
(None, None),
])
def test_get_tx_output_status(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_tx_output_status()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
({'RxOutputStatus1': 1}, {'RxOutputStatus1': True}),
(None, None),
])
def test_get_rx_output_status(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_rx_output_status()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
(False, True)
])
def test_get_tx_bias_support(self, mock_response, expected):
self.api.is_flat_memory = MagicMock()
self.api.is_flat_memory.return_value = mock_response
result = self.api.get_tx_bias_support()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
([True, {'TxBias1': 0}], {'TxBias1': 0}),
([False, {'TxBias1': 0}], ['N/A','N/A','N/A','N/A','N/A','N/A','N/A','N/A']),
([None, None], None)
])
def test_get_tx_bias(self, mock_response, expected):
self.api.get_tx_bias_support = MagicMock()
self.api.get_tx_bias_support.return_value = mock_response[0]
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response[1]
result = self.api.get_tx_bias()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
(False, True)
])
def test_get_tx_power_support(self, mock_response, expected):
self.api.is_flat_memory = MagicMock()
self.api.is_flat_memory.return_value = mock_response
result = self.api.get_tx_power_support()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
([True, {'TxPower1': 0}], {'TxPower1': 0}),
([False, {'TxPower1': 0}], ['N/A','N/A','N/A','N/A','N/A','N/A','N/A','N/A']),
([None, None], None)
])
def test_get_tx_power(self, mock_response, expected):
self.api.get_tx_power_support = MagicMock()
self.api.get_tx_power_support.return_value = mock_response[0]
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response[1]
result = self.api.get_tx_power()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
(False, True)
])
def test_get_rx_power_support(self, mock_response, expected):
self.api.is_flat_memory = MagicMock()
self.api.is_flat_memory.return_value = mock_response
result = self.api.get_rx_power_support()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
([True, {'RxPower1': 0}], {'RxPower1': 0}),
([False, {'RxPower1': 0}], ['N/A','N/A','N/A','N/A','N/A','N/A','N/A','N/A']),
([None, None], None)
])
def test_get_rx_power(self, mock_response, expected):
self.api.get_rx_power_support = MagicMock()
self.api.get_rx_power_support.return_value = mock_response[0]
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response[1]
result = self.api.get_rx_power()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
([False, True], True)
])
def test_get_tx_fault_support(self, mock_response, expected):
self.api.is_flat_memory = MagicMock()
self.api.is_flat_memory.return_value = mock_response[0]
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response[1]
result = self.api.get_tx_fault_support()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
([True, {'TxFault1': 0}], {'TxFault1': False}),
([False, {'TxFault1': 0}], ['N/A','N/A','N/A','N/A','N/A','N/A','N/A','N/A']),
([None, None], None),
([True, None], None)
])
def test_get_tx_fault(self, mock_response, expected):
self.api.get_tx_fault_support = MagicMock()
self.api.get_tx_fault_support.return_value = mock_response[0]
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response[1]
result = self.api.get_tx_fault()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
(False, True)
])
def test_get_tx_los_support(self, mock_response, expected):
self.api.is_flat_memory = MagicMock()
self.api.is_flat_memory.return_value = mock_response
result = self.api.get_tx_los_support()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
([True, {'TxLOS1': 0}], {'TxLOS1': False}),
([False, {'TxLOS1': 0}], ['N/A','N/A','N/A','N/A','N/A','N/A','N/A','N/A']),
([None, None], None),
([True, None], None)
])
def test_get_tx_los(self, mock_response, expected):
self.api.get_tx_los_support = MagicMock()
self.api.get_tx_los_support.return_value = mock_response[0]
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response[1]
result = self.api.get_tx_los()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
([False, True], True)
])
def test_get_tx_disable_support(self, mock_response, expected):
self.api.is_flat_memory = MagicMock()
self.api.is_flat_memory.return_value = mock_response[0]
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response[1]
result = self.api.get_tx_disable_support()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
([True, 0x00], [False, False, False, False, False, False, False, False]),
([False, 0x00], ['N/A','N/A','N/A','N/A','N/A','N/A','N/A','N/A']),
([None, None], None),
([True, None], None)
])
def test_get_tx_disable(self, mock_response, expected):
self.api.get_tx_disable_support = MagicMock()
self.api.get_tx_disable_support.return_value = mock_response[0]
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response[1]
result = self.api.get_tx_disable()
assert result == expected
@pytest.mark.parametrize("input_param",[
(True), (False)
])
def test_tx_disable(self,input_param):
self.api.tx_disable(input_param)
@pytest.mark.parametrize("mock_response, expected", [
([True, 0x00], 0),
([False, 0x00], 'N/A'),
([None, None], None)
])
def test_get_tx_disable_channel(self, mock_response, expected):
self.api.get_tx_disable_support = MagicMock()
self.api.get_tx_disable_support.return_value = mock_response[0]
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response[1]
result = self.api.get_tx_disable_channel()
assert result == expected
@pytest.mark.parametrize("mock_response, input_param",[
(0, (0xff, True)),
(0, (0, True)),
(None, (0, False))
])
def test_tx_disable_channel(self, mock_response, input_param):
self.api.get_tx_disable_channel = MagicMock()
self.api.get_tx_disable_channel.return_value = mock_response
self.api.tx_disable_channel(*input_param)
def test_get_power_override(self):
self.api.get_power_override()
def test_set_power_override(self):
self.api.set_power_override(None, None)
@pytest.mark.parametrize("mock_response, expected", [
(False, True)
])
def test_get_transceiver_thresholds_support(self, mock_response, expected):
self.api.is_flat_memory = MagicMock()
self.api.is_flat_memory.return_value = mock_response
result = self.api.get_transceiver_thresholds_support()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
(None, False),
('Power Class 1', False),
('Power Class 8', True),
])
def test_get_lpmode_support(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_lpmode_support()
assert result == expected
def test_get_power_override_support(self):
result = self.api.get_power_override_support()
assert result == False
@pytest.mark.parametrize("mock_response, expected", [
("Single Mode Fiber (SMF)", "Single Mode Fiber (SMF)")
])
def test_get_module_media_type(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_module_media_type()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
("400GAUI-8 C2M (Annex 120E)", "400GAUI-8 C2M (Annex 120E)")
])
def test_get_host_electrical_interface(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_host_electrical_interface()
assert result == expected
@pytest.mark.parametrize("mock_response1, mock_response2, expected", [
("Single Mode Fiber (SMF)", "400ZR", "400ZR"),
("Multimode Fiber (MMF)", "100GE BiDi", "100GE BiDi"),
("Passive Copper Cable", "Copper cable", "Copper cable"),
("Active Cable Assembly", "Active Loopback module", "Active Loopback module"),
("BASE-T", "1000BASE-T (Clause 40)", "1000BASE-T (Clause 40)"),
("ABCD", "ABCD", "Unknown media interface")
])
def test_get_module_media_interface(self, mock_response1, mock_response2, expected):
self.api.get_module_media_type = MagicMock()
self.api.get_module_media_type.return_value = mock_response1
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response2
result = self.api.get_module_media_interface()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
('Copper cable', False),
('400ZR', True),
])
def test_is_coherent_module(self, mock_response, expected):
self.api.get_module_media_interface = MagicMock()
self.api.get_module_media_interface.return_value = mock_response
result = self.api.is_coherent_module()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
(8, 8)
])
def test_get_host_lane_count(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_host_lane_count()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
(1, 1)
])
def test_get_media_lane_count(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_media_lane_count()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
('C-band tunable laser', 'C-band tunable laser')
])
def test_get_media_interface_technology(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_media_interface_technology()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
(1, 1)
])
def test_get_host_lane_assignment_option(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_host_lane_assignment_option()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
(1, 1)
])
def test_get_media_lane_assignment_option(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_media_lane_assignment_option()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
({'ActiveAppSelLane1': 1},
{'ActiveAppSelLane1': 1})
])
def test_get_active_apsel_hostlane(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_active_apsel_hostlane()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
(-10, -10)
])
def test_get_tx_config_power(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_tx_config_power()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
(1, True),
(None, None),
])
def test_get_media_output_loopback(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_media_output_loopback()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
(1, True),
(None, None),
])
def test_get_media_input_loopback(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_media_input_loopback()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
(0x00, [False, False, False, False, False, False, False, False]),
(None, None),
])
def test_get_host_output_loopback(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_host_output_loopback()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
(0x00, [False, False, False, False, False, False, False, False]),
(None, None),
])
def test_get_host_input_loopback(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_host_input_loopback()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
(0xc2, (0,1,0)),
(None, None)
])
def test_get_aux_mon_type(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_aux_mon_type()
assert result == expected
@pytest.mark.parametrize("mock_response1, mock_response2, expected", [
(
[0,1,0],
[11520, 20480, -2560, 19200, 0],
{'monitor value': 45, 'high alarm': 80, 'low alarm': -10, 'high warn': 75, 'low warn': 0}
),
(
[0,0,0],
[11520, 20480, -2560, 19200, 0],
{'monitor value': 45, 'high alarm': 80, 'low alarm': -10, 'high warn': 75, 'low warn': 0}
),
(
[0,1,1],
[11520, 20480, -2560, 19200, 0],
None
),
])
def test_get_laser_temperature(self, mock_response1, mock_response2, expected):
self.api.get_aux_mon_type = MagicMock()
self.api.get_aux_mon_type.return_value = mock_response1
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.side_effect = mock_response2
result = self.api.get_laser_temperature()
assert result == expected
@pytest.mark.parametrize("mock_response1, mock_response2, expected", [
(
[0,1,0],
[32767, 65534, 0, 49150.5, 0],
{'monitor value': 1, 'high alarm': 2, 'low alarm': 0, 'high warn': 1.5, 'low warn': 0}
),
(
[1,0,0],
[32767, 65534, 0, 49150.5, 0],
{'monitor value': 1, 'high alarm': 2, 'low alarm': 0, 'high warn': 1.5, 'low warn': 0}
),
(
[0,0,0],
[32767, 65534, 0, 49150.5, 0],
None
),
])
def test_get_laser_TEC_current(self, mock_response1, mock_response2, expected):
self.api.get_aux_mon_type = MagicMock()
self.api.get_aux_mon_type.return_value = mock_response1
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.side_effect = mock_response2
result = self.api.get_laser_TEC_current()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
({'ConfigStatusLane1': 'ConfigSuccess'},
{'ConfigStatusLane1': 'ConfigSuccess'})
])
def test_get_config_datapath_hostlane_status(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_config_datapath_hostlane_status()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
({'DP1State': 'DataPathActivated'},
{'DP1State': 'DataPathActivated'})
])
def test_get_datapath_state(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_datapath_state()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
({'DPInitPending1': 0}, {'DPInitPending1': False}),
(None, None)
])
def test_get_dpinit_pending(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_dpinit_pending()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
([-20, 0], (-20,0))
])
def test_get_supported_power_config(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.side_effect = mock_response
result = self.api.get_supported_power_config()
assert result == expected
def test_reset_module(self):
self.api.reset_module(True)
def test_set_low_power(self):
self.api.set_low_power(True)
@pytest.mark.parametrize("mock_response, expected", [
(127,
{
'simultaneous_host_media_loopback_supported': True,
'per_lane_media_loopback_supported': True,
'per_lane_host_loopback_supported': True,
'host_side_input_loopback_supported': True,
'host_side_output_loopback_supported': True,
'media_side_input_loopback_supported': True,
'media_side_output_loopback_supported': True
}),
(None, None)
])
def test_get_loopback_capability(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_loopback_capability()
assert result == expected
@pytest.mark.parametrize("input_param, mock_response",[
('none', {
'host_side_input_loopback_supported': True,
'host_side_output_loopback_supported': True,
'media_side_input_loopback_supported': True,
'media_side_output_loopback_supported': True
}),
('host-side-input', {
'host_side_input_loopback_supported': True,
'host_side_output_loopback_supported': True,
'media_side_input_loopback_supported': True,
'media_side_output_loopback_supported': True
}),
('host-side-output', {
'host_side_input_loopback_supported': True,
'host_side_output_loopback_supported': True,
'media_side_input_loopback_supported': True,
'media_side_output_loopback_supported': True
}),
('media-side-input', {
'host_side_input_loopback_supported': True,
'host_side_output_loopback_supported': True,
'media_side_input_loopback_supported': True,
'media_side_output_loopback_supported': True
}),
('media-side-output', {
'host_side_input_loopback_supported': True,
'host_side_output_loopback_supported': True,
'media_side_input_loopback_supported': True,
'media_side_output_loopback_supported': True
}),
])
def test_set_loopback_mode(self, input_param, mock_response):
self.api.get_loopback_capability = MagicMock()
self.api.get_loopback_capability.return_value = mock_response
self.api.set_loopback_mode(input_param)
def test_get_cdb_api(self):
self.api.get_cdb_api()
def test_get_vdm_api(self):
self.api.get_vdm_api()
@pytest.mark.parametrize("mock_response, expected",[
(
{'Pre-FEC BER Average Media Input': {1: [0.001, 0.0125, 0, 0.01, 0, False, False, False, False]}},
{'Pre-FEC BER Average Media Input': {1: [0.001, 0.0125, 0, 0.01, 0, False, False, False, False]}}
)
])
def test_get_vdm(self, mock_response, expected):
self.api.vdm = MagicMock()
self.api.vdm.get_vdm_allpage = MagicMock()
self.api.vdm.get_vdm_allpage.return_value = mock_response
result = self.api.get_vdm()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
(1, (False, False, True))
])
def test_get_module_firmware_fault_state_changed(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response
result = self.api.get_module_firmware_fault_state_changed()
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
([0, 0, 0],
{
'voltage_flags': {
'voltage_high_alarm_flag': False,
'voltage_low_alarm_flag': False,
'voltage_high_warn_flag': False,
'voltage_low_warn_flag': False
},
'case_temp_flags': {
'case_temp_high_alarm_flag': False,
'case_temp_low_alarm_flag': False,
'case_temp_high_warn_flag': False,
'case_temp_low_warn_flag': False
},
'aux1_flags': {
'aux1_high_alarm_flag': False,
'aux1_low_alarm_flag': False,
'aux1_high_warn_flag': False,
'aux1_low_warn_flag': False
},
'aux2_flags': {
'aux2_high_alarm_flag': False,
'aux2_low_alarm_flag': False,
'aux2_high_warn_flag': False,
'aux2_low_warn_flag': False
},
'aux3_flags': {
'aux3_high_alarm_flag': False,
'aux3_low_alarm_flag': False,
'aux3_high_warn_flag': False,
'aux3_low_warn_flag': False
},
'custom_mon_flags': {
'custom_mon_high_alarm_flag': False,
'custom_mon_low_alarm_flag': False,
'custom_mon_high_warn_flag': False,
'custom_mon_low_warn_flag': False
}
}),
([None, None, None], None)
])
def test_get_module_level_flag(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.side_effect = mock_response
result = self.api.get_module_level_flag()
assert result == expected
@pytest.mark.parametrize("input_param, mock_response1, mock_response2, expected", [
(
False,
[0x77, 0xff],
[18, 35, (0, 7, 112, 255, 255, 16, 0, 0, 19, 136, 0, 100, 3, 232, 19, 136, 58, 152)],
{'status':True, 'info': 'Auto page support: True\nMax write length: 2048\nStart payload size 112\nMax block size 2048\nWrite to EPL supported\nAbort CMD102h supported True\nGet module FW upgrade features time: 0.00 s\n', 'result': (112, 2048, False, True, 2048)}
),
(
False,
[0x77, 0xff],
[18, 35, (0, 7, 112, 255, 255, 1, 0, 0, 19, 136, 0, 100, 3, 232, 19, 136, 58, 152)],
{'status':True, 'info': 'Auto page support: True\nMax write length: 2048\nStart payload size 112\nMax block size 2048\nWrite to LPL supported\nAbort CMD102h supported True\nGet module FW upgrade features time: 0.00 s\n', 'result': (112, 2048, True, True, 2048)}
),
])
def test_get_module_fw_upgrade_feature(self, input_param, mock_response1, mock_response2, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.side_effect = mock_response1
self.api.cdb = MagicMock()
self.api.cdb.get_fw_management_features = MagicMock()
self.api.cdb.get_fw_management_features.return_value = mock_response2
self.api.cdb.cdb_chkcode = MagicMock()
self.api.cdb.cdb_chkcode.return_value = mock_response2[1]
result = self.api.get_module_fw_upgrade_feature(input_param)
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
(
[110, 26, (3, 3, 0, 0, 0, 1, 1, 4, 3, 0, 0, 100, 3, 232, 19, 136, 58, 152, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 4, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)],
{'status':True, 'info': 'Get module FW info\nImage A Version: 0.0.1\nImage B Version: 0.0.0\nRunning Image: A; Committed Image: A\nGet module FW info time: 0.00 s\n', 'result': ('0.0.1', 1, 1, 0, '0.0.0', 0, 0, 0)}
),
(
[110, 26, (48, 3, 0, 0, 0, 1, 1, 4, 3, 0, 0, 100, 3, 232, 19, 136, 58, 152, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 4, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)],
{'status':True, 'info': 'Get module FW info\nImage A Version: 0.0.1\nImage B Version: 0.0.0\nRunning Image: B; Committed Image: B\nGet module FW info time: 0.00 s\n', 'result': ('0.0.1', 0, 0, 0, '0.0.0', 1, 1, 0)}
),
])
def test_get_module_fw_info(self, mock_response, expected):
self.api.cdb = MagicMock()
self.api.cdb.get_fw_info = MagicMock()
self.api.cdb.get_fw_info.return_value = mock_response
self.api.cdb.cdb_chkcode = MagicMock()
self.api.cdb.cdb_chkcode.return_value = mock_response[1]
result = self.api.get_module_fw_info()
assert result == expected
@pytest.mark.parametrize("input_param, mock_response, expected", [
(1, 1, (True, 'Module FW run: Success\nModule FW run time: 0.00 s\n')),
(1, 64, (False, 'Module FW run: Fail\nFW_run_status 64\n')),
])
def test_module_fw_run(self, input_param, mock_response, expected):
self.api.cdb = MagicMock()
self.api.cdb.run_fw_image = MagicMock()
self.api.cdb.run_fw_image.return_value = mock_response
result = self.api.module_fw_run(input_param)
assert result == expected
@pytest.mark.parametrize("mock_response, expected", [
(1, (True, 'Module FW commit: Success\nModule FW commit time: 0.00 s\n')),
(64, (False, 'Module FW commit: Fail\nFW_commit_status 64\n')),
])
def test_module_fw_commit(self, mock_response, expected):
self.api.cdb = MagicMock()
self.api.cdb.commit_fw_image = MagicMock()
self.api.cdb.commit_fw_image.return_value = mock_response
result = self.api.module_fw_commit()
assert result == expected
@pytest.mark.parametrize("input_param, mock_response, expected", [
(
'abc',
[{'status': True, 'info': '', 'result': ('a', 1, 1, 0, 'b', 0, 0, 0)}, {'status': True, 'info': '', 'result': (112, 2048, True, True, 2048)}, (True, ''), (True, '')],
(True, '')
),
(
'abc',
[{'status': False, 'info': '', 'result': None}, {'status': True, 'info': '', 'result': (112, 2048, True, True, 2048)}, (True, ''), (True, '')],
(False, '')
),
(
'abc',
[{'status': True, 'info': '', 'result': ('a', 1, 1, 0, 'b', 0, 0, 0)}, {'status': False, 'info': '', 'result': None}, (True, ''), (True, '')],
(False, '')
),
(
'abc',
[{'status': True, 'info': '', 'result': ('a', 1, 1, 0, 'b', 0, 0, 0)}, {'status': True, 'info': '', 'result': (112, 2048, True, True, 2048)}, (False, ''), (True, '')],
(False, '')
),
])
def test_module_fw_upgrade(self, input_param, mock_response, expected):
self.api.get_module_fw_info = MagicMock()
self.api.get_module_fw_info.return_value = mock_response[0]
self.api.get_module_fw_upgrade_feature = MagicMock()
self.api.get_module_fw_upgrade_feature.return_value = mock_response[1]
self.api.module_fw_download = MagicMock()
self.api.module_fw_download.return_value = mock_response[2]
self.api.module_fw_switch = MagicMock()
self.api.module_fw_switch.return_value = mock_response[3]
result = self.api.module_fw_upgrade(input_param)
assert result == expected
@pytest.mark.parametrize("mock_response, expected",[
([None, None, None, None, None, None, None, None, None, None, None, None, None, None], None),
(
[
{
'Extended Identifier': {'Power Class': 'Power Class 8', 'MaxPower': 20.0},
'Identifier': 'QSFP-DD Double Density 8X Pluggable Transceiver',
'Identifier Abbreviation': 'QSFP-DD',
'ModuleHardwareMajorRevision': 0,
'ModuleHardwareMinorRevision': 0,
'VendorSN': '00000000',
'VendorName': 'VENDOR_NAME',
'VendorPN': 'ABCD',
'Connector': 'LC',
'Length Cable Assembly': 0.0,
'ModuleMediaType': 'Single Mode Fiber (SMF)',
'VendorDate': '21010100',
'VendorOUI': 'xx-xx-xx'
},
'400GAUI-8 C2M (Annex 120E)',
'400ZR, DWDM, amplified',
8, 1, 1, 1,
{'ActiveAppSelLane1': 1, 'ActiveAppSelLane2': 1, 'ActiveAppSelLane3': 1, 'ActiveAppSelLane4': 1,
'ActiveAppSelLane5': 1, 'ActiveAppSelLane6': 1, 'ActiveAppSelLane7': 1, 'ActiveAppSelLane8': 1},
'1550 nm DFB',
'0.0',
'5.0',
'0.1',
'0.0',
'Single Mode Fiber (SMF)'
],
{ 'type': 'QSFP-DD Double Density 8X Pluggable Transceiver',
'type_abbrv_name': 'QSFP-DD',
'model': 'ABCD',
'encoding': 'N/A',
'ext_identifier': 'Power Class 8 (20.0W Max)',
'ext_rateselect_compliance': 'N/A',
'cable_type': 'Length Cable Assembly(m)',
'cable_length': 0.0,
'nominal_bit_rate': 0,
'specification_compliance': 'Single Mode Fiber (SMF)',
'application_advertisement': 'N/A',
'active_firmware': '0.1',
'media_lane_count': 1,
'inactive_firmware': '0.0',
'vendor_rev': '0.0',
'host_electrical_interface': '400GAUI-8 C2M (Annex 120E)',
'vendor_oui': 'xx-xx-xx',
'manufacturer': 'VENDOR_NAME',
'media_interface_technology': '1550 nm DFB',
'media_interface_code': '400ZR, DWDM, amplified',
'serial': '00000000',
'host_lane_count': 8,
'active_apsel_hostlane1': 1,
'active_apsel_hostlane3': 1,
'active_apsel_hostlane2': 1,
'active_apsel_hostlane5': 1,
'active_apsel_hostlane4': 1,
'active_apsel_hostlane7': 1,
'active_apsel_hostlane6': 1,
'active_apsel_hostlane8': 1,
'hardware_rev': '0.0',
'cmis_rev': '5.0',
'media_lane_assignment_option': 1,
'connector': 'LC',
'host_lane_assignment_option': 1,
'vendor_date': '21010100'
}
)
])
def test_get_transceiver_info(self, mock_response, expected):
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response[0]
self.api.get_host_electrical_interface = MagicMock()
self.api.get_host_electrical_interface.return_value = mock_response[1]
self.api.get_module_media_interface = MagicMock()
self.api.get_module_media_interface.return_value = mock_response[2]
self.api.get_host_lane_count = MagicMock()
self.api.get_host_lane_count.return_value = mock_response[3]
self.api.get_media_lane_count = MagicMock()
self.api.get_media_lane_count.return_value = mock_response[4]
self.api.get_host_lane_assignment_option = MagicMock()
self.api.get_host_lane_assignment_option.return_value = mock_response[5]
self.api.get_media_lane_assignment_option = MagicMock()
self.api.get_media_lane_assignment_option.return_value = mock_response[6]
self.api.get_active_apsel_hostlane = MagicMock()
self.api.get_active_apsel_hostlane.return_value = mock_response[7]
self.api.get_media_interface_technology = MagicMock()
self.api.get_media_interface_technology.return_value = mock_response[8]
self.api.get_vendor_rev = MagicMock()
self.api.get_vendor_rev.return_value = mock_response[9]
self.api.get_cmis_rev = MagicMock()
self.api.get_cmis_rev.return_value = mock_response[10]
self.api.get_module_active_firmware = MagicMock()
self.api.get_module_active_firmware.return_value = mock_response[11]
self.api.get_module_inactive_firmware = MagicMock()
self.api.get_module_inactive_firmware.return_value = mock_response[12]
self.api.get_module_media_type = MagicMock()
self.api.get_module_media_type.return_value = mock_response[13]
result = self.api.get_transceiver_info()
assert result == expected
@pytest.mark.parametrize("mock_response, expected",[
(
[
{'RxLOS8': False, 'RxLOS2': False, 'RxLOS3': False, 'RxLOS1': False,
'RxLOS6': False, 'RxLOS7': False, 'RxLOS4': False, 'RxLOS5': False},
{'TxFault1': False, 'TxFault2': False, 'TxFault3': False, 'TxFault4': False,
'TxFault5': False, 'TxFault6': False, 'TxFault7': False, 'TxFault8': False},
[False, False, False, False, False, False, False, False],
0,
50,
3.3,
{'LaserBiasTx1Field': 70, 'LaserBiasTx2Field': 70,
'LaserBiasTx3Field': 70, 'LaserBiasTx4Field': 70,
'LaserBiasTx5Field': 70, 'LaserBiasTx6Field': 70,
'LaserBiasTx7Field': 70, 'LaserBiasTx8Field': 70},
{'OpticalPowerRx1Field': 0.1, 'OpticalPowerRx2Field': 0,
'OpticalPowerRx3Field': 0, 'OpticalPowerRx4Field': 0,
'OpticalPowerRx5Field': 0, 'OpticalPowerRx6Field': 0,
'OpticalPowerRx7Field': 0, 'OpticalPowerRx8Field': 0,},
{'OpticalPowerTx1Field': 0.1, 'OpticalPowerTx2Field': 0,
'OpticalPowerTx3Field': 0, 'OpticalPowerTx4Field': 0,
'OpticalPowerTx5Field': 0, 'OpticalPowerTx6Field': 0,
'OpticalPowerTx7Field': 0, 'OpticalPowerTx8Field': 0,},
True, True,
{'monitor value': 40},
{
'Pre-FEC BER Average Media Input':{1:[0.001, 0.0125, 0, 0.01, 0, False, False, False, False]},
'Errored Frames Average Media Input':{1:[0, 1, 0, 1, 0, False, False, False, False]},
}
],
{
'temperature': 50,
'voltage': 3.3,
'tx1power': 0.1, 'tx2power': 0, 'tx3power': 0, 'tx4power': 0,
'tx5power': 0, 'tx6power': 0, 'tx7power': 0, 'tx8power': 0,
'rx1power': 0.1, 'rx2power': 0, 'rx3power': 0, 'rx4power': 0,
'rx5power': 0, 'rx6power': 0, 'rx7power': 0, 'rx8power': 0,
'tx1bias': 70, 'tx2bias': 70, 'tx3bias': 70, 'tx4bias': 70,
'tx5bias': 70, 'tx6bias': 70, 'tx7bias': 70, 'tx8bias': 70,
'rx_los': False,
'tx_fault': False,
'tx_disable': False,
'tx_disabled_channel': 0,
'laser_temperature': 40,
'prefec_ber': 0.001,
'postfec_ber': 0,
}
)
])
def test_get_transceiver_bulk_status(self, mock_response, expected):
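        # mock_response supplies, in order, the per-lane status dicts and DOM
        # readings returned by the mocked getters below; the expected dict is
        # the flattened bulk-status view built from them.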
self.api.get_rx_los = MagicMock()
self.api.get_rx_los.return_value = mock_response[0]
self.api.get_tx_fault = MagicMock()
self.api.get_tx_fault.return_value = mock_response[1]
self.api.get_tx_disable = MagicMock()
self.api.get_tx_disable.return_value = mock_response[2]
self.api.get_tx_disable_channel = MagicMock()
self.api.get_tx_disable_channel.return_value = mock_response[3]
self.api.get_module_temperature = MagicMock()
self.api.get_module_temperature.return_value = mock_response[4]
self.api.get_voltage = MagicMock()
self.api.get_voltage.return_value = mock_response[5]
self.api.get_tx_bias = MagicMock()
self.api.get_tx_bias.return_value = mock_response[6]
self.api.get_rx_power = MagicMock()
self.api.get_rx_power.return_value = mock_response[7]
self.api.get_tx_power = MagicMock()
self.api.get_tx_power.return_value = mock_response[8]
self.api.get_rx_los_support = MagicMock()
self.api.get_rx_los_support.return_value = mock_response[9]
self.api.get_tx_fault_support = MagicMock()
self.api.get_tx_fault_support.return_value = mock_response[10]
self.api.get_laser_temperature = MagicMock()
self.api.get_laser_temperature.return_value = mock_response[11]
self.api.get_vdm = MagicMock()
self.api.get_vdm.return_value = mock_response[12]
result = self.api.get_transceiver_bulk_status()
assert result == expected
@pytest.mark.parametrize("mock_response, expected",[
(
[
True,
{
'TempHighAlarm': 80, 'TempLowAlarm': 0, 'TempHighWarning': 75, 'TempLowWarning': 10,
'VoltageHighAlarm': 3.5, 'VoltageLowAlarm': 3.1, 'VoltageHighWarning': 3.45, 'VoltageLowWarning': 3.15,
'RxPowerHighAlarm': 1.0, 'RxPowerLowAlarm': 0.01, 'RxPowerHighWarning': 1.0, 'RxPowerLowWarning': 0.01,
'TxPowerHighAlarm': 1.0, 'TxPowerLowAlarm': 0.01, 'TxPowerHighWarning': 1.0, 'TxPowerLowWarning': 0.01,
'TxHighAlarm': 90, 'TxLowAlarm': 10, 'TxHighWarning': 80, 'TxLowWarning': 20,
},
{'high alarm': 80, 'low alarm': 10, 'high warn': 75, 'low warn': 20},
{
'Pre-FEC BER Average Media Input':{1:[0.001, 0.0125, 0, 0.01, 0, False, False, False, False]},
'Errored Frames Average Media Input':{1:[0, 1, 0, 1, 0, False, False, False, False]},
}
],
{
'temphighalarm': 80, 'templowalarm': 0, 'temphighwarning': 75, 'templowwarning': 10,
'vcchighalarm': 3.5, 'vcclowalarm': 3.1, 'vcchighwarning': 3.45, 'vcclowwarning': 3.15,
'txpowerhighalarm': 0.0, 'txpowerlowalarm': -20.0, 'txpowerhighwarning': 0.0, 'txpowerlowwarning': -20.0,
'rxpowerhighalarm': 0.0, 'rxpowerlowalarm': -20.0, 'rxpowerhighwarning': 0.0, 'rxpowerlowwarning': -20.0,
'txbiashighalarm': 90, 'txbiaslowalarm': 10, 'txbiashighwarning': 80, 'txbiaslowwarning': 20,
'lasertemphighalarm': 80, 'lasertemplowalarm': 10, 'lasertemphighwarning': 75, 'lasertemplowwarning': 20,
'prefecberhighalarm': 0.0125, 'prefecberlowalarm': 0, 'prefecberhighwarning': 0.01, 'prefecberlowwarning': 0,
'postfecberhighalarm': 1, 'postfecberlowalarm': 0, 'postfecberhighwarning': 1, 'postfecberlowwarning': 0,
}
),
([None, None, None, None], None),
(
[False, None, None, None],
{
'temphighalarm': 'N/A', 'templowalarm': 'N/A', 'temphighwarning': 'N/A', 'templowwarning': 'N/A',
'vcchighalarm': 'N/A', 'vcclowalarm': 'N/A', 'vcchighwarning': 'N/A', 'vcclowwarning': 'N/A',
'txpowerhighalarm': 'N/A', 'txpowerlowalarm': 'N/A', 'txpowerhighwarning': 'N/A', 'txpowerlowwarning': 'N/A',
'rxpowerhighalarm': 'N/A', 'rxpowerlowalarm': 'N/A', 'rxpowerhighwarning': 'N/A', 'rxpowerlowwarning': 'N/A',
'txbiashighalarm': 'N/A', 'txbiaslowalarm': 'N/A', 'txbiashighwarning': 'N/A', 'txbiaslowwarning': 'N/A',
}
),
([True, None, None, None], None)
])
def test_get_transceiver_threshold_info(self, mock_response, expected):
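        # Cases: thresholds supported with full data, support status unknown
        # (None), unsupported (all fields reported as 'N/A'), and supported but
        # with no threshold data available.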
self.api.get_transceiver_thresholds_support = MagicMock()
self.api.get_transceiver_thresholds_support.return_value = mock_response[0]
self.api.xcvr_eeprom.read = MagicMock()
self.api.xcvr_eeprom.read.return_value = mock_response[1]
self.api.get_laser_temperature = MagicMock()
self.api.get_laser_temperature.return_value = mock_response[2]
self.api.get_vdm = MagicMock()
self.api.get_vdm.return_value = mock_response[3]
result = self.api.get_transceiver_threshold_info()
assert result == expected
@pytest.mark.parametrize("mock_response, expected",[
(
[
'ModuleReady', 'No Fault detected', (False, False, True),
{'DP1State': 'DataPathActivated', 'DP2State': 'DataPathActivated',
'DP3State': 'DataPathActivated', 'DP4State': 'DataPathActivated',
'DP5State': 'DataPathActivated', 'DP6State': 'DataPathActivated',
'DP7State': 'DataPathActivated', 'DP8State': 'DataPathActivated'},
{'TxOutputStatus1': True},
{
'RxOutputStatus1': True, 'RxOutputStatus2': True,
'RxOutputStatus3': True, 'RxOutputStatus4': True,
'RxOutputStatus5': True, 'RxOutputStatus6': True,
'RxOutputStatus7': True, 'RxOutputStatus8': True
},
{'TxFault1': False},
{
'TxLOS1': False, 'TxLOS2': False, 'TxLOS3': False, 'TxLOS4': False,
'TxLOS5': False, 'TxLOS6': False, 'TxLOS7': False, 'TxLOS8': False
},
{
'TxCDRLOL1': False, 'TxCDRLOL2': False, 'TxCDRLOL3': False, 'TxCDRLOL4': False,
'TxCDRLOL5': False, 'TxCDRLOL6': False, 'TxCDRLOL7': False, 'TxCDRLOL8': False
},
{'RxLOS1': False},
{'RxCDRLOL1': False},
{
'ConfigStatusLane1': 'ConfigSuccess', 'ConfigStatusLane2': 'ConfigSuccess',
'ConfigStatusLane3': 'ConfigSuccess', 'ConfigStatusLane4': 'ConfigSuccess',
'ConfigStatusLane5': 'ConfigSuccess', 'ConfigStatusLane6': 'ConfigSuccess',
'ConfigStatusLane7': 'ConfigSuccess', 'ConfigStatusLane8': 'ConfigSuccess'
},
{
'DPInitPending1': False, 'DPInitPending2': False,
'DPInitPending3': False, 'DPInitPending4': False,
'DPInitPending5': False, 'DPInitPending6': False,
'DPInitPending7': False, 'DPInitPending8': False
},
{
'case_temp_flags': {
'case_temp_high_alarm_flag': False,
'case_temp_low_alarm_flag': False,
'case_temp_high_warn_flag': False,
'case_temp_low_warn_flag': False,
},
'voltage_flags': {
'voltage_high_alarm_flag': False,
'voltage_low_alarm_flag': False,
'voltage_high_warn_flag': False,
'voltage_low_warn_flag': False,
},
'aux1_flags': {
'aux1_high_alarm_flag': False,
'aux1_low_alarm_flag': False,
'aux1_high_warn_flag': False,
'aux1_low_warn_flag': False,
},
'aux2_flags': {
'aux2_high_alarm_flag': False,
'aux2_low_alarm_flag': False,
'aux2_high_warn_flag': False,
'aux2_low_warn_flag': False,
},
'aux3_flags': {
'aux3_high_alarm_flag': False,
'aux3_low_alarm_flag': False,
'aux3_high_warn_flag': False,
'aux3_low_warn_flag': False,
}
},
(0, 0, 0),
{
'tx_power_high_alarm': {'TxPowerHighAlarmFlag1': False},
'tx_power_low_alarm': {'TxPowerLowAlarmFlag1': False},
'tx_power_high_warn': {'TxPowerHighWarnFlag1': False},
'tx_power_low_warn': {'TxPowerLowWarnFlag1': False},
},
{
'rx_power_high_alarm': {'RxPowerHighAlarmFlag1': False},
'rx_power_low_alarm': {'RxPowerLowAlarmFlag1': False},
'rx_power_high_warn': {'RxPowerHighWarnFlag1': False},
'rx_power_low_warn': {'RxPowerLowWarnFlag1': False},
},
{
'tx_bias_high_alarm': {'TxBiasHighAlarmFlag1': False},
'tx_bias_low_alarm': {'TxBiasLowAlarmFlag1': False},
'tx_bias_high_warn': {'TxBiasHighWarnFlag1': False},
'tx_bias_low_warn': {'TxBiasLowWarnFlag1': False},
},
{
'Pre-FEC BER Average Media Input':{1:[0.001, 0.0125, 0, 0.01, 0, False, False, False, False]},
'Errored Frames Average Media Input':{1:[0, 1, 0, 1, 0, False, False, False, False]},
}
],
{
'module_state': 'ModuleReady',
'module_fault_cause': 'No Fault detected',
'datapath_firmware_fault': False,
'module_firmware_fault': False,
'module_state_changed': True,
'DP1State': 'DataPathActivated',
'DP2State': 'DataPathActivated',
'DP3State': 'DataPathActivated',
'DP4State': 'DataPathActivated',
'DP5State': 'DataPathActivated',
'DP6State': 'DataPathActivated',
'DP7State': 'DataPathActivated',
'DP8State': 'DataPathActivated',
'txoutput_status': True,
'rxoutput_status_hostlane1': True,
'rxoutput_status_hostlane2': True,
'rxoutput_status_hostlane3': True,
'rxoutput_status_hostlane4': True,
'rxoutput_status_hostlane5': True,
'rxoutput_status_hostlane6': True,
'rxoutput_status_hostlane7': True,
'rxoutput_status_hostlane8': True,
'txfault': False,
'txlos_hostlane1': False,
'txlos_hostlane2': False,
'txlos_hostlane3': False,
'txlos_hostlane4': False,
'txlos_hostlane5': False,
'txlos_hostlane6': False,
'txlos_hostlane7': False,
'txlos_hostlane8': False,
'txcdrlol_hostlane1': False,
'txcdrlol_hostlane2': False,
'txcdrlol_hostlane3': False,
'txcdrlol_hostlane4': False,
'txcdrlol_hostlane5': False,
'txcdrlol_hostlane6': False,
'txcdrlol_hostlane7': False,
'txcdrlol_hostlane8': False,
'rxlos': False,
'rxcdrlol': False,
'config_state_hostlane1': 'ConfigSuccess',
'config_state_hostlane2': 'ConfigSuccess',
'config_state_hostlane3': 'ConfigSuccess',
'config_state_hostlane4': 'ConfigSuccess',
'config_state_hostlane5': 'ConfigSuccess',
'config_state_hostlane6': 'ConfigSuccess',
'config_state_hostlane7': 'ConfigSuccess',
'config_state_hostlane8': 'ConfigSuccess',
'dpinit_pending_hostlane1': False,
'dpinit_pending_hostlane2': False,
'dpinit_pending_hostlane3': False,
'dpinit_pending_hostlane4': False,
'dpinit_pending_hostlane5': False,
'dpinit_pending_hostlane6': False,
'dpinit_pending_hostlane7': False,
'dpinit_pending_hostlane8': False,
'temphighalarm_flag': False, 'templowalarm_flag': False,
'temphighwarning_flag': False, 'templowwarning_flag': False,
'vcchighalarm_flag': False, 'vcclowalarm_flag': False,
'vcchighwarning_flag': False, 'vcclowwarning_flag': False,
'lasertemphighalarm_flag': False, 'lasertemplowalarm_flag': False,
'lasertemphighwarning_flag': False, 'lasertemplowwarning_flag': False,
'txpowerhighalarm_flag': False, 'txpowerlowalarm_flag': False,
'txpowerhighwarning_flag': False, 'txpowerlowwarning_flag': False,
'rxpowerhighalarm_flag': False, 'rxpowerlowalarm_flag': False,
'rxpowerhighwarning_flag': False, 'rxpowerlowwarning_flag': False,
'txbiashighalarm_flag': False, 'txbiaslowalarm_flag': False,
'txbiashighwarning_flag': False, 'txbiaslowwarning_flag': False,
'prefecberhighalarm_flag': False, 'prefecberlowalarm_flag': False,
'prefecberhighwarning_flag': False, 'prefecberlowwarning_flag': False,
'postfecberhighalarm_flag': False, 'postfecberlowalarm_flag': False,
'postfecberhighwarning_flag': False, 'postfecberlowwarning_flag': False,
}
)
])
def test_get_transceiver_status(self, mock_response, expected):
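        # mock_response entries map positionally to the mocked getters below
        # (module/datapath state, per-lane status and flag dicts, module-level
        # flags, aux monitor types and VDM data); the expected dict is the
        # flattened status view get_transceiver_status() should assemble.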
self.api.get_module_state = MagicMock()
self.api.get_module_state.return_value = mock_response[0]
self.api.get_module_fault_cause = MagicMock()
self.api.get_module_fault_cause.return_value = mock_response[1]
self.api.get_module_firmware_fault_state_changed = MagicMock()
self.api.get_module_firmware_fault_state_changed.return_value = mock_response[2]
self.api.get_datapath_state = MagicMock()
self.api.get_datapath_state.return_value = mock_response[3]
self.api.get_tx_output_status = MagicMock()
self.api.get_tx_output_status.return_value = mock_response[4]
self.api.get_rx_output_status = MagicMock()
self.api.get_rx_output_status.return_value = mock_response[5]
self.api.get_tx_fault = MagicMock()
self.api.get_tx_fault.return_value = mock_response[6]
self.api.get_tx_los = MagicMock()
self.api.get_tx_los.return_value = mock_response[7]
self.api.get_tx_cdr_lol = MagicMock()
self.api.get_tx_cdr_lol.return_value = mock_response[8]
self.api.get_rx_los = MagicMock()
self.api.get_rx_los.return_value = mock_response[9]
self.api.get_rx_cdr_lol = MagicMock()
self.api.get_rx_cdr_lol.return_value = mock_response[10]
self.api.get_config_datapath_hostlane_status = MagicMock()
self.api.get_config_datapath_hostlane_status.return_value = mock_response[11]
self.api.get_dpinit_pending = MagicMock()
self.api.get_dpinit_pending.return_value = mock_response[12]
self.api.get_module_level_flag = MagicMock()
self.api.get_module_level_flag.return_value = mock_response[13]
self.api.get_aux_mon_type = MagicMock()
self.api.get_aux_mon_type.return_value = mock_response[14]
self.api.get_tx_power_flag = MagicMock()
self.api.get_tx_power_flag.return_value = mock_response[15]
self.api.get_rx_power_flag = MagicMock()
self.api.get_rx_power_flag.return_value = mock_response[16]
self.api.get_tx_bias_flag = MagicMock()
self.api.get_tx_bias_flag.return_value = mock_response[17]
self.api.get_vdm = MagicMock()
self.api.get_vdm.return_value = mock_response[18]
result = self.api.get_transceiver_status()
assert result == expected
@pytest.mark.parametrize("mock_response, expected",[
(
[
False,
False,
[False, False, False, False, False, False, False, False],
[False, False, False, False, False, False, False, False]
],
{
'media_output_loopback': False,
'media_input_loopback': False,
'host_output_loopback_lane1': False,
'host_output_loopback_lane2': False,
'host_output_loopback_lane3': False,
'host_output_loopback_lane4': False,
'host_output_loopback_lane5': False,
'host_output_loopback_lane6': False,
'host_output_loopback_lane7': False,
'host_output_loopback_lane8': False,
'host_input_loopback_lane1': False,
'host_input_loopback_lane2': False,
'host_input_loopback_lane3': False,
'host_input_loopback_lane4': False,
'host_input_loopback_lane5': False,
'host_input_loopback_lane6': False,
'host_input_loopback_lane7': False,
'host_input_loopback_lane8': False
}
)
])
def test_get_transceiver_loopback(self, mock_response, expected):
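        # The four mock_response entries back the media/host loopback getters
        # below; the expected dict expands the per-lane host loopback lists.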
self.api.get_media_output_loopback = MagicMock()
self.api.get_media_output_loopback.return_value = mock_response[0]
self.api.get_media_input_loopback = MagicMock()
self.api.get_media_input_loopback.return_value = mock_response[1]
self.api.get_host_output_loopback = MagicMock()
self.api.get_host_output_loopback.return_value = mock_response[2]
self.api.get_host_input_loopback = MagicMock()
self.api.get_host_input_loopback.return_value = mock_response[3]
result = self.api.get_transceiver_loopback()
assert result == expected
def test_cable_len(self):
cable_len_field = self.mem_map.get_field(LENGTH_ASSEMBLY_FIELD)
data = bytearray([0xFF])
dep = {LEN_MULT_FIELD: 0b11}
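        # Presumably CMIS encodes the assembly length as a 6-bit base length
        # plus a 2-bit multiplier: 0xFF -> base length 63, multiplier code 0b11
        # -> x100, so the decoded value should be 6300.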
decoded = cable_len_field.decode(data, **dep)
assert decoded == 6300
| 45.924192 | 365 | 0.60248 |
794776090c22d0f8791d86196281e9728e81d84b | 1,592 | py | Python | multi_task/loss/build.py | mydkzgj/esm | 2170d436af021188f233fa88a233959c61bd1f23 | ["MIT"] | null | null | null | multi_task/loss/build.py | mydkzgj/esm | 2170d436af021188f233fa88a233959c61bd1f23 | ["MIT"] | null | null | null | multi_task/loss/build.py | mydkzgj/esm | 2170d436af021188f233fa88a233959c61bd1f23 | ["MIT"] | null | null | null | # encoding: utf-8
"""
@author: Jiayang Chen
@contact: [email protected]
"""
# Existing Losses
from torch.nn import CrossEntropyLoss
# Custom Losses
from .secondary_structure_prediction_loss import SecondaryStructurePredictionLoss
from .contact_prediction_loss import ContactPredictionLoss
def build_loss(cfg):
"""
    :param cfg: losses are selected based on cfg.LOSS.TYPE (space-separated loss names).
    :return: a loss function and a dict of the underlying loss class instances
"""
lossNames = cfg.LOSS.TYPE.split(" ")
    # build loss classes (rarely needed directly; kept available in case they are useful)
loss_classes = {}
for lossName in lossNames:
if lossName == "contact_prediction_loss":
loss_classes[lossName] = ContactPredictionLoss()
elif lossName == "secondary_structure_prediction_loss":
loss_classes[lossName] = SecondaryStructurePredictionLoss()
else:
raise Exception('Unexpected LOSS_TYPE arise: {}'.format(cfg.LOSS.TYPE))
# build loss func
def loss_func(pd_contacts=None, gt_contacts=None, pd_structure2s=None, gt_structure2s=None):
losses = {}
for lossName in lossNames:
if lossName == "contact_prediction_loss":
losses[lossName] = loss_classes[lossName](pd_contacts, gt_contacts)
elif lossName == "secondary_structure_prediction_loss":
losses[lossName] = loss_classes[lossName](pd_structure2s, gt_structure2s)
else:
raise Exception('Unexpected LOSS_TYPE arise: {}'.format(cfg.LOSS.TYPE))
return losses
return loss_func, loss_classes
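# Illustrative usage sketch (hypothetical variable names):
#   loss_func, loss_classes = build_loss(cfg)
#   losses = loss_func(pd_contacts=pred_contacts, gt_contacts=true_contacts)
#   total_loss = sum(losses.values())  # one entry per name in cfg.LOSS.TYPE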
| 35.377778 | 96 | 0.684673 |
79477611f1ef7d8f1d60080ae44aaa1c7eb4c9be | 2,066 | py | Python | tests/examples/test_examples.py | alvin-chang/lightning-flash | 481d4d369ff0a5d8c2b2d9e4970c5608a92b3ff5 | ["Apache-2.0"] | null | null | null | tests/examples/test_examples.py | alvin-chang/lightning-flash | 481d4d369ff0a5d8c2b2d9e4970c5608a92b3ff5 | ["Apache-2.0"] | null | null | null | tests/examples/test_examples.py | alvin-chang/lightning-flash | 481d4d369ff0a5d8c2b2d9e4970c5608a92b3ff5 | ["Apache-2.0"] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import sys
from pathlib import Path
from typing import List, Optional, Tuple
import pytest
root = Path(__file__).parent.parent.parent
def call_script(
filepath: str,
args: Optional[List[str]] = None,
timeout: Optional[int] = 60 * 5,
) -> Tuple[int, str, str]:
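    # Runs the example script with the current Python interpreter in a
    # subprocess and returns (returncode, stdout, stderr); the default timeout
    # is five minutes.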
if args is None:
args = []
args = [str(a) for a in args]
command = [sys.executable, filepath] + args
print(" ".join(command))
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
stdout, stderr = p.communicate(timeout=timeout)
except subprocess.TimeoutExpired:
p.kill()
stdout, stderr = p.communicate()
stdout = stdout.decode("utf-8")
stderr = stderr.decode("utf-8")
return p.returncode, stdout, stderr
def run_test(filepath):
code, stdout, stderr = call_script(filepath)
print(f"{filepath} STDOUT: {stdout}")
print(f"{filepath} STDERR: {stderr}")
assert not code
@pytest.mark.parametrize(
"step,file",
[
("finetuning", "image_classification.py"),
("finetuning", "tabular_classification.py"),
("predict", "classify_image.py"),
("predict", "classify_tabular.py"),
# "classify_text.py" TODO: takes too long
]
)
def test_finetune_example(tmpdir, step, file):
run_test(str(root / "flash_examples" / step / file))
def test_generic_example(tmpdir):
run_test(str(root / "flash_examples" / "generic_task.py"))
| 30.382353 | 81 | 0.684414 |
794776544998bef81e3c6cac4815148976037ea5 | 118,576 | py | Python | tensorflow/python/training/saver_test.py | sorhus/tensorflow | 99de1826646c8d354259187fc9c2330b794c1ac4 | [
"Apache-2.0"
] | 1 | 2018-09-15T21:31:52.000Z | 2018-09-15T21:31:52.000Z | tensorflow/python/training/saver_test.py | sorhus/tensorflow | 99de1826646c8d354259187fc9c2330b794c1ac4 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/training/saver_test.py | sorhus/tensorflow | 99de1826646c8d354259187fc9c2330b794c1ac4 | [
"Apache-2.0"
] | 1 | 2021-08-30T22:33:25.000Z | 2021-08-30T22:33:25.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for tensorflow.python.training.saver.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import math
import os
import random
import shutil
import tempfile
import time
import numpy as np
import six
from google.protobuf.any_pb2 import Any
from google.protobuf import text_format
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import queue_runner_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import function
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.training import adam
from tensorflow.python.training import checkpointable
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import saver as saver_module
from tensorflow.python.training import saver_test_utils
from tensorflow.python.training.checkpoint_state_pb2 import CheckpointState
from tensorflow.python.util import compat
@test_util.with_c_api
class SaverTest(test.TestCase):
def basicSaveRestore(self, variable_op):
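    # Exercises a full save/restore round trip in three separate sessions:
    # save initialized values, restore them into uninitialized variables, then
    # restore again on top of differently initialized variables. `variable_op`
    # lets the same flow cover both ref and resource variables.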
save_path = os.path.join(self.get_temp_dir(), "basic_save_restore")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variable_op(10.0, name="v0")
v1 = variable_op(20.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2.insert("k1", 30.0)
# Initialize all variables
if context.in_graph_mode():
self.evaluate([variables.global_variables_initializer(), v2_init])
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(30.0, self.evaluate(v2.values()))
# Save the initialized values in the file at "save_path"
save = saver_module.Saver(
{
"v0": v0,
"v1": v1,
"v2": v2.saveable
}, restore_sequentially=True)
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Start a second session. In that session the parameter nodes
# have not been initialized either.
with self.test_session(graph=ops_lib.Graph()) as sess:
v0 = variable_op(-1.0, name="v0")
v1 = variable_op(-1.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
# Assert that the variables are not initialized.
if context.in_graph_mode():
self.assertEqual(
len(variables.report_uninitialized_variables().eval()), 2)
self.assertEqual(0, len(v2.keys().eval()))
self.assertEqual(0, len(v2.values().eval()))
# Restore the saved values in the parameter nodes.
save = saver_module.Saver({"v0": v0, "v1": v1, "v2": v2.saveable})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(30.0, self.evaluate(v2.values()))
# Build another graph with 2 nodes, initialized
# differently, and a Restore node for them.
with self.test_session(graph=ops_lib.Graph()) as sess:
v0_2 = variable_op(1000.0, name="v0")
v1_2 = variable_op(2000.0, name="v1")
v2_2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2_2.insert("k1000", 3000.0)
# Check that the parameter nodes have been initialized.
if context.in_graph_mode():
init_all_op = [variables.global_variables_initializer(), v2_init]
self.evaluate(init_all_op)
      # TODO(xpan): Why doesn't _mutable_hash_table_v2 create an empty table
      # in eager mode, as it claims?
self.assertEqual(b"k1000", self.evaluate(v2_2.keys()))
self.assertEqual(3000.0, self.evaluate(v2_2.values()))
self.assertEqual(1000.0, self.evaluate(v0_2))
self.assertEqual(2000.0, self.evaluate(v1_2))
# Restore the values saved earlier in the parameter nodes.
save2 = saver_module.Saver({"v0": v0_2, "v1": v1_2, "v2": v2_2.saveable})
save2.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0_2))
self.assertEqual(20.0, self.evaluate(v1_2))
self.assertEqual(b"k1", self.evaluate(v2_2.keys()))
self.assertEqual(30.0, self.evaluate(v2_2.values()))
def testBasic(self):
self.basicSaveRestore(variables.Variable)
@test_util.run_in_graph_and_eager_modes()
def testResourceBasic(self):
self.basicSaveRestore(resource_variable_ops.ResourceVariable)
def testResourceVariableReadOpsAddedDeterministically(self):
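    # Building the same graph several times should produce byte-identical
    # GraphDefs, i.e. the Saver must add its resource-variable read ops in a
    # deterministic order.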
graph_defs = []
num_graphs = 10
for _ in range(num_graphs):
with ops_lib.Graph().as_default() as g:
for i in range(20):
resource_variable_ops.ResourceVariable(i, name="var%s" % i)
saver_module.Saver()
graph_defs.append(g.as_graph_def())
for i in range(num_graphs - 1):
self.assertEqual(graph_defs[i], graph_defs[i + 1])
def testEagerBasic(self):
with context.eager_mode():
ckpt_prefix = os.path.join(self.get_temp_dir(), "ckpt")
v1 = resource_variable_ops.ResourceVariable(3.14, name="v1")
v2 = resource_variable_ops.ResourceVariable([1, 2], name="v2")
save = saver_module.Saver([v1, v2])
save.save(None, ckpt_prefix)
v1.assign(0.0)
v2.assign([0, 0])
self.assertNear(0.0, self.evaluate(v1), 1e-5)
self.assertAllEqual([0, 0], self.evaluate(v2))
save.restore(None, ckpt_prefix)
self.assertNear(3.14, self.evaluate(v1), 1e-5)
self.assertAllEqual([1, 2], self.evaluate(v2))
def testEagerGraphCompatibility(self):
# Save from graph mode and restore from eager mode.
graph_ckpt_prefix = os.path.join(self.get_temp_dir(), "graph_ckpt")
with context.graph_mode():
with self.test_session(graph=ops_lib.Graph()) as sess:
# Create a graph model and save the checkpoint.
w1 = resource_variable_ops.ResourceVariable(1.0, name="w1")
w2 = resource_variable_ops.ResourceVariable(2.0, name="w2")
graph_saver = saver_module.Saver([w1, w2])
sess.run(variables.global_variables_initializer())
graph_saver.save(sess, graph_ckpt_prefix)
with context.eager_mode():
ops_lib._default_graph_stack.reset() # pylint: disable=protected-access
ops_lib.reset_default_graph()
w1 = resource_variable_ops.ResourceVariable(0.0, name="w1")
w2 = resource_variable_ops.ResourceVariable(0.0, name="w2")
graph_saver = saver_module.Saver([w1, w2])
graph_saver.restore(None, graph_ckpt_prefix)
self.assertAllEqual(self.evaluate(w1), 1.0)
self.assertAllEqual(self.evaluate(w2), 2.0)
# Save from eager mode and restore from graph mode.
eager_ckpt_prefix = os.path.join(self.get_temp_dir(), "eager_ckpt")
with context.eager_mode():
ops_lib._default_graph_stack.reset() # pylint: disable=protected-access
ops_lib.reset_default_graph()
w3 = resource_variable_ops.ResourceVariable(3.0, name="w3")
w4 = resource_variable_ops.ResourceVariable(4.0, name="w4")
graph_saver = saver_module.Saver([w3, w4])
graph_saver.save(None, eager_ckpt_prefix)
with context.graph_mode():
with self.test_session(graph=ops_lib.Graph()) as sess:
w3 = resource_variable_ops.ResourceVariable(0.0, name="w3")
w4 = resource_variable_ops.ResourceVariable(0.0, name="w4")
graph_saver = saver_module.Saver([w3, w4])
sess.run(variables.global_variables_initializer())
graph_saver.restore(sess, eager_ckpt_prefix)
self.assertAllEqual(w3.eval(), 3.0)
self.assertAllEqual(w4.eval(), 4.0)
@test_util.run_in_graph_and_eager_modes()
def testResourceSaveRestoreCachingDevice(self):
save_path = os.path.join(self.get_temp_dir(), "resource_cache")
with self.test_session(graph=ops_lib.Graph()) as sess:
v = resource_variable_ops.ResourceVariable([1], caching_device="/cpu:0",
name="v")
if context.in_graph_mode():
self.evaluate(variables.global_variables_initializer())
else:
sess = None
save = saver_module.Saver([v])
save.save(sess, save_path)
save2 = saver_module.Saver([v])
save2.restore(sess, save_path)
self.assertEquals(self.evaluate(v), [1])
def testNoAdditionalOpsAddedBySaverForResourceVariablesOutsideSaveScope(self):
with ops_lib.Graph().as_default() as g:
v = resource_variable_ops.ResourceVariable(1.0, name="v")
with ops_lib.name_scope("saver1"):
saver_module.Saver()
with ops_lib.name_scope("saver2"):
saver_module.Saver({"name": v})
ops_in_saver1_scope_but_not_save_scope = [
op for op in g.get_operations()
if (op.name.startswith("saver1/") and
not op.name.startswith("saver1/save/"))]
self.assertEqual(ops_in_saver1_scope_but_not_save_scope, [])
ops_in_saver2_scope_but_not_save_scope = [
op for op in g.get_operations()
if (op.name.startswith("saver2/") and
not op.name.startswith("saver2/save/"))]
self.assertEqual(ops_in_saver2_scope_but_not_save_scope, [])
def testSaveCopyRestoreWithSaveRelativePaths(self):
"""Save, copy checkpoint dir and restore from copied dir.
This only works for save_relative_paths=True.
"""
save_dir1 = os.path.join(self.get_temp_dir(), "save_dir1")
os.mkdir(save_dir1)
save_path1 = os.path.join(save_dir1, "save_copy_restore")
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(20.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2.insert("k1", 30.0)
save = saver_module.Saver(
var_list={
"v0": v0,
"v1": v1,
"v2": v2.saveable},
restore_sequentially=True,
save_relative_paths=True)
init_all_op = [variables.global_variables_initializer(), v2_init]
with self.test_session() as sess:
# Initialize all variables
sess.run(init_all_op)
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
self.assertEqual(b"k1", v2.keys().eval())
self.assertEqual(30.0, v2.values().eval())
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path1)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path1, val)
self.assertEqual(saver_module.latest_checkpoint(save_dir1), save_path1)
save_dir2 = os.path.join(self.get_temp_dir(), "save_dir2")
os.renames(save_dir1, save_dir2)
save_path2 = os.path.join(save_dir2, "save_copy_restore")
self.assertEqual(saver_module.latest_checkpoint(save_dir2), save_path2)
# Start a second session. In that session the parameter nodes
# have not been initialized either.
with self.test_session() as sess:
v0 = variables.Variable(-1.0, name="v0")
v1 = variables.Variable(-1.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
save = saver_module.Saver({"v0": v0, "v1": v1, "v2": v2.saveable})
# Assert that the variables are not initialized.
self.assertEqual(
len(variables.report_uninitialized_variables().eval()), 2)
self.assertEqual(0, len(v2.keys().eval()))
self.assertEqual(0, len(v2.values().eval()))
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path2)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
self.assertEqual(b"k1", v2.keys().eval())
self.assertEqual(30.0, v2.values().eval())
def testFilenameTensor(self):
v0 = variables.Variable(0, name="v0")
filename = b"somerandomfilename"
save = saver_module.Saver({"v0": v0}, filename=filename)
with self.test_session() as sess:
tensor = sess.graph.get_tensor_by_name(
save.saver_def.filename_tensor_name)
self.assertEqual(sess.run(tensor), filename)
def testInvalidPath(self):
v0 = variables.Variable(0, name="v0")
for ver in (saver_pb2.SaverDef.V1, saver_pb2.SaverDef.V2):
with self.test_session() as sess:
save = saver_module.Saver({"v0": v0}, write_version=ver)
with self.assertRaisesRegexp(errors.NotFoundError,
"Failed to find any matching files for"):
save.restore(sess, "invalid path")
def testInt64(self):
save_path = os.path.join(self.get_temp_dir(), "int64")
with self.test_session() as sess:
# Build a graph with 1 node, and save and restore for them.
v = variables.Variable(np.int64(15), name="v")
save = saver_module.Saver({"v": v}, restore_sequentially=True)
variables.global_variables_initializer().run()
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
with self.test_session() as sess:
v = variables.Variable(np.int64(-1), name="v")
save = saver_module.Saver({"v": v})
with self.assertRaisesWithPredicateMatch(
errors_impl.OpError, lambda e: "uninitialized value v" in e.message):
sess.run(v)
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(np.int64(15), v.eval())
def testSomeErrors(self):
with ops_lib.Graph().as_default():
v0 = variables.Variable([10.0], name="v0")
v1 = variables.Variable([20.0], name="v1")
v2 = variables.Variable([20.0], name="v2")
v2._set_save_slice_info(
variables.Variable.SaveSliceInfo("v1", [1], [0], [1]))
# By default the name used for "v2" will be "v1" and raise an error.
with self.assertRaisesRegexp(ValueError, "same name: v1"):
saver_module.Saver([v0, v1, v2])
# The names are different and will work.
saver_module.Saver({"vee1": v1, "other": [v2]})
# Partitioned variables also cause name conflicts.
p_v1 = variable_scope.get_variable(
"p_v1",
shape=[4, 5],
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
p_v2 = variable_scope.get_variable(
"p_v2",
shape=[4, 5],
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
p_v2._name = "p_v1"
with self.assertRaisesRegexp(ValueError, "same name: p_v1"):
saver_module.Saver([p_v1, p_v2])
def testSameName(self):
with ops_lib.Graph().as_default():
v0 = variables.Variable([10.0], name="v0")
v2 = saver_test_utils.CheckpointedOp(name="v2")
# Saving one variable under two names raises an error.
with self.assertRaisesRegexp(
ValueError, "The same saveable will be restored with two names: v0"):
saver_module.Saver({"v0": v0, "v0too": v0})
# Ditto for custom saveables.
with self.assertRaisesRegexp(
ValueError, "The same saveable will be restored with two names: v2"):
saver_module.Saver({"v2": v2.saveable, "v2too": v2.saveable})
# Verify non-duplicate names work.
saver_module.Saver({"v0": v0, "v2": v2.saveable})
def testBasicsWithListOfVariables(self):
save_path = os.path.join(self.get_temp_dir(), "basics_with_list")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(20.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2.insert("k1", 30.0)
save = saver_module.Saver([v0, v1, v2.saveable])
variables.global_variables_initializer().run()
v2_init.run()
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
self.assertEqual(b"k1", v2.keys().eval())
self.assertEqual(30.0, v2.values().eval())
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Start a second session. In that session the variables
# have not been initialized either.
with self.test_session(graph=ops_lib.Graph()) as sess:
v0 = variables.Variable(-1.0, name="v0")
v1 = variables.Variable(-1.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
save = saver_module.Saver([v0, v1, v2.saveable])
with self.assertRaisesWithPredicateMatch(
errors_impl.OpError, lambda e: "uninitialized value v0" in e.message):
sess.run(v0)
with self.assertRaisesWithPredicateMatch(
errors_impl.OpError, lambda e: "uninitialized value v1" in e.message):
sess.run(v1)
self.assertEqual(0, len(v2.keys().eval()))
self.assertEqual(0, len(v2.values().eval()))
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
self.assertEqual(b"k1", v2.keys().eval())
self.assertEqual(30.0, v2.values().eval())
# Build another graph with 2 nodes, initialized
# differently, and a Restore node for them.
with self.test_session(graph=ops_lib.Graph()) as sess:
v0_2 = variables.Variable(1000.0, name="v0")
v1_2 = variables.Variable(2000.0, name="v1")
v2_2 = saver_test_utils.CheckpointedOp(name="v2")
save2 = saver_module.Saver([v0_2, v1_2, v2_2.saveable])
v2_2.insert("k1000", 3000.0).run()
variables.global_variables_initializer().run()
# Check that the parameter nodes have been initialized.
self.assertEqual(1000.0, v0_2.eval())
self.assertEqual(2000.0, v1_2.eval())
self.assertEqual(b"k1000", v2_2.keys().eval())
self.assertEqual(3000.0, v2_2.values().eval())
# Restore the values saved earlier in the parameter nodes.
save2.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0_2.eval())
self.assertEqual(20.0, v1_2.eval())
self.assertEqual(b"k1", v2_2.keys().eval())
self.assertEqual(30.0, v2_2.values().eval())
def _SaveAndLoad(self, var_name, var_value, other_value, save_path):
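    # Saves `var_value` under `var_name`, then restores it into a fresh
    # variable initialized to `other_value` and checks that the saved value
    # round-trips.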
with self.test_session(graph=ops_lib.Graph()) as sess:
var = resource_variable_ops.ResourceVariable(var_value, name=var_name)
save = saver_module.Saver({var_name: var})
if context.in_graph_mode():
self.evaluate(var.initializer)
val = save.save(sess, save_path)
self.assertEqual(save_path, val)
with self.test_session(graph=ops_lib.Graph()) as sess:
var = resource_variable_ops.ResourceVariable(other_value, name=var_name)
save = saver_module.Saver({var_name: var})
save.restore(sess, save_path)
self.assertAllClose(var_value, self.evaluate(var))
def testCacheRereadsFile(self):
save_path = os.path.join(self.get_temp_dir(), "cache_rereads")
# Save and reload one Variable named "var0".
self._SaveAndLoad("var0", 0.0, 1.0, save_path)
# Save and reload one Variable named "var1" in the same file.
# The cached readers should know to re-read the file.
self._SaveAndLoad("var1", 1.1, 2.2, save_path)
def testAllowEmpty(self):
save_path = os.path.join(self.get_temp_dir(), "allow_empty")
with self.test_session() as sess:
_ = constant_op.constant(1)
save = saver_module.Saver(allow_empty=True)
val = save.save(sess, save_path)
self.assertIsNone(val)
with self.test_session() as sess:
save = saver_module.Saver(allow_empty=True)
save.restore(sess, save_path)
def testGPU(self):
if not test.is_gpu_available():
return
save_path = os.path.join(self.get_temp_dir(), "gpu")
with session.Session("", graph=ops_lib.Graph()) as sess:
with sess.graph.device(test.gpu_device_name()):
v0_1 = variables.Variable(123.45)
save = saver_module.Saver({"v0": v0_1})
variables.global_variables_initializer().run()
save.save(sess, save_path)
with session.Session("", graph=ops_lib.Graph()) as sess:
with sess.graph.device(test.gpu_device_name()):
v0_2 = variables.Variable(543.21)
save = saver_module.Saver({"v0": v0_2})
variables.global_variables_initializer().run()
def testSharedServerOnGPU(self):
if not test.is_gpu_available():
return
save_path = os.path.join(self.get_temp_dir(), "gpu")
with session.Session("", graph=ops_lib.Graph()) as sess:
with sess.graph.device(test.gpu_device_name()):
v0_1 = variables.Variable(123.45)
save = saver_module.Saver({"v0": v0_1}, sharded=True, allow_empty=True)
variables.global_variables_initializer().run()
save.save(sess, save_path)
with session.Session("", graph=ops_lib.Graph()) as sess:
with sess.graph.device(test.gpu_device_name()):
v0_2 = variables.Variable(543.21)
save = saver_module.Saver({"v0": v0_2}, sharded=True, allow_empty=True)
variables.global_variables_initializer().run()
def testVariables(self):
save_path = os.path.join(self.get_temp_dir(), "variables")
with session.Session("", graph=ops_lib.Graph()) as sess:
one = variables.Variable(1.0)
twos = variables.Variable([2.0, 2.0, 2.0])
v2 = saver_test_utils.CheckpointedOp(name="v2")
init = variables.global_variables_initializer()
save = saver_module.Saver()
init.run()
v2.insert("k1", 3.0).run()
save.save(sess, save_path)
with session.Session("", graph=ops_lib.Graph()) as sess:
one = variables.Variable(0.0)
twos = variables.Variable([0.0, 0.0, 0.0])
v2 = saver_test_utils.CheckpointedOp(name="v2")
# Saver with no arg, defaults to 'all variables'.
save = saver_module.Saver()
save.restore(sess, save_path)
self.assertAllClose(1.0, one.eval())
self.assertAllClose([2.0, 2.0, 2.0], twos.eval())
self.assertEqual(b"k1", v2.keys().eval())
self.assertEqual(3.0, v2.values().eval())
def testVarListShouldBeEmptyInDeferredBuild(self):
with ops_lib.Graph().as_default():
v = variables.Variable(1.0)
with self.assertRaisesRegexp(ValueError, "defer_build"):
saver_module.Saver([v], defer_build=True)
def testBuildShouldBeCalledBeforeSaveInCaseOfDeferBuild(self):
save_path = os.path.join(self.get_temp_dir(), "error_deferred_build")
with ops_lib.Graph().as_default(), session.Session() as sess:
variables.Variable(1.0)
saver = saver_module.Saver(defer_build=True)
with self.assertRaisesRegexp(RuntimeError, "build"):
saver.save(sess, save_path)
def testDeferredBuild(self):
save_path = os.path.join(self.get_temp_dir(), "deferred_build")
with session.Session("", graph=ops_lib.Graph()) as sess:
one = variables.Variable(1.0)
save = saver_module.Saver(defer_build=True)
# if build is not deferred, saver cannot save the `twos`.
twos = variables.Variable([2.0, 2.0, 2.0])
init = variables.global_variables_initializer()
save.build()
init.run()
save.save(sess, save_path)
with session.Session("", graph=ops_lib.Graph()) as sess:
one = variables.Variable(0.0)
twos = variables.Variable([0.0, 0.0, 0.0])
# Saver with no arg, defaults to 'all variables'.
save = saver_module.Saver()
save.restore(sess, save_path)
self.assertAllClose(1.0, one.eval())
self.assertAllClose([2.0, 2.0, 2.0], twos.eval())
def testReshape(self):
save_path = os.path.join(self.get_temp_dir(), "variables_reshape")
with session.Session("", graph=ops_lib.Graph()) as sess:
var = variables.Variable([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
init = variables.global_variables_initializer()
save = saver_module.Saver()
init.run()
save.save(sess, save_path)
# Error when restoring with default reshape=False
with session.Session("", graph=ops_lib.Graph()) as sess:
var = variables.Variable([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
save = saver_module.Saver()
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Assign requires shapes of both tensors to match."):
save.restore(sess, save_path)
# Restored to new shape with reshape=True
with session.Session("", graph=ops_lib.Graph()) as sess:
var = variables.Variable([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
save = saver_module.Saver(reshape=True)
save.restore(sess, save_path)
self.assertAllClose([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], var.eval())
@test_util.run_in_graph_and_eager_modes()
def testSaveWithGlobalStep(self, pad_step_number=False):
save_path = os.path.join(self.get_temp_dir(), "ckpt_with_global_step")
global_step_int = 5
# Save and reload one Variable named "var0".
self._SaveAndLoad("var0", 0.0, 1.0, save_path)
for use_tensor in [True, False]:
with self.test_session(graph=ops_lib.Graph()):
var = resource_variable_ops.ResourceVariable(1.0, name="var0")
save = saver_module.Saver(
{
var._shared_name: var
}, pad_step_number=pad_step_number)
if context.in_graph_mode():
self.evaluate(var.initializer)
sess = ops_lib.get_default_session()
else:
sess = None
if use_tensor:
global_step = constant_op.constant(global_step_int)
val = save.save(sess, save_path, global_step=global_step)
else:
val = save.save(sess, save_path, global_step=global_step_int)
if pad_step_number:
expected_save_path = "%s-%s" % (save_path,
"{:08d}".format(global_step_int))
else:
expected_save_path = "%s-%d" % (save_path, global_step_int)
self.assertEqual(expected_save_path, val)
def testSaveWithGlobalStepWithPadding(self):
self.testSaveWithGlobalStep(pad_step_number=True)
def testSaveToNonexistingPath(self):
file_io.write_string_to_file(
os.path.join(self.get_temp_dir(), "actually_a_file"), "")
paths = [
os.path.join(self.get_temp_dir(), "nonexisting_dir/path"),
os.path.join(self.get_temp_dir(), "other_nonexisting_dir/path1/path2"),
os.path.join(self.get_temp_dir(), "actually_a_file/path"),
]
for save_path in paths:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(20.0, name="v1")
save = saver_module.Saver({"v0": v0, "v1": v1}, restore_sequentially=True)
init_all_op = variables.global_variables_initializer()
# In the case where the parent directory doesn't exist, whether or not the
# save succeeds or fails is implementation dependent. Therefore we allow
# both cases.
try:
with self.test_session() as sess:
# Initialize all variables
sess.run(init_all_op)
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
# Save the graph.
save.save(sess, save_path)
with self.test_session() as sess:
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
except ValueError as exc:
error_msg_template = "Parent directory of {} doesn't exist, can't save."
self.assertEqual(error_msg_template.format(save_path), str(exc))
def testSaveToURI(self):
# ParseURI functions don't work on Windows yet.
# TODO(jhseu): Remove this check when it works.
if os.name == "nt":
self.skipTest("Local URI support doesn't work on Windows")
save_path = "file://" + os.path.join(self.get_temp_dir(), "uri")
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(20.0, name="v1")
save = saver_module.Saver({"v0": v0, "v1": v1}, restore_sequentially=True)
init_all_op = variables.global_variables_initializer()
with self.test_session() as sess:
# Initialize all variables
sess.run(init_all_op)
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
save.save(sess, save_path)
@test_util.with_c_api
class SaveRestoreShardedTest(test.TestCase):
_WRITE_VERSION = saver_pb2.SaverDef.V1
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def testBasics(self):
save_path = os.path.join(self.get_temp_dir(), "sharded_basics")
# Build a graph with 2 parameter nodes on different devices.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.Variable(10, name="v0")
t0 = saver_test_utils.CheckpointedOp(name="t0")
with sess.graph.device("/cpu:1"):
v1 = variables.Variable(20, name="v1")
t1 = saver_test_utils.CheckpointedOp(name="t1")
save = saver_module.Saver(
{
"v0": v0,
"v1": v1,
"t0": t0.saveable,
"t1": t1.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
variables.global_variables_initializer().run()
t0.insert("k1", 30.0).run()
t1.insert("k2", 40.0).run()
val = save.save(sess, save_path)
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(save_path + "-?????-of-00002", val)
else:
self.assertEqual(save_path, val)
meta_graph_filename = save._MetaGraphFilename(val)
self.assertEqual(save_path + ".meta", meta_graph_filename)
if save._write_version is saver_pb2.SaverDef.V1:
# Restore different ops from shard 0 of the saved files.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.Variable(111, name="v0")
t0 = saver_test_utils.CheckpointedOp(name="t0")
save = saver_module.Saver(
{
"v0": v0,
"t0": t0.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
variables.global_variables_initializer().run()
t0.insert("k11", 33.0).run()
self.assertEqual(111, v0.eval())
self.assertEqual(b"k11", t0.keys().eval())
self.assertEqual(33.0, t0.values().eval())
save.restore(sess, save_path + "-00000-of-00002")
self.assertEqual(10, v0.eval())
self.assertEqual(b"k1", t0.keys().eval())
self.assertEqual(30.0, t0.values().eval())
# Restore different ops from shard 1 of the saved files.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v1 = variables.Variable(222)
t1 = saver_test_utils.CheckpointedOp(name="t1")
save = saver_module.Saver(
{
"v1": v1,
"t1": t1.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
variables.global_variables_initializer().run()
t1.insert("k22", 44.0).run()
self.assertEqual(222, v1.eval())
self.assertEqual(b"k22", t1.keys().eval())
self.assertEqual(44.0, t1.values().eval())
save.restore(sess, save_path + "-00001-of-00002")
self.assertEqual(20, v1.eval())
self.assertEqual(b"k2", t1.keys().eval())
self.assertEqual(40.0, t1.values().eval())
# Now try a restore with the sharded filename.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.Variable(111, name="v0")
t0 = saver_test_utils.CheckpointedOp(name="t0")
with sess.graph.device("/cpu:1"):
v1 = variables.Variable(222, name="v1")
t1 = saver_test_utils.CheckpointedOp(name="t1")
save = saver_module.Saver(
{
"v0": v0,
"v1": v1,
"t0": t0.saveable,
"t1": t1.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
variables.global_variables_initializer().run()
t0.insert("k11", 33.0).run()
t1.insert("k22", 44.0).run()
self.assertEqual(111, v0.eval())
self.assertEqual(222, v1.eval())
self.assertEqual(b"k11", t0.keys().eval())
self.assertEqual(33.0, t0.values().eval())
self.assertEqual(b"k22", t1.keys().eval())
self.assertEqual(44.0, t1.values().eval())
save_path = os.path.join(self.get_temp_dir(), "sharded_basics")
if save._write_version is saver_pb2.SaverDef.V1:
save.restore(sess, save_path + "-?????-of-?????")
else:
save.restore(sess, save_path)
self.assertEqual(10, v0.eval())
self.assertEqual(20, v1.eval())
self.assertEqual(b"k1", t0.keys().eval())
self.assertEqual(30.0, t0.values().eval())
self.assertEqual(b"k2", t1.keys().eval())
self.assertEqual(40.0, t1.values().eval())
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(
saver_module.latest_checkpoint(self.get_temp_dir()),
os.path.join(self.get_temp_dir(), "sharded_basics-?????-of-00002"))
else:
self.assertEqual(
saver_module.latest_checkpoint(self.get_temp_dir()),
os.path.join(self.get_temp_dir(), "sharded_basics"))
def testSaverDef(self):
with self.test_session():
v0 = variables.Variable(123, name="v0")
save = saver_module.Saver({"v0": v0}, sharded=True)
sd = save.as_saver_def()
self.assertTrue(sd.sharded)
def _testPartitionedVariables(self, use_resource):
var_full_shape = [10, 3]
# Allows save/restore mechanism to work w/ different slicings.
var_name = "my_var"
saved_dir = self._get_test_dir("partitioned_variables")
saved_path = os.path.join(saved_dir, "ckpt")
call_saver_with_dict = False # updated by test loop below
def _save(slices=None, partitioner=None):
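      # Builds the variable with shape `var_full_shape` (optionally sliced or
      # partitioned), initializes it with random values, saves a checkpoint,
      # and returns the full ndarray for later comparison.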
with self.test_session(graph=ops_lib.Graph()) as sess:
# Calls .eval() to return the ndarray that makes up the full variable.
rnd = random_ops.random_uniform(var_full_shape).eval()
if slices:
assert not partitioner
# TODO(apassos): make create_partitioned_variables take use_resource
# option to make this test passable without creating a named
# variable_scope.
vs = partitioned_variables.create_partitioned_variables(
var_full_shape, slices, rnd, name=var_name)
elif partitioner:
vs = [
variable_scope.get_variable(
var_name,
shape=var_full_shape,
initializer=rnd,
partitioner=partitioner,
use_resource=use_resource)
]
else:
if use_resource:
vs = [resource_variable_ops.ResourceVariable(rnd, name=var_name)]
else:
vs = [variables.Variable(rnd, name=var_name)]
variables.global_variables_initializer().run()
if call_saver_with_dict:
saver = saver_module.Saver({var_name: (vs if slices else vs[0])})
else:
saver = saver_module.Saver(vs)
actual_path = saver.save(sess, saved_path)
self.assertEqual(saved_path, actual_path)
return rnd
def _restore(slices=None, partitioner=None):
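      # Rebuilds the variable with a possibly different slicing/partitioning,
      # initialized to zeros, restores the checkpoint, and returns the full
      # restored ndarray.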
with self.test_session(graph=ops_lib.Graph()) as sess:
if slices:
assert not partitioner
new_vs = partitioned_variables.create_partitioned_variables(
var_full_shape,
slices,
array_ops.zeros(var_full_shape), # != original contents.
name=var_name)
elif partitioner:
new_vs = [
variable_scope.get_variable(
var_name,
shape=var_full_shape,
initializer=array_ops.zeros(var_full_shape),
partitioner=partitioner)
]
else:
new_vs = [
variables.Variable(
array_ops.zeros(
shape=var_full_shape), # != original contents.
name=var_name)
]
variables.global_variables_initializer().run()
if call_saver_with_dict:
saver = saver_module.Saver({
var_name: (new_vs if slices else new_vs[0])
})
else:
saver = saver_module.Saver(new_vs)
saver.restore(sess, saved_path)
if partitioner:
return new_vs[0].as_tensor().eval()
elif slices and slices[0] != 1:
return array_ops.concat(new_vs, 0).eval()
elif slices and slices[1] != 1:
return array_ops.concat(new_vs, 1).eval()
else: # Non-sliced.
return new_vs[0].eval()
for call_saver_with_dict in {False, True}:
# Save PartitionedVariable and restore into full variable.
saved_full = _save(
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
restored_full = _restore()
self.assertAllEqual(saved_full, restored_full)
# Saves 10 horizontal parts of a partitioned variable.
# Restores into a full variable, non-sliced.
saved_full = _save(slices=[10, 1])
restored_full = _restore()
self.assertAllEqual(saved_full, restored_full)
# Restores into a different number/orientation of slices.
      restored_full = _restore(slices=[2, 1])  # 2 horizontal parts.
self.assertAllEqual(saved_full, restored_full)
restored_full = _restore(slices=[1, 3]) # 3 vertical parts.
self.assertAllEqual(saved_full, restored_full)
# Restores into a PartitionedVariable
restored_full = _restore(
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
self.assertAllEqual(saved_full, restored_full)
# Now, saves a full variable and restores in slices.
saved_full = _save()
restored_full = _restore(slices=[1, 3])
self.assertAllEqual(saved_full, restored_full)
def testPartitionedVariable(self):
self._testPartitionedVariables(use_resource=False)
def testPartitionedResourceVariable(self):
self._testPartitionedVariables(use_resource=True)
@test_util.with_c_api
class SaveRestoreShardedTestV2(SaveRestoreShardedTest):
_WRITE_VERSION = saver_pb2.SaverDef.V2
@test_util.with_c_api
class MaxToKeepTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def assertCheckpointState(self, model_checkpoint_path,
all_model_checkpoint_paths, save_dir):
checkpoint_state = saver_module.get_checkpoint_state(save_dir)
self.assertEqual(checkpoint_state.model_checkpoint_path,
model_checkpoint_path)
self.assertEqual(checkpoint_state.all_model_checkpoint_paths,
all_model_checkpoint_paths)
def testNonSharded(self):
save_dir = self._get_test_dir("max_to_keep_non_sharded")
with self.test_session() as sess:
v = variables.Variable(10.0, name="v")
save = saver_module.Saver({"v": v}, max_to_keep=2)
variables.global_variables_initializer().run()
self.assertEqual([], save.last_checkpoints)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s1],
save_dir=save_dir)
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s1, s2],
save_dir=save_dir)
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
self.assertFalse(saver_module.checkpoint_exists(s1))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(saver_module.checkpoint_exists(s3))
self.assertCheckpointState(
model_checkpoint_path=s3,
all_model_checkpoint_paths=[s2, s3],
save_dir=save_dir)
# Create a second helper, identical to the first.
save2 = saver_module.Saver(saver_def=save.as_saver_def())
save2.set_last_checkpoints(save.last_checkpoints)
# Create a third helper, with the same configuration but no knowledge of
# previous checkpoints.
save3 = saver_module.Saver(saver_def=save.as_saver_def())
# Exercise the first helper.
# Adding s2 again (old s2 is removed first, then new s2 appended)
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s3, s2], save.last_checkpoints)
self.assertFalse(saver_module.checkpoint_exists(s1))
self.assertFalse(
saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
self.assertTrue(saver_module.checkpoint_exists(s3))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s3, s2],
save_dir=save_dir)
# Adding s1 (s3 should now be deleted as oldest in list)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save.last_checkpoints)
self.assertFalse(saver_module.checkpoint_exists(s3))
self.assertFalse(
saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s2, s1],
save_dir=save_dir)
# Exercise the second helper.
# Adding s2 again (old s2 is removed first, then new s2 appended)
s2 = save2.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s3, s2], save2.last_checkpoints)
# Created by the first helper.
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
# Deleted by the first helper.
self.assertFalse(saver_module.checkpoint_exists(s3))
self.assertFalse(
saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s3, s2],
save_dir=save_dir)
# Adding s1 (s3 should now be deleted as oldest in list)
s1 = save2.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save2.last_checkpoints)
self.assertFalse(saver_module.checkpoint_exists(s3))
self.assertFalse(
saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s2, s1],
save_dir=save_dir)
# Exercise the third helper.
# Adding s2 again (but helper is unaware of previous s2)
s2 = save3.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s2], save3.last_checkpoints)
# Created by the first helper.
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
# Deleted by the first helper.
self.assertFalse(saver_module.checkpoint_exists(s3))
self.assertFalse(
saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
# Even though the file for s1 exists, this saver isn't aware of it, which
# is why it doesn't end up in the checkpoint state.
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s2],
save_dir=save_dir)
# Adding s1 (s3 should not be deleted because helper is unaware of it)
s1 = save3.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save3.last_checkpoints)
self.assertFalse(saver_module.checkpoint_exists(s3))
self.assertFalse(
saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))
self.assertTrue(saver_module.checkpoint_exists(s2))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertTrue(
saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s2, s1],
save_dir=save_dir)
def testSharded(self):
save_dir = self._get_test_dir("max_to_keep_sharded")
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.Variable(111, name="v0")
with sess.graph.device("/cpu:1"):
v1 = variables.Variable(222, name="v1")
save = saver_module.Saver(
{
"v0": v0,
"v1": v1
}, sharded=True, max_to_keep=2)
variables.global_variables_initializer().run()
self.assertEqual([], save.last_checkpoints)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
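      # Note on the expected file counts below: a V1 sharded save returns a
      # pattern that globs to one file per shard, while a V2 save writes an
      # index file plus per-shard data files; the ".meta" file also matches
      # the prefix glob.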
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s1)))
else:
self.assertEqual(4, len(gfile.Glob(s1 + "*")))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s1)))
else:
self.assertEqual(4, len(gfile.Glob(s1 + "*")))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s2)))
else:
self.assertEqual(4, len(gfile.Glob(s2 + "*")))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
self.assertEqual(0, len(gfile.Glob(s1 + "*")))
self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1)))
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s2)))
else:
self.assertEqual(4, len(gfile.Glob(s2 + "*")))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s3)))
else:
self.assertEqual(4, len(gfile.Glob(s3 + "*")))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s3)))
def testNoMaxToKeep(self):
save_dir = self._get_test_dir("no_max_to_keep")
save_dir2 = self._get_test_dir("max_to_keep_0")
with self.test_session() as sess:
v = variables.Variable(10.0, name="v")
variables.global_variables_initializer().run()
# Test max_to_keep being None.
save = saver_module.Saver({"v": v}, max_to_keep=None)
self.assertEqual([], save.last_checkpoints)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([], save.last_checkpoints)
self.assertTrue(saver_module.checkpoint_exists(s1))
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([], save.last_checkpoints)
self.assertTrue(saver_module.checkpoint_exists(s2))
# Test max_to_keep being 0.
save2 = saver_module.Saver({"v": v}, max_to_keep=0)
self.assertEqual([], save2.last_checkpoints)
s1 = save2.save(sess, os.path.join(save_dir2, "s1"))
self.assertEqual([], save2.last_checkpoints)
self.assertTrue(saver_module.checkpoint_exists(s1))
s2 = save2.save(sess, os.path.join(save_dir2, "s2"))
self.assertEqual([], save2.last_checkpoints)
self.assertTrue(saver_module.checkpoint_exists(s2))
def testNoMetaGraph(self):
save_dir = self._get_test_dir("no_meta_graph")
with self.test_session() as sess:
v = variables.Variable(10.0, name="v")
save = saver_module.Saver({"v": v})
variables.global_variables_initializer().run()
s1 = save.save(sess, os.path.join(save_dir, "s1"), write_meta_graph=False)
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1)))
@test_util.with_c_api
class KeepCheckpointEveryNHoursTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
@test.mock.patch.object(saver_module, "time")
def testNonSharded(self, mock_time):
save_dir = self._get_test_dir("keep_checkpoint_every_n_hours")
with self.test_session() as sess:
v = variables.Variable([10.0], name="v")
# Run the initializer NOW to avoid the 0.5s overhead of the first Run()
# call, which throws the test timing off in fastbuild mode.
variables.global_variables_initializer().run()
# Create a saver that will keep the last 2 checkpoints plus one every 0.7
# seconds.
start_time = time.time()
mock_time.time.return_value = start_time
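      # Patching saver_module's time module (mock_time) lets the test advance
      # the clock deterministically instead of sleeping, so checkpoint ages
      # are exact and the test stays fast.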
save = saver_module.Saver(
{
"v": v
}, max_to_keep=2, keep_checkpoint_every_n_hours=0.7 / 3600)
self.assertEqual([], save.last_checkpoints)
      # Wait till 1 second has elapsed so s1 will be old enough to keep.
# sleep may return early, don't trust it.
mock_time.time.return_value = start_time + 1.0
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
      # We now have 2 'last_checkpoints': [s1, s2]. The next call to Save()
      # would normally delete s1, because max_to_keep is 2. However, s1 is
      # older than 0.7s so we must keep it.
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
      # s1 should still be here; we are not checking now to reduce timing
      # variance in the test.
# We now have 2 'last_checkpoints': [s2, s3], and s1 on disk. The next
# call to Save(), will delete s2, because max_to_keep is 2, and because
# we already kept the old s1. s2 is very close in time to s1 so it gets
# deleted.
s4 = save.save(sess, os.path.join(save_dir, "s4"))
self.assertEqual([s3, s4], save.last_checkpoints)
# Check that s1 is still here, but s2 is gone.
self.assertTrue(saver_module.checkpoint_exists(s1))
self.assertFalse(saver_module.checkpoint_exists(s2))
self.assertTrue(saver_module.checkpoint_exists(s3))
self.assertTrue(saver_module.checkpoint_exists(s4))
@test_util.with_c_api
class SaveRestoreWithVariableNameMap(test.TestCase):
def _testNonReshape(self, variable_op):
save_path = os.path.join(self.get_temp_dir(), "non_reshape")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variable_op(10.0, name="v0")
v1 = variable_op(20.0, name="v1")
save = saver_module.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
self.evaluate(variables.global_variables_initializer())
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
# Save the initialized values in the file at "save_path"
# Use a variable name map to set the saved tensor names
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Verify that the original names are not in the Saved file
save = saver_module.Saver({"v0": v0, "v1": v1})
with self.assertRaisesOpError("not found in checkpoint"):
save.restore(sess, save_path)
# Verify that the mapped names are present in the Saved file and can be
# Restored using remapped names.
with self.test_session(graph=ops_lib.Graph()) as sess:
v0 = variable_op(-1.0, name="v0")
v1 = variable_op(-1.0, name="v1")
if context.in_graph_mode():
with self.assertRaisesOpError("uninitialized"):
self.evaluate(v0)
with self.assertRaisesOpError("uninitialized"):
self.evaluate(v1)
save = saver_module.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
if context.in_graph_mode():
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
# Add a prefix to the node names in the current graph and Restore using
# remapped names.
with self.test_session(graph=ops_lib.Graph()) as sess:
v0 = variable_op(-1.0, name="restore_prefix/v0")
v1 = variable_op(-1.0, name="restore_prefix/v1")
if context.in_graph_mode():
with self.assertRaisesOpError("uninitialized"):
self.evaluate(v0)
with self.assertRaisesOpError("uninitialized"):
self.evaluate(v1)
# Restore the saved values in the parameter nodes.
save = saver_module.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
@test_util.run_in_graph_and_eager_modes()
def testNonReshapeResourceVariable(self):
self._testNonReshape(resource_variable_ops.ResourceVariable)
def testNonReshapeVariable(self):
self._testNonReshape(variables.Variable)
@test_util.with_c_api
class LatestCheckpointWithRelativePaths(test.TestCase):
@staticmethod
@contextlib.contextmanager
def tempWorkingDir(temppath):
cwd = os.getcwd()
os.chdir(temppath)
try:
yield
finally:
os.chdir(cwd)
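  # Helper context managers: the tests below chdir into a scratch directory so
  # that relative checkpoint paths neither depend on nor pollute the real
  # working directory.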
@staticmethod
@contextlib.contextmanager
def tempDir():
tempdir = tempfile.mkdtemp()
try:
yield tempdir
finally:
shutil.rmtree(tempdir)
def testNameCollision(self):
# Make sure we have a clean directory to work in.
with self.tempDir() as tempdir:
# Jump to that directory until this test is done.
with self.tempWorkingDir(tempdir):
# Save training snapshots to a relative path.
traindir = "train/"
os.mkdir(traindir)
# Collides with the default name of the checkpoint state file.
filepath = os.path.join(traindir, "checkpoint")
with self.test_session() as sess:
unused_a = variables.Variable(0.0) # So that Saver saves something.
variables.global_variables_initializer().run()
# Should fail.
saver = saver_module.Saver(sharded=False)
with self.assertRaisesRegexp(ValueError, "collides with"):
saver.save(sess, filepath)
# Succeeds: the file will be named "checkpoint-<step>".
saver.save(sess, filepath, global_step=1)
self.assertIsNotNone(saver_module.latest_checkpoint(traindir))
# Succeeds: the file will be named "checkpoint-<i>-of-<n>".
saver = saver_module.Saver(sharded=True)
saver.save(sess, filepath)
self.assertIsNotNone(saver_module.latest_checkpoint(traindir))
# Succeeds: the file will be named "checkpoint-<step>-<i>-of-<n>".
saver = saver_module.Saver(sharded=True)
saver.save(sess, filepath, global_step=1)
self.assertIsNotNone(saver_module.latest_checkpoint(traindir))
def testRelativePath(self):
# Make sure we have a clean directory to work in.
with self.tempDir() as tempdir:
# Jump to that directory until this test is done.
with self.tempWorkingDir(tempdir):
# Save training snapshots to a relative path.
traindir = "train/"
os.mkdir(traindir)
filename = "snapshot"
filepath = os.path.join(traindir, filename)
with self.test_session() as sess:
# Build a simple graph.
v0 = variables.Variable(0.0)
inc = v0.assign_add(1.0)
save = saver_module.Saver({"v0": v0})
# Record a short training history.
variables.global_variables_initializer().run()
save.save(sess, filepath, global_step=0)
inc.eval()
save.save(sess, filepath, global_step=1)
inc.eval()
save.save(sess, filepath, global_step=2)
with self.test_session() as sess:
# Build a new graph with different initialization.
v0 = variables.Variable(-1.0)
# Create a new saver.
save = saver_module.Saver({"v0": v0})
variables.global_variables_initializer().run()
# Get the most recent checkpoint name from the training history file.
name = saver_module.latest_checkpoint(traindir)
self.assertIsNotNone(name)
# Restore "v0" from that checkpoint.
save.restore(sess, name)
self.assertEqual(v0.eval(), 2.0)
@test_util.with_c_api
class CheckpointStateTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def testAbsPath(self):
save_dir = self._get_test_dir("abs_paths")
abs_path = os.path.join(save_dir, "model-0")
ckpt = saver_module.generate_checkpoint_state_proto(save_dir, abs_path)
self.assertEqual(ckpt.model_checkpoint_path, abs_path)
self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path))
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path)
def testRelPath(self):
train_dir = "train"
model = os.path.join(train_dir, "model-0")
# model_checkpoint_path should have no "train" directory part.
new_rel_path = "model-0"
ckpt = saver_module.generate_checkpoint_state_proto(train_dir, model)
self.assertEqual(ckpt.model_checkpoint_path, new_rel_path)
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], new_rel_path)
def testAllModelCheckpointPaths(self):
save_dir = self._get_test_dir("all_models_test")
abs_path = os.path.join(save_dir, "model-0")
for paths in [None, [], ["model-2"]]:
ckpt = saver_module.generate_checkpoint_state_proto(
save_dir, abs_path, all_model_checkpoint_paths=paths)
self.assertEqual(ckpt.model_checkpoint_path, abs_path)
self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path))
self.assertEqual(
len(ckpt.all_model_checkpoint_paths), len(paths) if paths else 1)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path)
def testUpdateCheckpointState(self):
save_dir = self._get_test_dir("update_checkpoint_state")
os.chdir(save_dir)
# Make a temporary train directory.
train_dir = "train"
os.mkdir(train_dir)
abs_path = os.path.join(save_dir, "model-0")
rel_path = os.path.join("train", "model-2")
saver_module.update_checkpoint_state(
train_dir, rel_path, all_model_checkpoint_paths=[abs_path, rel_path])
ckpt = saver_module.get_checkpoint_state(train_dir)
self.assertEqual(ckpt.model_checkpoint_path, rel_path)
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], rel_path)
self.assertEqual(ckpt.all_model_checkpoint_paths[0], abs_path)
def testUpdateCheckpointStateSaveRelativePaths(self):
save_dir = self._get_test_dir("update_checkpoint_state")
os.chdir(save_dir)
abs_path2 = os.path.join(save_dir, "model-2")
rel_path2 = "model-2"
abs_path0 = os.path.join(save_dir, "model-0")
rel_path0 = "model-0"
saver_module._update_checkpoint_state( # pylint: disable=protected-access
save_dir=save_dir,
model_checkpoint_path=abs_path2,
all_model_checkpoint_paths=[rel_path0, abs_path2],
save_relative_paths=True)
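    # With save_relative_paths=True the written "checkpoint" file stores
    # paths relative to save_dir (so the directory can be relocated), while
    # get_checkpoint_state below still resolves them to absolute paths.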
# File should contain relative paths.
file_content = file_io.read_file_to_string(
os.path.join(save_dir, "checkpoint"))
ckpt = CheckpointState()
text_format.Merge(file_content, ckpt)
self.assertEqual(ckpt.model_checkpoint_path, rel_path2)
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], rel_path2)
self.assertEqual(ckpt.all_model_checkpoint_paths[0], rel_path0)
# get_checkpoint_state should return absolute paths.
ckpt = saver_module.get_checkpoint_state(save_dir)
self.assertEqual(ckpt.model_checkpoint_path, abs_path2)
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path2)
self.assertEqual(ckpt.all_model_checkpoint_paths[0], abs_path0)
def testCheckPointStateFailsWhenIncomplete(self):
save_dir = self._get_test_dir("checkpoint_state_fails_when_incomplete")
os.chdir(save_dir)
ckpt_path = os.path.join(save_dir, "checkpoint")
ckpt_file = open(ckpt_path, "w")
ckpt_file.write("")
ckpt_file.close()
with self.assertRaises(ValueError):
saver_module.get_checkpoint_state(save_dir)
def testCheckPointCompletesRelativePaths(self):
save_dir = self._get_test_dir("checkpoint_completes_relative_paths")
os.chdir(save_dir)
ckpt_path = os.path.join(save_dir, "checkpoint")
ckpt_file = open(ckpt_path, "w")
ckpt_file.write("""
model_checkpoint_path: "./model.ckpt-687529"
all_model_checkpoint_paths: "./model.ckpt-687500"
all_model_checkpoint_paths: "./model.ckpt-687529"
""")
ckpt_file.close()
ckpt = saver_module.get_checkpoint_state(save_dir)
self.assertEqual(ckpt.model_checkpoint_path,
os.path.join(save_dir, "./model.ckpt-687529"))
self.assertEqual(ckpt.all_model_checkpoint_paths[0],
os.path.join(save_dir, "./model.ckpt-687500"))
self.assertEqual(ckpt.all_model_checkpoint_paths[1],
os.path.join(save_dir, "./model.ckpt-687529"))
@test_util.with_c_api
class MetaGraphTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def testAddCollectionDef(self):
test_dir = self._get_test_dir("good_collection")
filename = os.path.join(test_dir, "metafile")
with self.test_session():
# Creates a graph.
v0 = variables.Variable(1.0, name="v0")
control_flow_ops.cond(
math_ops.less(v0, 10), lambda: math_ops.add(v0, 1),
lambda: math_ops.subtract(v0, 1))
control_flow_ops.while_loop(lambda i: math_ops.less(i, 10),
lambda i: math_ops.add(i, 1), [v0])
var = variables.Variable(constant_op.constant(0, dtype=dtypes.int64))
count_up_to = var.count_up_to(3)
input_queue = data_flow_ops.FIFOQueue(
30, dtypes.float32, shared_name="collection_queue")
qr = queue_runner_impl.QueueRunner(input_queue, [count_up_to])
variables.global_variables_initializer()
# Creates a saver.
save = saver_module.Saver({"v0": v0})
# Adds a set of collections.
ops_lib.add_to_collection("int_collection", 3)
ops_lib.add_to_collection("float_collection", 3.5)
ops_lib.add_to_collection("string_collection", "hello")
ops_lib.add_to_collection("variable_collection", v0)
# Add QueueRunners.
queue_runner_impl.add_queue_runner(qr)
# Adds user_defined proto in three formats: string, bytes and Any.
queue_runner = queue_runner_pb2.QueueRunnerDef(queue_name="test_queue")
ops_lib.add_to_collection("user_defined_string_collection",
str(queue_runner))
ops_lib.add_to_collection("user_defined_bytes_collection",
queue_runner.SerializeToString())
any_buf = Any()
any_buf.Pack(queue_runner)
ops_lib.add_to_collection("user_defined_any_collection", any_buf)
# Generates MetaGraphDef.
meta_graph_def = save.export_meta_graph(filename)
self.assertTrue(meta_graph_def.HasField("saver_def"))
self.assertTrue(meta_graph_def.HasField("graph_def"))
self.assertTrue(meta_graph_def.HasField("meta_info_def"))
self.assertNotEqual(meta_graph_def.meta_info_def.tensorflow_version, "")
self.assertNotEqual(meta_graph_def.meta_info_def.tensorflow_git_version,
"")
collection_def = meta_graph_def.collection_def
self.assertEqual(len(collection_def), 12)
with ops_lib.Graph().as_default():
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(filename)
# Generates a new MetaGraphDef.
new_meta_graph_def = new_saver.export_meta_graph()
# It should be the same as the original.
test_util.assert_meta_graph_protos_equal(
self, meta_graph_def, new_meta_graph_def)
def testAddCollectionDefFails(self):
with self.test_session():
# Creates a graph.
v0 = variables.Variable(10.0, name="v0")
# Creates a saver.
save = saver_module.Saver({"v0": v0})
# Generates MetaGraphDef.
meta_graph_def = meta_graph_pb2.MetaGraphDef()
# Verifies that collection with unsupported key will not be added.
ops_lib.add_to_collection(save, 3)
save._add_collection_def(meta_graph_def, save)
self.assertEqual(len(meta_graph_def.collection_def), 0)
# Verifies that collection where item type does not match expected
# type will not be added.
ops_lib.add_to_collection("int_collection", 3)
ops_lib.add_to_collection("int_collection", 3.5)
save._add_collection_def(meta_graph_def, "int_collection")
self.assertEqual(len(meta_graph_def.collection_def), 0)
def _testMultiSaverCollectionSave(self, test_dir):
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Creates a graph.
v0 = variables.Variable([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name="v0")
v1 = variables.Variable(11.0, name="v1")
# Creates 2 savers.
saver0 = saver_module.Saver({"v0": v0}, name="saver0")
saver1 = saver_module.Saver({"v1": v1}, name="saver1")
ops_lib.add_to_collection("savers", saver0)
ops_lib.add_to_collection("savers", saver1)
variables.global_variables_initializer().run()
# Saves to different checkpoints.
saver0.save(sess, saver0_ckpt)
saver1.save(sess, saver1_ckpt)
# Generates MetaGraphDef.
meta_graph_def = saver_module.export_meta_graph(filename)
meta_graph_def0 = saver0.export_meta_graph()
meta_graph_def1 = saver1.export_meta_graph()
# Verifies that there is no saver_def in meta_graph_def.
self.assertFalse(meta_graph_def.HasField("saver_def"))
      # Verifies that there is a saver_def in both meta_graph_def0 and
      # meta_graph_def1.
self.assertTrue(meta_graph_def0.HasField("saver_def"))
self.assertTrue(meta_graph_def1.HasField("saver_def"))
# Verifies SAVERS is saved as bytes_list for meta_graph_def.
collection_def = meta_graph_def.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there are 2 entries in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(2, len(savers.value))
# Verifies SAVERS collection is saved as bytes_list for meta_graph_def0.
collection_def = meta_graph_def0.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there are 2 entries in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(2, len(savers.value))
def _testMultiSaverCollectionRestore(self, test_dir):
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Imports from meta_graph.
saver_module.import_meta_graph(filename)
# Retrieves SAVERS collection. Verifies there are 2 entries.
savers = ops_lib.get_collection("savers")
self.assertEqual(2, len(savers))
# Retrieves saver0. Verifies that new_saver0 can restore v0, but not v1.
new_saver0 = savers[0]
new_saver0.restore(sess, saver0_ckpt)
v0 = sess.graph.get_tensor_by_name("v0:0")
v1 = sess.graph.get_tensor_by_name("v1:0")
self.assertAllEqual([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], v0.eval())
self.assertEqual([3, 2], v0.get_shape())
self.assertEqual([], v1.get_shape())
with self.assertRaisesWithPredicateMatch(
errors_impl.OpError, lambda e: "uninitialized value v1" in e.message):
sess.run(v1)
# Retrieves saver1. Verifies that new_saver1 can restore v1.
new_saver1 = savers[1]
new_saver1.restore(sess, saver1_ckpt)
v1 = sess.graph.get_tensor_by_name("v1:0")
self.assertEqual(11.0, v1.eval())
def testMultiSaverCollection(self):
test_dir = self._get_test_dir("saver_collection")
self._testMultiSaverCollectionSave(test_dir)
self._testMultiSaverCollectionRestore(test_dir)
def testClearExtraneousSavers(self):
test_dir = self._get_test_dir("clear_extraneous_savers")
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Creates a graph.
v0 = variables.Variable([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name="v0")
v1 = variables.Variable(11.0, name="v1")
# Creates 2 savers.
saver0 = saver_module.Saver({"v0": v0}, name="saver0")
saver1 = saver_module.Saver({"v1": v1}, name="saver1")
ops_lib.add_to_collection("savers", saver0)
ops_lib.add_to_collection("savers", saver1)
variables.global_variables_initializer().run()
# Saves to different checkpoints.
saver0.save(sess, saver0_ckpt)
saver1.save(sess, saver1_ckpt)
# Generates MetaGraphDef.
meta_graph_def = saver_module.export_meta_graph(filename)
meta_graph_def0 = saver0.export_meta_graph()
meta_graph_def1 = saver1.export_meta_graph(clear_extraneous_savers=True)
# Verifies that there is no saver_def in meta_graph_def.
self.assertFalse(meta_graph_def.HasField("saver_def"))
      # Verifies that there is a saver_def in both meta_graph_def0 and
      # meta_graph_def1.
self.assertTrue(meta_graph_def0.HasField("saver_def"))
self.assertTrue(meta_graph_def1.HasField("saver_def"))
# Verifies SAVERS is saved as bytes_list for meta_graph_def.
collection_def = meta_graph_def.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there are 2 entries in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(2, len(savers.value))
# Verifies SAVERS collection is saved as bytes_list for meta_graph_def1.
collection_def = meta_graph_def1.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there is 1 entry in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(1, len(savers.value))
# Verifies that saver0 graph nodes are omitted from the saver1 export
self.assertEqual(29, len(meta_graph_def0.graph_def.node))
self.assertEqual(19, len(meta_graph_def1.graph_def.node))
def testBinaryAndTextFormat(self):
test_dir = self._get_test_dir("binary_and_text")
filename = os.path.join(test_dir, "metafile")
with self.test_session(graph=ops_lib.Graph()):
# Creates a graph.
variables.Variable(10.0, name="v0")
# Exports the graph as binary format.
saver_module.export_meta_graph(filename, as_text=False)
with self.test_session(graph=ops_lib.Graph()):
# Imports the binary format graph.
saver = saver_module.import_meta_graph(filename)
self.assertIsNotNone(saver)
# Exports the graph as text format.
saver.export_meta_graph(filename, as_text=True)
with self.test_session(graph=ops_lib.Graph()):
# Imports the text format graph.
saver_module.import_meta_graph(filename)
# Writes wrong contents to the file.
graph_io.write_graph(saver.as_saver_def(),
os.path.dirname(filename),
os.path.basename(filename))
with self.test_session(graph=ops_lib.Graph()):
# Import should fail.
with self.assertRaisesWithPredicateMatch(IOError,
lambda e: "Cannot parse file"):
saver_module.import_meta_graph(filename)
# Deletes the file
gfile.Remove(filename)
with self.assertRaisesWithPredicateMatch(IOError,
lambda e: "does not exist"):
saver_module.import_meta_graph(filename)
def testSliceVariable(self):
test_dir = self._get_test_dir("slice_saver")
filename = os.path.join(test_dir, "metafile")
with self.test_session():
v1 = variables.Variable([20.0], name="v1")
v2 = variables.Variable([20.0], name="v2")
v2._set_save_slice_info(
variables.Variable.SaveSliceInfo("v1", [1], [0], [1]))
# The names are different and will work.
slice_saver = saver_module.Saver({"first": v1, "second": v2})
variables.global_variables_initializer().run()
# Exports to meta_graph
meta_graph_def = slice_saver.export_meta_graph(filename)
with ops_lib.Graph().as_default():
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(filename)
self.assertIsNotNone(new_saver)
# Generates a new MetaGraphDef.
new_meta_graph_def = new_saver.export_meta_graph()
# It should be the same as the original.
test_util.assert_meta_graph_protos_equal(self, meta_graph_def,
new_meta_graph_def)
def _testGraphExtensionSave(self, test_dir):
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
# Creates an inference graph.
# Hidden 1
images = constant_op.constant(1.2, dtypes.float32, shape=[100, 28])
with ops_lib.name_scope("hidden1"):
weights = variables.Variable(
random_ops.truncated_normal(
[28, 128], stddev=1.0 / math.sqrt(float(28))),
name="weights")
      # The use of control_flow_ops.cond here is purely to add test coverage
      # for saving and restoring the control flow context (which doesn't make
      # any sense here from a machine learning perspective). The typical
      # biases term would be a simple Variable without the condition.
biases = variables.Variable(
control_flow_ops.cond(
math_ops.less(random.random(), 0.5),
lambda: array_ops.ones([128]), lambda: array_ops.zeros([128])),
name="biases")
hidden1 = nn_ops.relu(math_ops.matmul(images, weights) + biases)
# Hidden 2
with ops_lib.name_scope("hidden2"):
weights = variables.Variable(
random_ops.truncated_normal(
[128, 32], stddev=1.0 / math.sqrt(float(128))),
name="weights")
      # The use of control_flow_ops.while_loop here is purely to add test
      # coverage for saving and restoring the control flow context (which
      # doesn't make any sense here from a machine learning perspective). The
      # typical biases term would be a simple Variable without the loop.
def loop_cond(it, _):
return it < 2
def loop_body(it, biases):
biases += constant_op.constant(0.1, shape=[32])
return it + 1, biases
_, biases = control_flow_ops.while_loop(
loop_cond, loop_body,
[constant_op.constant(0), variables.Variable(array_ops.zeros([32]))])
hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights) + biases)
# Linear
with ops_lib.name_scope("softmax_linear"):
weights = variables.Variable(
random_ops.truncated_normal(
[32, 10], stddev=1.0 / math.sqrt(float(32))),
name="weights")
biases = variables.Variable(array_ops.zeros([10]), name="biases")
logits = math_ops.matmul(hidden2, weights) + biases
ops_lib.add_to_collection("logits", logits)
init_all_op = variables.global_variables_initializer()
with self.test_session() as sess:
# Initializes all the variables.
sess.run(init_all_op)
# Runs to logit.
sess.run(logits)
# Creates a saver.
saver0 = saver_module.Saver()
saver0.save(sess, saver0_ckpt)
# Generates MetaGraphDef.
saver0.export_meta_graph(filename)
def _testGraphExtensionRestore(self, test_dir):
filename = os.path.join(test_dir, "metafile")
train_filename = os.path.join(test_dir, "train_metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(filename)
# Generates a new MetaGraphDef.
new_saver.export_meta_graph()
# Restores from checkpoint.
new_saver.restore(sess, saver0_ckpt)
# Adds loss and train.
labels = constant_op.constant(0, dtypes.int32, shape=[100], name="labels")
batch_size = array_ops.size(labels)
labels = array_ops.expand_dims(labels, 1)
indices = array_ops.expand_dims(math_ops.range(0, batch_size), 1)
concated = array_ops.concat([indices, labels], 1)
onehot_labels = sparse_ops.sparse_to_dense(
concated, array_ops.stack([batch_size, 10]), 1.0, 0.0)
logits = ops_lib.get_collection("logits")[0]
cross_entropy = nn_ops.softmax_cross_entropy_with_logits(
labels=onehot_labels, logits=logits, name="xentropy")
loss = math_ops.reduce_mean(cross_entropy, name="xentropy_mean")
summary.scalar("loss", loss)
# Creates the gradient descent optimizer with the given learning rate.
optimizer = gradient_descent.GradientDescentOptimizer(0.01)
# Runs train_op.
train_op = optimizer.minimize(loss)
ops_lib.add_to_collection("train_op", train_op)
# Runs train_op.
sess.run(train_op)
# Generates MetaGraphDef.
saver_module.export_meta_graph(train_filename)
def _testRestoreFromTrainGraphWithControlContext(self, test_dir):
train_filename = os.path.join(test_dir, "train_metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
with self.test_session(graph=ops_lib.Graph()) as sess:
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(train_filename)
# Restores from checkpoint.
new_saver.restore(sess, saver0_ckpt)
train_op = ops_lib.get_collection("train_op")[0]
sess.run(train_op)
def testGraphExtension(self):
test_dir = self._get_test_dir("graph_extension")
self._testGraphExtensionSave(test_dir)
self._testGraphExtensionRestore(test_dir)
self._testRestoreFromTrainGraphWithControlContext(test_dir)
def _testWhileLoopAndGradientSerDes(self, outer_body_fn):
# Build a while loop with `outer_body_fn`, export it, and verify that it can
# be imported and the gradient can be built and run correctly.
test_dir = self._get_test_dir("nested_control_flow")
filename = os.path.join(test_dir, "metafile")
saver_ckpt = os.path.join(test_dir, "saver.ckpt")
# Create while loop using `outer_body_fn`.
with ops_lib.Graph().as_default():
var = variables.Variable(0)
var_name = var.name
_, output = control_flow_ops.while_loop(lambda i, x: i < 5, outer_body_fn,
[0, var])
output_name = output.name
init_op = variables.global_variables_initializer()
# Generate a MetaGraphDef containing the while loop.
with session.Session() as sess:
sess.run(init_op)
sess.run(output)
saver = saver_module.Saver()
saver.save(sess, saver_ckpt)
saver.export_meta_graph(filename)
# Build and run the gradients of the while loop. We use this below to
# verify that the gradients are correct with an imported MetaGraphDef.
grad = gradients_impl.gradients([output], [var])
with session.Session() as sess:
sess.run(init_op)
expected_grad_value = sess.run(grad)
# Restore the MetaGraphDef into a new Graph.
with ops_lib.Graph().as_default():
with session.Session() as sess:
saver = saver_module.import_meta_graph(filename)
saver.restore(sess, saver_ckpt)
# Make sure we can still build gradients and get the same result.
var = ops_lib.get_default_graph().get_tensor_by_name(var_name)
output = ops_lib.get_default_graph().get_tensor_by_name(output_name)
grad = gradients_impl.gradients([output], [var])
init_op = variables.global_variables_initializer()
with session.Session() as sess:
sess.run(init_op)
actual_grad_value = sess.run(grad)
self.assertEqual(expected_grad_value, actual_grad_value)
def testNestedWhileLoopsSerDes(self):
# Test two simple nested while loops.
def body(i, x):
_, r = control_flow_ops.while_loop(lambda j, y: j < 3,
lambda j, y: (j + 1, y + x),
[0, 0])
return i + 1, x + r
self._testWhileLoopAndGradientSerDes(body)
def testNestedControlFlowSerDes(self):
# Test while loop in a cond in a while loop.
# pylint: disable=g-long-lambda
def body(i, x):
cond_result = control_flow_ops.cond(
i > 0,
lambda: control_flow_ops.while_loop(
lambda j, y: j < 3,
lambda j, y: (j + 1, y + x),
[0, 0])[1],
lambda: x)
return i + 1, cond_result
# pylint: enable=g-long-lambda
self._testWhileLoopAndGradientSerDes(body)
def testStrippedOpListDef(self):
with self.test_session():
# Creates a graph.
v0 = variables.Variable(0.0)
var = variables.Variable(10.0)
math_ops.add(v0, var)
@function.Defun(dtypes.float32)
def minus_one(x):
return x - 1
minus_one(array_ops.identity(v0))
save = saver_module.Saver({"v0": v0})
variables.global_variables_initializer()
# Generates MetaGraphDef.
meta_graph_def = save.export_meta_graph()
ops = [o.name for o in meta_graph_def.meta_info_def.stripped_op_list.op]
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(ops, [
"Add", "Assign", "Const", "Identity", "NoOp", "RestoreV2",
"SaveSlices", "Sub", "VariableV2"
])
else:
self.assertEqual(ops, [
"Add", "Assign", "Const", "Identity", "NoOp", "RestoreV2", "SaveV2",
"Sub", "VariableV2"
])
# Test calling stripped_op_list_for_graph directly
op_list = meta_graph.stripped_op_list_for_graph(meta_graph_def.graph_def)
self.assertEqual(ops, [o.name for o in op_list.op])
for o in op_list.op:
self.assertEqual(o.summary, "")
self.assertEqual(o.description, "")
def testStripDefaultValuedAttrs(self):
"""Verifies that default valued attrs are stripped, unless disabled."""
# With strip_default_attrs enabled, attributes "T" (float32) and "Tout"
# (complex64) in the "Complex" op must be removed.
with self.test_session():
real_num = variables.Variable(1.0, dtype=dtypes.float32, name="real")
imag_num = variables.Variable(2.0, dtype=dtypes.float32, name="imag")
math_ops.complex(real_num, imag_num, name="complex")
save = saver_module.Saver({"real_num": real_num, "imag_num": imag_num})
variables.global_variables_initializer()
meta_graph_def = save.export_meta_graph(strip_default_attrs=True)
node_def = test_util.get_node_def_from_graph("complex",
meta_graph_def.graph_def)
self.assertNotIn("T", node_def.attr)
self.assertNotIn("Tout", node_def.attr)
# With strip_default_attrs disabled, attributes "T" (float32) and "Tout"
# (complex64) in the "Complex" op must *not* be removed, even if they map
# to their defaults.
with self.test_session(graph=ops_lib.Graph()):
real_num = variables.Variable(1.0, dtype=dtypes.float32, name="real")
imag_num = variables.Variable(2.0, dtype=dtypes.float32, name="imag")
math_ops.complex(real_num, imag_num, name="complex")
save = saver_module.Saver({"real_num": real_num, "imag_num": imag_num})
variables.global_variables_initializer()
meta_graph_def = save.export_meta_graph(strip_default_attrs=False)
node_def = test_util.get_node_def_from_graph("complex",
meta_graph_def.graph_def)
self.assertIn("T", node_def.attr)
self.assertIn("Tout", node_def.attr)
def testImportIntoNamescope(self):
# Test that we can import a meta graph into a namescope.
test_dir = self._get_test_dir("import_into_namescope")
filename = os.path.join(test_dir, "ckpt")
image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
with session.Session() as sess:
weights = variables.Variable(
random_ops.random_uniform([784, 10]), name="weights")
bias = variables.Variable(array_ops.zeros([10]), name="bias")
logit = nn_ops.relu(math_ops.matmul(image, weights) + bias, name="logits")
nn_ops.softmax(logit, name="prediction")
cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,
logits=logit, name="cost")
adam.AdamOptimizer().minimize(cost, name="optimize")
saver = saver_module.Saver()
sess.run(variables.global_variables_initializer())
saver.save(sess, filename)
graph = ops_lib.Graph()
with session.Session(graph=graph) as sess:
new_saver = saver_module.import_meta_graph(
filename + ".meta", graph=graph, import_scope="new_model")
new_saver.restore(sess, filename)
sess.run(["new_model/optimize"], {
"new_model/image:0": np.random.random([1, 784]),
"new_model/label:0": np.random.randint(
10, size=[1, 10])
})
def testClearDevicesOnImport(self):
# Test that we import a graph without its devices and run successfully.
with ops_lib.Graph().as_default():
with ops_lib.device("/job:ps/replica:0/task:0/device:GPU:0"):
image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
weights = variables.Variable(
random_ops.random_uniform([784, 10]), name="weights")
bias = variables.Variable(array_ops.zeros([10]), name="bias")
logit = nn_ops.relu(math_ops.matmul(image, weights) + bias)
nn_ops.softmax(logit, name="prediction")
cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,
logits=logit)
adam.AdamOptimizer().minimize(cost, name="optimize")
meta_graph_def = saver_module.export_meta_graph()
with session.Session(graph=ops_lib.Graph()) as sess:
saver_module.import_meta_graph(
meta_graph_def, clear_devices=False, import_scope="new_model")
# Device refers to GPU, which is not available here.
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(variables.global_variables_initializer())
with session.Session(graph=ops_lib.Graph()) as sess:
saver_module.import_meta_graph(
meta_graph_def, clear_devices=True, import_scope="new_model")
sess.run(variables.global_variables_initializer())
sess.run(["new_model/optimize"], {
"new_model/image:0": np.random.random([1, 784]),
"new_model/label:0": np.random.randint(
10, size=[1, 10])
})
def testClearDevicesOnExport(self):
# Test that we export a graph without its devices and run successfully.
with ops_lib.Graph().as_default():
with ops_lib.device("/job:ps/replica:0/task:0/device:GPU:0"):
image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
weights = variables.Variable(
random_ops.random_uniform([784, 10]), name="weights")
bias = variables.Variable(array_ops.zeros([10]), name="bias")
logit = nn_ops.relu(math_ops.matmul(image, weights) + bias)
nn_ops.softmax(logit, name="prediction")
cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,
logits=logit)
adam.AdamOptimizer().minimize(cost, name="optimize")
meta_graph_def = saver_module.export_meta_graph(clear_devices=True)
graph_io.write_graph(meta_graph_def, self.get_temp_dir(),
"meta_graph.pbtxt")
with session.Session(graph=ops_lib.Graph()) as sess:
saver_module.import_meta_graph(meta_graph_def, import_scope="new_model")
sess.run(variables.global_variables_initializer())
sess.run(["new_model/optimize"], {
"new_model/image:0": np.random.random([1, 784]),
"new_model/label:0": np.random.randint(
10, size=[1, 10])
})
def testPreserveDatasetAndFunctions(self):
with ops_lib.Graph().as_default() as g:
dataset = dataset_ops.Dataset.range(10).map(lambda x: x * x)
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
_ = array_ops.identity(next_element, name="output")
# Generate three MetaGraphDef protos using different code paths.
meta_graph_def_simple = saver_module.export_meta_graph()
meta_graph_def_devices_cleared = saver_module.export_meta_graph(
clear_devices=True)
meta_graph_def_from_graph_def = saver_module.export_meta_graph(
clear_devices=True, graph_def=g.as_graph_def())
for meta_graph_def in [meta_graph_def_simple,
meta_graph_def_devices_cleared,
meta_graph_def_from_graph_def]:
with session.Session(graph=ops_lib.Graph()) as sess:
saver_module.import_meta_graph(meta_graph_def, import_scope="new_model")
sess.run(variables.global_variables_initializer())
for i in range(10):
self.assertEqual(i * i, sess.run("new_model/output:0"))
with self.assertRaises(errors.OutOfRangeError):
sess.run("new_model/output:0")
@test_util.with_c_api
class CheckpointReaderTest(test.TestCase):
_WRITE_VERSION = saver_pb2.SaverDef.V1
def testDebugString(self):
# Builds a graph.
v0 = variables.Variable(
[[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32, name="v0")
v1 = variables.Variable(
[[[1], [2]], [[3], [4]], [[5], [6]]], dtype=dtypes.float32, name="v1")
init_all_op = variables.global_variables_initializer()
save = saver_module.Saver(
{
"v0": v0,
"v1": v1
}, write_version=self._WRITE_VERSION)
save_path = os.path.join(self.get_temp_dir(),
"ckpt_for_debug_string" + str(self._WRITE_VERSION))
with self.test_session() as sess:
sess.run(init_all_op)
# Saves a checkpoint.
save.save(sess, save_path)
# Creates a reader.
reader = pywrap_tensorflow.NewCheckpointReader(save_path)
# Verifies that the tensors exist.
self.assertTrue(reader.has_tensor("v0"))
self.assertTrue(reader.has_tensor("v1"))
debug_string = reader.debug_string()
# Verifies that debug string contains the right strings.
self.assertTrue(compat.as_bytes("v0 (DT_FLOAT) [2,3]") in debug_string)
self.assertTrue(compat.as_bytes("v1 (DT_FLOAT) [3,2,1]") in debug_string)
# Verifies get_variable_to_shape_map() returns the correct information.
var_map = reader.get_variable_to_shape_map()
self.assertEqual([2, 3], var_map["v0"])
self.assertEqual([3, 2, 1], var_map["v1"])
# Verifies get_tensor() returns the tensor value.
v0_tensor = reader.get_tensor("v0")
v1_tensor = reader.get_tensor("v1")
self.assertAllEqual(v0.eval(), v0_tensor)
self.assertAllEqual(v1.eval(), v1_tensor)
# Verifies get_tensor() fails for non-existent tensors.
with self.assertRaisesRegexp(errors.NotFoundError,
"v3 not found in checkpoint"):
reader.get_tensor("v3")
def testNonexistentPath(self):
with self.assertRaisesRegexp(errors.NotFoundError,
"Unsuccessful TensorSliceReader"):
pywrap_tensorflow.NewCheckpointReader("non-existent")
@test_util.with_c_api
class CheckpointReaderForV2Test(CheckpointReaderTest):
_WRITE_VERSION = saver_pb2.SaverDef.V2
@test_util.with_c_api
class WriteGraphTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def testWriteGraph(self):
test_dir = self._get_test_dir("write_graph_dir")
variables.Variable([[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32, name="v0")
path = graph_io.write_graph(ops_lib.get_default_graph(),
os.path.join(test_dir, "l1"), "graph.pbtxt")
truth = os.path.join(test_dir, "l1", "graph.pbtxt")
self.assertEqual(path, truth)
self.assertTrue(os.path.exists(path))
def testRecursiveCreate(self):
test_dir = self._get_test_dir("deep_dir")
variables.Variable([[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32, name="v0")
path = graph_io.write_graph(ops_lib.get_default_graph().as_graph_def(),
os.path.join(test_dir, "l1", "l2", "l3"),
"graph.pbtxt")
truth = os.path.join(test_dir, "l1", "l2", "l3", "graph.pbtxt")
self.assertEqual(path, truth)
self.assertTrue(os.path.exists(path))
@test_util.with_c_api
class SaverUtilsTest(test.TestCase):
def setUp(self):
self._base_dir = os.path.join(self.get_temp_dir(), "saver_utils_test")
gfile.MakeDirs(self._base_dir)
def tearDown(self):
gfile.DeleteRecursively(self._base_dir)
def testCheckpointExists(self):
for sharded in (False, True):
for version in (saver_pb2.SaverDef.V2, saver_pb2.SaverDef.V1):
with self.test_session(graph=ops_lib.Graph()) as sess:
unused_v = variables.Variable(1.0, name="v")
variables.global_variables_initializer().run()
saver = saver_module.Saver(sharded=sharded, write_version=version)
path = os.path.join(self._base_dir, "%s-%s" % (sharded, version))
self.assertFalse(
saver_module.checkpoint_exists(path)) # Not saved yet.
ckpt_prefix = saver.save(sess, path)
self.assertTrue(saver_module.checkpoint_exists(ckpt_prefix))
ckpt_prefix = saver_module.latest_checkpoint(self._base_dir)
self.assertTrue(saver_module.checkpoint_exists(ckpt_prefix))
def testGetCheckpointMtimes(self):
prefixes = []
for version in (saver_pb2.SaverDef.V2, saver_pb2.SaverDef.V1):
with self.test_session(graph=ops_lib.Graph()) as sess:
unused_v = variables.Variable(1.0, name="v")
variables.global_variables_initializer().run()
saver = saver_module.Saver(write_version=version)
prefixes.append(
saver.save(sess, os.path.join(self._base_dir, str(version))))
mtimes = saver_module.get_checkpoint_mtimes(prefixes)
self.assertEqual(2, len(mtimes))
self.assertTrue(mtimes[1] >= mtimes[0])
@test_util.with_c_api
class ScopedGraphTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def _testScopedSave(self, test_dir, exported_filename, ckpt_filename):
graph = ops_lib.Graph()
with graph.as_default():
# Creates an inference graph.
# Hidden 1
images = constant_op.constant(
1.2, dtypes.float32, shape=[100, 28], name="images")
with ops_lib.name_scope("hidden1"):
weights1 = variables.Variable(
random_ops.truncated_normal(
[28, 128], stddev=1.0 / math.sqrt(float(28))),
name="weights")
        # The use of control_flow_ops.cond here is purely to add test coverage
        # for saving and restoring the control flow context (which doesn't
        # make any sense here from a machine learning perspective). The
        # typical biases term would be a simple Variable without the condition.
biases1 = variables.Variable(
control_flow_ops.cond(
math_ops.less(random.random(), 0.5),
lambda: array_ops.ones([128]), lambda: array_ops.zeros([128])),
name="biases")
hidden1 = nn_ops.relu(math_ops.matmul(images, weights1) + biases1)
# Hidden 2
with ops_lib.name_scope("hidden2"):
weights2 = variables.Variable(
random_ops.truncated_normal(
[128, 32], stddev=1.0 / math.sqrt(float(128))),
name="weights")
        # The use of control_flow_ops.while_loop here is purely to add test
        # coverage for saving and restoring the control flow context (which
        # doesn't make any sense here from a machine learning perspective).
        # The typical biases term would be a simple Variable without the loop.
def loop_cond(it, _):
return it < 2
def loop_body(it, biases2):
biases2 += constant_op.constant(0.1, shape=[32])
return it + 1, biases2
_, biases2 = control_flow_ops.while_loop(loop_cond, loop_body, [
constant_op.constant(0), variables.Variable(array_ops.zeros([32]))
])
hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights2) + biases2)
# Linear
with ops_lib.name_scope("softmax_linear"):
weights3 = variables.Variable(
random_ops.truncated_normal(
[32, 10], stddev=1.0 / math.sqrt(float(32))),
name="weights")
biases3 = variables.Variable(array_ops.zeros([10]), name="biases")
logits = math_ops.matmul(hidden2, weights3) + biases3
ops_lib.add_to_collection("logits", logits)
# Adds user_defined proto in three formats: string, bytes and Any.
# Any proto should just pass through.
queue_runner = queue_runner_pb2.QueueRunnerDef(queue_name="test_queue")
ops_lib.add_to_collection("user_defined_string_collection",
str(queue_runner))
ops_lib.add_to_collection("user_defined_bytes_collection",
queue_runner.SerializeToString())
any_buf = Any()
any_buf.Pack(queue_runner)
ops_lib.add_to_collection("user_defined_any_collection", any_buf)
_, var_list = meta_graph.export_scoped_meta_graph(
filename=os.path.join(test_dir, exported_filename),
graph=ops_lib.get_default_graph(),
export_scope="hidden1")
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
with self.test_session(graph=graph) as sess:
sess.run(variables.global_variables_initializer())
saver = saver_module.Saver(var_list=var_list, max_to_keep=1)
saver.save(sess, os.path.join(test_dir, ckpt_filename), write_state=False)
def _testScopedRestore(self, test_dir, exported_filename,
new_exported_filename, ckpt_filename):
graph = ops_lib.Graph()
# Create all the missing inputs.
with graph.as_default():
new_image = constant_op.constant(
1.2, dtypes.float32, shape=[100, 28], name="images")
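      # Tensors feeding the exported "hidden1" scope from outside were
      # replaced with "$unbound_inputs_<name>" placeholders at export time;
      # input_map re-binds them to tensors in this graph on import.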
var_list = meta_graph.import_scoped_meta_graph(
os.path.join(test_dir, exported_filename),
graph=graph,
input_map={"$unbound_inputs_images": new_image},
import_scope="new_hidden1")
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
hidden1 = graph.as_graph_element("new_hidden1/Relu:0")
weights1 = graph.as_graph_element("new_hidden1/weights:0")
biases1 = graph.as_graph_element("new_hidden1/biases:0")
with graph.as_default():
# Hidden 2
with ops_lib.name_scope("hidden2"):
weights = variables.Variable(
random_ops.truncated_normal(
[128, 32], stddev=1.0 / math.sqrt(float(128))),
name="weights")
        # The use of control_flow_ops.while_loop here is purely to add test
        # coverage for saving and restoring the control flow context (which
        # doesn't make any sense here from a machine learning perspective).
        # The typical biases term would be a simple Variable without the loop.
def loop_cond(it, _):
return it < 2
def loop_body(it, biases):
biases += constant_op.constant(0.1, shape=[32])
return it + 1, biases
_, biases = control_flow_ops.while_loop(loop_cond, loop_body, [
constant_op.constant(0), variables.Variable(array_ops.zeros([32]))
])
hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights) + biases)
# Linear
with ops_lib.name_scope("softmax_linear"):
weights = variables.Variable(
random_ops.truncated_normal(
[32, 10], stddev=1.0 / math.sqrt(float(32))),
name="weights")
biases = variables.Variable(array_ops.zeros([10]), name="biases")
logits = math_ops.matmul(hidden2, weights) + biases
ops_lib.add_to_collection("logits", logits)
# The rest of the variables.
rest_variables = list(
set(variables.global_variables()) - set(var_list.keys()))
init_rest_op = variables.initialize_variables(rest_variables)
with self.test_session(graph=graph) as sess:
saver = saver_module.Saver(var_list=var_list, max_to_keep=1)
saver.restore(sess, os.path.join(test_dir, ckpt_filename))
# Verify that we have restored weights1 and biases1.
sess.run([weights1, biases1])
# Initialize the rest of the variables and run logits.
sess.run(init_rest_op)
sess.run(logits)
# Verifies that we can save the subgraph under "hidden1" and restore it
# into "new_hidden1" in the new graph.
def testScopedSaveAndRestore(self):
test_dir = self._get_test_dir("scoped_export_import")
ckpt_filename = "ckpt"
self._testScopedSave(test_dir, "exported_hidden1.pbtxt", ckpt_filename)
self._testScopedRestore(test_dir, "exported_hidden1.pbtxt",
"exported_new_hidden1.pbtxt", ckpt_filename)
  # Verifies that we can copy the subgraph under "hidden1" to a different
  # name scope in the same graph or in a different graph.
def testCopyScopedGraph(self):
test_dir = self._get_test_dir("scoped_copy")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
graph1 = ops_lib.Graph()
with graph1.as_default():
with ops_lib.name_scope("hidden1"):
images = constant_op.constant(
1.0, dtypes.float32, shape=[3, 2], name="images")
weights1 = variables.Variable(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name="weights")
biases1 = variables.Variable([0.1] * 3, name="biases")
nn_ops.relu(math_ops.matmul(images, weights1) + biases1, name="relu")
# Run the graph and save scoped checkpoint.
with self.test_session(graph=graph1) as sess:
sess.run(variables.global_variables_initializer())
_, var_list_1 = meta_graph.export_scoped_meta_graph(
export_scope="hidden1")
saver = saver_module.Saver(var_list=var_list_1, max_to_keep=1)
saver.save(sess, saver0_ckpt, write_state=False)
expected = np.reshape([[5.0999999, 7.0999999, 9.10000038] * 3], (3, 3))
# Verifies copy to the same graph with the same name fails.
with graph1.as_default():
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "need to be different" in str(e)):
meta_graph.copy_scoped_meta_graph(
from_scope="hidden1", to_scope="hidden1")
# Verifies copy to the same graph.
with graph1.as_default():
var_list_2 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden1", to_scope="hidden2")
with self.test_session(graph=graph1) as sess:
saver1 = saver_module.Saver(var_list=var_list_1, max_to_keep=1)
saver1.restore(sess, saver0_ckpt)
saver2 = saver_module.Saver(var_list=var_list_2, max_to_keep=1)
saver2.restore(sess, saver0_ckpt)
self.assertAllClose(expected, sess.run("hidden1/relu:0"))
self.assertAllClose(expected, sess.run("hidden2/relu:0"))
    # Verifies copy to a different graph.
graph2 = ops_lib.Graph()
new_var_list_1 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden1",
to_scope="new_hidden1",
from_graph=graph1,
to_graph=graph2)
with self.test_session(graph=graph2) as sess:
saver3 = saver_module.Saver(var_list=new_var_list_1, max_to_keep=1)
saver3.restore(sess, saver0_ckpt)
self.assertAllClose(expected, sess.run("new_hidden1/relu:0"))
def testExportGraphDefWithScope(self):
test_dir = self._get_test_dir("export_graph_def")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
graph1 = ops_lib.Graph()
with graph1.as_default():
with ops_lib.name_scope("hidden1"):
images = constant_op.constant(
1.0, dtypes.float32, shape=[3, 2], name="images")
weights1 = variables.Variable(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name="weights")
biases1 = variables.Variable([0.1] * 3, name="biases")
nn_ops.relu(math_ops.matmul(images, weights1) + biases1, name="relu")
# Run the graph and save scoped checkpoint.
with self.test_session(graph=graph1) as sess:
sess.run(variables.global_variables_initializer())
_, var_list_1 = meta_graph.export_scoped_meta_graph(
graph_def=graph1.as_graph_def(), export_scope="hidden1")
saver = saver_module.Saver(var_list=var_list_1, max_to_keep=1)
saver.save(sess, saver0_ckpt, write_state=False)
expected = np.reshape([[5.0999999, 7.0999999, 9.10000038] * 3], (3, 3))
# Verifies that we can run successfully after restoring.
graph2 = ops_lib.Graph()
new_var_list_1 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden1",
to_scope="new_hidden1",
from_graph=graph1,
to_graph=graph2)
with self.test_session(graph=graph2) as sess:
saver3 = saver_module.Saver(var_list=new_var_list_1, max_to_keep=1)
saver3.restore(sess, saver0_ckpt)
self.assertAllClose(expected, sess.run("new_hidden1/relu:0"))
def testSerializeSaverWithScope(self):
test_dir = self._get_test_dir("export_graph_def")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
saver2_ckpt = os.path.join(test_dir, "saver2.ckpt")
graph = ops_lib.Graph()
with graph.as_default():
with ops_lib.name_scope("hidden1"):
variable1 = variables.Variable([1.0], name="variable1")
saver1 = saver_module.Saver(var_list=[variable1])
graph.add_to_collection(ops_lib.GraphKeys.SAVERS, saver1)
with ops_lib.name_scope("hidden2"):
variable2 = variables.Variable([2.0], name="variable2")
saver2 = saver_module.Saver(var_list=[variable2], name="hidden2/")
graph.add_to_collection(ops_lib.GraphKeys.SAVERS, saver2)
with self.test_session(graph=graph) as sess:
variables.global_variables_initializer().run()
saver1.save(sess, saver1_ckpt, write_state=False)
saver2.save(sess, saver2_ckpt, write_state=False)
graph1 = ops_lib.Graph()
var_dict1 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden1",
to_scope="new_hidden1",
from_graph=graph,
to_graph=graph1)
self.assertEqual(1, len(var_dict1))
saver_list1 = graph1.get_collection(ops_lib.GraphKeys.SAVERS)
self.assertEqual(1, len(saver_list1))
with self.test_session(graph=graph1) as sess:
saver_list1[0].restore(sess, saver1_ckpt)
self.assertEqual(1.0, var_dict1["variable1:0"].eval())
graph2 = ops_lib.Graph()
var_dict2 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden2",
to_scope="new_hidden2",
from_graph=graph,
to_graph=graph2)
self.assertEqual(1, len(var_dict2))
saver_list2 = graph2.get_collection(ops_lib.GraphKeys.SAVERS)
self.assertEqual(1, len(saver_list2))
with self.test_session(graph=graph2) as sess:
saver_list2[0].restore(sess, saver2_ckpt)
self.assertEqual(2.0, var_dict2["variable2:0"].eval())
class _OwnsAVariableSimple(checkpointable.CheckpointableBase):
"""A Checkpointable object which can be saved using a tf.train.Saver."""
def __init__(self):
self.non_dep_variable = variable_scope.get_variable(
name="non_dep_variable", initializer=6., use_resource=True)
def _gather_saveables_for_checkpoint(self):
return {checkpointable.VARIABLE_VALUE_KEY: self.non_dep_variable}
# The Saver sorts by name before parsing, so we need a name property.
@property
def name(self):
return self.non_dep_variable.name
class _MirroringSaveable(
saver_module.BaseSaverBuilder.ResourceVariableSaveable):
def __init__(self, primary_variable, mirrored_variable):
self._primary_variable = primary_variable
self._mirrored_variable = mirrored_variable
super(_MirroringSaveable, self).__init__(
self._primary_variable, "", self._primary_variable.name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into both variables."""
tensor, = restored_tensors
return control_flow_ops.group(
self._primary_variable.assign(tensor),
self._mirrored_variable.assign(tensor))
class _OwnsMirroredVariables(checkpointable.CheckpointableBase):
"""A Checkpointable object which returns a more complex SaveableObject."""
def __init__(self):
self.non_dep_variable = variable_scope.get_variable(
name="non_dep_variable", initializer=6., use_resource=True)
self.mirrored = variable_scope.get_variable(
name="mirrored", initializer=15., use_resource=True)
def _gather_saveables_for_checkpoint(self):
saveable = _MirroringSaveable(
primary_variable=self.non_dep_variable,
mirrored_variable=self.mirrored)
return {checkpointable.VARIABLE_VALUE_KEY: saveable}
# The Saver sorts by name before parsing, so we need a name property.
@property
def name(self):
return self.non_dep_variable.name
@test_util.with_c_api
class CheckpointableCompatibilityTests(test.TestCase):
# TODO(allenl): Track down python3 reference cycles in these tests.
@test_util.run_in_graph_and_eager_modes()
def testNotSaveableButIsCheckpointable(self):
v = _OwnsAVariableSimple()
saver = saver_module.Saver(var_list=[v])
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
self.evaluate(v.non_dep_variable.assign(42.))
with self.test_session() as sess:
save_path = saver.save(sess, prefix)
self.evaluate(v.non_dep_variable.assign(43.))
saver.restore(sess, save_path)
self.assertEqual(42., self.evaluate(v.non_dep_variable))
@test_util.run_in_graph_and_eager_modes()
def testMoreComplexSaveableReturned(self):
v = _OwnsMirroredVariables()
saver = saver_module.Saver(var_list=[v])
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
self.evaluate(v.non_dep_variable.assign(42.))
with self.test_session() as sess:
save_path = saver.save(sess, prefix)
self.evaluate(v.non_dep_variable.assign(43.))
self.evaluate(v.mirrored.assign(44.))
saver.restore(sess, save_path)
self.assertEqual(42., self.evaluate(v.non_dep_variable))
self.assertEqual(42., self.evaluate(v.mirrored))
if __name__ == "__main__":
test.main()
| 41.66409 | 80 | 0.673307 |
7947771bed173c96f921d052741742e6078bb873 | 483 | py | Python | tools/telemetry/unittest_data/test_simple_two_page_set.py | shaochangbin/chromium-crosswalk | 634d34e4cf82b4f7400357c53ec12efaffe94add | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2019-01-16T03:57:28.000Z | 2021-01-23T15:29:45.000Z | tools/telemetry/unittest_data/test_simple_two_page_set.py | shaochangbin/chromium-crosswalk | 634d34e4cf82b4f7400357c53ec12efaffe94add | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | tools/telemetry/unittest_data/test_simple_two_page_set.py | shaochangbin/chromium-crosswalk | 634d34e4cf82b4f7400357c53ec12efaffe94add | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2017-03-15T13:21:38.000Z | 2017-03-15T13:21:38.000Z | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page.page_set import PageSet
class TestSimpleTwoPageSet(PageSet):
def __init__(self):
super(TestSimpleTwoPageSet, self).__init__(
description='A pageset for testing purpose',
archive_data_file='data/test.json',
credentials_path='data/credential',
user_agent_type='desktop')
| 34.5 | 72 | 0.753623 |
794777f8ea85c1b5f482de2a2c7e1785b079c9fb | 275 | py | Python | blackbox_tests/test_keras_train/serve_model_from_h5.py | shfshf/seq2annotation | d4bf88a869631b43fa2974c2ffa1c5dd6a7623ed | [
"Apache-2.0"
] | 90 | 2018-11-29T07:05:16.000Z | 2021-11-22T11:32:58.000Z | blackbox_tests/test_keras_train/serve_model_from_h5.py | shfshf/seq2annotation | d4bf88a869631b43fa2974c2ffa1c5dd6a7623ed | [
"Apache-2.0"
] | 50 | 2019-06-27T07:11:18.000Z | 2022-02-10T00:01:02.000Z | blackbox_tests/test_keras_train/serve_model_from_h5.py | lanSeFangZhou/seq2annotation | a824520d46f0b3d70268fae422976a5ce1b3f4ce | [
"Apache-2.0"
] | 23 | 2019-01-03T14:57:15.000Z | 2022-03-08T07:50:33.000Z | from seq2annotation.server.tensorflow_keras_h5_inference import Inference
inference = Inference('./results/h5_model/model.h5', './results/h5_model/tag_lookup_table.json', './results/h5_model/vocabulary_lookup_table.json')
result = inference.infer("看一下上海的天气。")  # input sentence (Chinese): "Check the weather in Shanghai."
print(result) | 45.833333 | 147 | 0.814545 |
794778467d721a6ae310eff4f32a64d1261f8570 | 3,444 | py | Python | sqlalchemy_media/tests/test_store_manager.py | jpmn/sqlalchemy-media | 7dee4aa70fc8979b6fbb39d04c27d897dd51ae2f | [
"MIT"
] | null | null | null | sqlalchemy_media/tests/test_store_manager.py | jpmn/sqlalchemy-media | 7dee4aa70fc8979b6fbb39d04c27d897dd51ae2f | [
"MIT"
] | 1 | 2020-08-07T14:40:38.000Z | 2020-08-07T14:42:28.000Z | sqlalchemy_media/tests/test_store_manager.py | jpmn/sqlalchemy-media | 7dee4aa70fc8979b6fbb39d04c27d897dd51ae2f | [
"MIT"
] | null | null | null | import unittest
import time
import threading
from sqlalchemy.orm.session import Session
from sqlalchemy_media.stores import StoreManager, Store
from sqlalchemy_media.exceptions import DefaultStoreError, ContextError
# noinspection PyAbstractClass
class DummyStore(Store):
pass
class StoreContextTestCase(unittest.TestCase):
def setUp(self):
StoreManager.register('dummy', DummyStore, default=True)
def test_default_store(self):
with StoreManager(Session) as manager1:
            # Hack StoreManager so that get() raises DefaultStoreError
StoreManager._default = None
self.assertRaises(DefaultStoreError, manager1.get)
            # Make 'dummy' the default store again
StoreManager.make_default('dummy')
self.assertIsNotNone(manager1.get())
# unregister
StoreManager.unregister('dummy')
def test_unregister(self):
# unregister
self.assertRaises(KeyError, StoreManager.unregister, 'invalid_Id')
def test_context_stack(self):
self.assertRaises(ContextError, StoreManager.get_current_store_manager)
with StoreManager(Session) as manager1:
store1 = manager1.get()
self.assertIs(store1, manager1.default_store)
with StoreManager(Session) as manager2:
store2 = manager2.get()
self.assertIs(store2, manager2.default_store)
self.assertIsNot(manager1, manager2)
self.assertIsNot(store1, store2)
def test_multithread(self):
class ThreadStat(object):
store1 = None
store2 = None
wait = True
ready = False
class WorkerThread(threading.Thread):
def __init__(self, stat, test_case):
self.stat = stat
self.test_case = test_case
super().__init__(daemon=True)
def run(self):
with StoreManager(Session) as manager1:
store1 = manager1.get()
self.test_case.assertIs(store1, manager1.default_store)
self.stat.store1 = store1
with StoreManager(Session) as manager2:
store2 = manager2.get()
self.test_case.assertIs(store2, manager2.default_store)
self.stat.store2 = store2
self.stat.ready = True
while self.stat.wait:
time.sleep(.7)
thread1_stat = ThreadStat()
thread2_stat = ThreadStat()
thread1 = WorkerThread(thread1_stat, self)
thread2 = WorkerThread(thread2_stat, self)
thread1.start()
thread2.start()
while not (thread1_stat.ready and thread2_stat.ready): # pragma: no cover
time.sleep(.7)
self.assertIsNot(thread1_stat.store1, thread1_stat.store2)
self.assertIsNot(thread2_stat.store1, thread2_stat.store2)
self.assertIsNot(thread1_stat.store1, thread2_stat.store1)
self.assertIsNot(thread1_stat.store1, thread2_stat.store2)
self.assertIsNot(thread1_stat.store2, thread2_stat.store1)
self.assertIsNot(thread1_stat.store2, thread2_stat.store2)
thread1_stat.wait = False
thread2_stat.wait = False
thread1.join()
thread2.join()
if __name__ == '__main__': # pragma: no cover
unittest.main()
| 29.947826 | 82 | 0.619919 |
7947784f05faac3f1ab8a0b7c5373ab29a86af87 | 599 | py | Python | tests/core.py | Forbear/tagcounter | cb326e2ff83a5743388143189718ad718afbddb3 | [
"MIT"
] | null | null | null | tests/core.py | Forbear/tagcounter | cb326e2ff83a5743388143189718ad718afbddb3 | [
"MIT"
] | null | null | null | tests/core.py | Forbear/tagcounter | cb326e2ff83a5743388143189718ad718afbddb3 | [
"MIT"
] | null | null | null | from tagcounter import TagCounter
def test_counter_command():
c = TagCounter(['sc_list'])
assert c.arguments.command == 'sc_list'
def test_counter_command_args():
c = TagCounter(['synthetic', 'arg1', 'arg2', 'arg3', 'arg4'])
assert c.arguments.command_args == ['arg1', 'arg2', 'arg3', 'arg4']
def test_counter_website():
c = TagCounter(['synthetic', '-website', 'test.example'])
assert c.website == 'test.example'
def test_counter_url():
c = TagCounter(['synthetic', '-website', 'test.example', '-uri', '/test/'])
assert c.url == 'https://test.example/test/'
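# Note (inferred from the assertion above, not taken from the TagCounter source):
# the url property appears to be composed as 'https://' + website + uri.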
| 27.227273 | 79 | 0.649416 |
794778537168e35dc5d00c8417b8e9965d218c3a | 2,858 | py | Python | pylibsimba/wallet.py | SIMBAChain/PyLibSIMBA | 8ed76110abc0b49a77c5dcbf94c5087af251c249 | [
"MIT"
] | 6 | 2019-11-04T15:51:31.000Z | 2021-09-09T10:39:37.000Z | pylibsimba/wallet.py | SIMBAChain/PyLibSIMBA | 8ed76110abc0b49a77c5dcbf94c5087af251c249 | [
"MIT"
] | 11 | 2019-10-02T14:17:23.000Z | 2021-05-21T12:41:54.000Z | pylibsimba/wallet.py | SIMBAChain/PyLibSIMBA | 8ed76110abc0b49a77c5dcbf94c5087af251c249 | [
"MIT"
] | 2 | 2020-02-18T13:48:29.000Z | 2021-05-07T20:05:37.000Z |
from hdwallet import BIP44HDWallet
from hdwallet.cryptocurrencies import EthereumMainnet
from hdwallet.utils import generate_mnemonic
from web3.auto import w3
from pylibsimba.base.wallet_base import WalletBase
from pylibsimba.exceptions import WalletNotFoundException
class Wallet(WalletBase):
    def unlock_wallet(self, passkey):
        """No-op in this implementation: the in-memory HD wallet does not need unlocking."""
        pass
def generate_wallet(self, mnemonic: str = None):
bip44_hdwallet: BIP44HDWallet = BIP44HDWallet(cryptocurrency=EthereumMainnet)
if not mnemonic:
mnemonic = generate_mnemonic(language="english", strength=128)
# Get Ethereum BIP44HDWallet from mnemonic
bip44_hdwallet.from_mnemonic(
mnemonic=mnemonic,
language="english",
# passphrase=PASSPHRASE
)
# Clean default BIP44 derivation indexes/paths
bip44_hdwallet.clean_derivation()
# print("Mnemonic:", bip44_hdwallet.mnemonic())
self.wallet = bip44_hdwallet
def generate_wallet_from_mnemonic(self, mnemonic: str = None):
self.generate_wallet(mnemonic)
def generate_wallet_from_private_key(self, private_key):
bip44_hdwallet: BIP44HDWallet = BIP44HDWallet(cryptocurrency=EthereumMainnet)
# Get Ethereum BIP44HDWallet from private_key
bip44_hdwallet.from_private_key(
private_key = private_key
)
# Clean default BIP44 derivation indexes/paths
bip44_hdwallet.clean_derivation()
self.wallet = bip44_hdwallet
def delete_wallet(self):
"""
        Remove the current wallet. It is not stored on disk, so this simply sets it to None.
"""
self.wallet = None
def wallet_exists(self):
"""
Does a wallet currently exist?
"""
return self.wallet is not None
def sign(self, payload: dict):
"""
        Sign the payload with the wallet.
        Args:
            payload : a dict with 'to', 'gas', 'gasPrice', 'data' and 'nonce' entries describing the transaction
        Returns:
            The signed raw transaction as a hex string
"""
if not self.wallet_exists():
raise WalletNotFoundException("No wallet generated!")
transaction_template = {
'to': bytes.fromhex(payload['to'][2:]),
'value': 0,
'gas': payload['gas'],
'gasPrice': payload['gasPrice'],
'data': bytes.fromhex(payload['data'][2:]),
'nonce': payload['nonce'],
}
private_key = self.wallet.private_key()
signed = w3.eth.account.sign_transaction(transaction_template, private_key)
return signed.rawTransaction.hex()
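    # --- Illustrative usage sketch (not part of the original SIMBA module) ---
    # A minimal example of how this class might be driven end to end; the
    # payload values below are placeholders chosen only to satisfy the fields
    # that sign() reads above, not real transaction data.
    #
    #     wallet = Wallet()
    #     wallet.generate_wallet()                 # fresh random mnemonic
    #     raw_tx = wallet.sign({
    #         'to': '0x' + '00' * 20,              # 20-byte hex address
    #         'gas': 21000,
    #         'gasPrice': 1000000000,
    #         'data': '0x',                        # empty calldata
    #         'nonce': 0,
    #     })
    #     # raw_tx is the signed transaction as a hex string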
def get_address(self):
"""
        Return the Ethereum address associated with this wallet.
"""
if not self.wallet_exists():
raise WalletNotFoundException("No wallet generated!")
return self.wallet.address()
| 30.731183 | 85 | 0.635059 |
79477923990a6470ba09990f061aa430bd61d736 | 94,811 | py | Python | sympy/polys/tests/test_polytools.py | josephmisiti/sympy | 683ccf471a826a75235694ae1ba3dd935d5c8e12 | [
"BSD-3-Clause"
] | 1 | 2020-12-27T18:43:22.000Z | 2020-12-27T18:43:22.000Z | sympy/polys/tests/test_polytools.py | josephmisiti/sympy | 683ccf471a826a75235694ae1ba3dd935d5c8e12 | [
"BSD-3-Clause"
] | null | null | null | sympy/polys/tests/test_polytools.py | josephmisiti/sympy | 683ccf471a826a75235694ae1ba3dd935d5c8e12 | [
"BSD-3-Clause"
] | null | null | null | """Tests for user-friendly public interface to polynomial functions. """
from sympy.polys.polytools import (
Poly, PurePoly, poly,
parallel_poly_from_expr,
degree, degree_list,
LC, LM, LT,
pdiv, prem, pquo, pexquo,
div, rem, quo, exquo,
half_gcdex, gcdex, invert,
subresultants,
resultant, discriminant,
terms_gcd, cofactors,
gcd, gcd_list,
lcm, lcm_list,
trunc,
monic, content, primitive,
compose, decompose,
sturm,
gff_list, gff,
sqf_norm, sqf_part, sqf_list, sqf,
factor_list, factor,
intervals, refine_root, count_roots,
real_roots, nroots, ground_roots,
nth_power_roots_poly,
cancel, reduced, groebner,
GroebnerBasis, is_zero_dimensional, _keep_coeff)
from sympy.polys.polyerrors import (
MultivariatePolynomialError,
OperationNotSupported,
ExactQuotientFailed,
PolificationFailed,
ComputationFailed,
UnificationFailed,
RefinementFailed,
GeneratorsNeeded,
GeneratorsError,
PolynomialError,
CoercionFailed,
NotAlgebraic,
DomainError,
OptionError,
FlagError)
from sympy.polys.polyclasses import DMP, DMF
from sympy.polys.domains import FF, ZZ, QQ, RR, EX
from sympy.polys.monomialtools import lex, grlex, grevlex
from sympy import (
S, Integer, Rational, Float, Mul, Symbol, symbols, sqrt,
exp, sin, expand, oo, I, pi, re, im, RootOf, Eq, Tuple)
from sympy.core.compatibility import iterable
from sympy.utilities.pytest import raises, XFAIL
x,y,z,p,q,r,s,t,u,v,w,a,b,c,d,e = symbols('x,y,z,p,q,r,s,t,u,v,w,a,b,c,d,e')
def _epsilon_eq(a, b):
for x, y in zip(a, b):
if abs(x-y) > 1e-10:
return False
return True
def _strict_eq(a, b):
if type(a) == type(b):
if iterable(a):
if len(a) == len(b):
return all(_strict_eq(c, d) for c, d in zip(a, b))
else:
return False
else:
return isinstance(a, Poly) and a.eq(b, strict=True)
else:
return False
def test_Poly_from_dict():
K = FF(3)
assert Poly.from_dict({0: 1, 1: 2}, gens=x, domain=K).rep == DMP([K(2),K(1)], K)
assert Poly.from_dict({0: 1, 1: 5}, gens=x, domain=K).rep == DMP([K(2),K(1)], K)
assert Poly.from_dict({(0,): 1, (1,): 2}, gens=x, domain=K).rep == DMP([K(2),K(1)], K)
assert Poly.from_dict({(0,): 1, (1,): 5}, gens=x, domain=K).rep == DMP([K(2),K(1)], K)
assert Poly.from_dict({(0, 0): 1, (1, 1): 2}, gens=(x,y), domain=K).rep == DMP([[K(2),K(0)],[K(1)]], K)
assert Poly.from_dict({0: 1, 1: 2}, gens=x).rep == DMP([ZZ(2),ZZ(1)], ZZ)
assert Poly.from_dict({0: 1, 1: 2}, gens=x, field=True).rep == DMP([QQ(2),QQ(1)], QQ)
assert Poly.from_dict({0: 1, 1: 2}, gens=x, domain=ZZ).rep == DMP([ZZ(2),ZZ(1)], ZZ)
assert Poly.from_dict({0: 1, 1: 2}, gens=x, domain=QQ).rep == DMP([QQ(2),QQ(1)], QQ)
assert Poly.from_dict({(0,): 1, (1,): 2}, gens=x).rep == DMP([ZZ(2),ZZ(1)], ZZ)
assert Poly.from_dict({(0,): 1, (1,): 2}, gens=x, field=True).rep == DMP([QQ(2),QQ(1)], QQ)
assert Poly.from_dict({(0,): 1, (1,): 2}, gens=x, domain=ZZ).rep == DMP([ZZ(2),ZZ(1)], ZZ)
assert Poly.from_dict({(0,): 1, (1,): 2}, gens=x, domain=QQ).rep == DMP([QQ(2),QQ(1)], QQ)
def test_Poly_from_list():
K = FF(3)
assert Poly.from_list([2,1], gens=x, domain=K).rep == DMP([K(2),K(1)], K)
assert Poly.from_list([5,1], gens=x, domain=K).rep == DMP([K(2),K(1)], K)
assert Poly.from_list([2,1], gens=x).rep == DMP([ZZ(2),ZZ(1)], ZZ)
assert Poly.from_list([2,1], gens=x, field=True).rep == DMP([QQ(2),QQ(1)], QQ)
assert Poly.from_list([2,1], gens=x, domain=ZZ).rep == DMP([ZZ(2),ZZ(1)], ZZ)
assert Poly.from_list([2,1], gens=x, domain=QQ).rep == DMP([QQ(2),QQ(1)], QQ)
assert Poly.from_list([0, 1.0], gens=x).rep == DMP([RR(1.0)], RR)
assert Poly.from_list([1.0, 0], gens=x).rep == DMP([RR(1.0), RR(0.0)], RR)
raises(MultivariatePolynomialError, "Poly.from_list([[]], gens=(x,y))")
def test_Poly_from_poly():
f = Poly(x+7, x, domain=ZZ)
g = Poly(x+2, x, modulus=3)
h = Poly(x+y, x, y, domain=ZZ)
K = FF(3)
assert Poly.from_poly(f) == f
assert Poly.from_poly(f, domain=K).rep == DMP([K(1),K(1)], K)
assert Poly.from_poly(f, domain=ZZ).rep == DMP([1,7], ZZ)
assert Poly.from_poly(f, domain=QQ).rep == DMP([1,7], QQ)
assert Poly.from_poly(f, gens=x) == f
assert Poly.from_poly(f, gens=x, domain=K).rep == DMP([K(1),K(1)], K)
assert Poly.from_poly(f, gens=x, domain=ZZ).rep == DMP([1,7], ZZ)
assert Poly.from_poly(f, gens=x, domain=QQ).rep == DMP([1,7], QQ)
assert Poly.from_poly(f, gens=y) == Poly(x + 7, y, domain='ZZ[x]')
raises(CoercionFailed, "Poly.from_poly(f, gens=y, domain=K)")
raises(CoercionFailed, "Poly.from_poly(f, gens=y, domain=ZZ)")
raises(CoercionFailed, "Poly.from_poly(f, gens=y, domain=QQ)")
assert Poly.from_poly(f, gens=(x,y)) == Poly(x + 7, x, y, domain='ZZ')
assert Poly.from_poly(f, gens=(x,y), domain=ZZ) == Poly(x + 7, x, y, domain='ZZ')
assert Poly.from_poly(f, gens=(x,y), domain=QQ) == Poly(x + 7, x, y, domain='QQ')
assert Poly.from_poly(f, gens=(x,y), modulus=3) == Poly(x + 7, x, y, domain='FF(3)')
K = FF(2)
assert Poly.from_poly(g) == g
assert Poly.from_poly(g, domain=ZZ).rep == DMP([1,-1], ZZ)
raises(CoercionFailed, "Poly.from_poly(g, domain=QQ)")
assert Poly.from_poly(g, domain=K).rep == DMP([K(1),K(0)], K)
assert Poly.from_poly(g, gens=x) == g
assert Poly.from_poly(g, gens=x, domain=ZZ).rep == DMP([1,-1], ZZ)
raises(CoercionFailed, "Poly.from_poly(g, gens=x, domain=QQ)")
assert Poly.from_poly(g, gens=x, domain=K).rep == DMP([K(1),K(0)], K)
K = FF(3)
assert Poly.from_poly(h) == h
assert Poly.from_poly(h, domain=ZZ).rep == DMP([[ZZ(1)],[ZZ(1),ZZ(0)]], ZZ)
assert Poly.from_poly(h, domain=QQ).rep == DMP([[QQ(1)],[QQ(1),QQ(0)]], QQ)
assert Poly.from_poly(h, domain=K).rep == DMP([[K(1)],[K(1),K(0)]], K)
assert Poly.from_poly(h, gens=x) == Poly(x+y, x, domain=ZZ[y])
raises(CoercionFailed, "Poly.from_poly(h, gens=x, domain=ZZ)")
assert Poly.from_poly(h, gens=x, domain=ZZ[y]) == Poly(x+y, x, domain=ZZ[y])
raises(CoercionFailed, "Poly.from_poly(h, gens=x, domain=QQ)")
assert Poly.from_poly(h, gens=x, domain=QQ[y]) == Poly(x+y, x, domain=QQ[y])
raises(CoercionFailed, "Poly.from_poly(h, gens=x, modulus=3)")
assert Poly.from_poly(h, gens=y) == Poly(x+y, y, domain=ZZ[x])
raises(CoercionFailed, "Poly.from_poly(h, gens=y, domain=ZZ)")
assert Poly.from_poly(h, gens=y, domain=ZZ[x]) == Poly(x+y, y, domain=ZZ[x])
raises(CoercionFailed, "Poly.from_poly(h, gens=y, domain=QQ)")
assert Poly.from_poly(h, gens=y, domain=QQ[x]) == Poly(x+y, y, domain=QQ[x])
raises(CoercionFailed, "Poly.from_poly(h, gens=y, modulus=3)")
assert Poly.from_poly(h, gens=(x,y)) == h
assert Poly.from_poly(h, gens=(x,y), domain=ZZ).rep == DMP([[ZZ(1)],[ZZ(1),ZZ(0)]], ZZ)
assert Poly.from_poly(h, gens=(x,y), domain=QQ).rep == DMP([[QQ(1)],[QQ(1),QQ(0)]], QQ)
assert Poly.from_poly(h, gens=(x,y), domain=K).rep == DMP([[K(1)],[K(1),K(0)]], K)
assert Poly.from_poly(h, gens=(y,x)).rep == DMP([[ZZ(1)],[ZZ(1),ZZ(0)]], ZZ)
assert Poly.from_poly(h, gens=(y,x), domain=ZZ).rep == DMP([[ZZ(1)],[ZZ(1),ZZ(0)]], ZZ)
assert Poly.from_poly(h, gens=(y,x), domain=QQ).rep == DMP([[QQ(1)],[QQ(1),QQ(0)]], QQ)
assert Poly.from_poly(h, gens=(y,x), domain=K).rep == DMP([[K(1)],[K(1),K(0)]], K)
assert Poly.from_poly(h, gens=(x,y), field=True).rep == DMP([[QQ(1)],[QQ(1),QQ(0)]], QQ)
assert Poly.from_poly(h, gens=(x,y), field=True).rep == DMP([[QQ(1)],[QQ(1),QQ(0)]], QQ)
def test_Poly_from_expr():
raises(GeneratorsNeeded, "Poly.from_expr(S(0))")
raises(GeneratorsNeeded, "Poly.from_expr(S(7))")
K = FF(3)
assert Poly.from_expr(x + 5, domain=K).rep == DMP([K(1),K(2)], K)
assert Poly.from_expr(y + 5, domain=K).rep == DMP([K(1),K(2)], K)
assert Poly.from_expr(x + 5, x, domain=K).rep == DMP([K(1),K(2)], K)
assert Poly.from_expr(y + 5, y, domain=K).rep == DMP([K(1),K(2)], K)
assert Poly.from_expr(x + y, domain=K).rep == DMP([[K(1)],[K(1),K(0)]], K)
assert Poly.from_expr(x + y, x, y, domain=K).rep == DMP([[K(1)],[K(1),K(0)]], K)
assert Poly.from_expr(x + 5).rep == DMP([1,5], ZZ)
assert Poly.from_expr(y + 5).rep == DMP([1,5], ZZ)
assert Poly.from_expr(x + 5, x).rep == DMP([1,5], ZZ)
assert Poly.from_expr(y + 5, y).rep == DMP([1,5], ZZ)
assert Poly.from_expr(x + 5, domain=ZZ).rep == DMP([1,5], ZZ)
assert Poly.from_expr(y + 5, domain=ZZ).rep == DMP([1,5], ZZ)
assert Poly.from_expr(x + 5, x, domain=ZZ).rep == DMP([1,5], ZZ)
assert Poly.from_expr(y + 5, y, domain=ZZ).rep == DMP([1,5], ZZ)
assert Poly.from_expr(x + 5, x, y, domain=ZZ).rep == DMP([[1],[5]], ZZ)
assert Poly.from_expr(y + 5, x, y, domain=ZZ).rep == DMP([[1,5]], ZZ)
def test_Poly__new__():
raises(GeneratorsError, "Poly(x+1, x, x)")
raises(GeneratorsError, "Poly(x+y, x, y, domain=ZZ[x])")
raises(GeneratorsError, "Poly(x+y, x, y, domain=ZZ[y])")
raises(OptionError, "Poly(x, x, symmetric=True)")
raises(OptionError, "Poly(x+2, x, modulus=3, domain=QQ)")
raises(OptionError, "Poly(x+2, x, domain=ZZ, gaussian=True)")
raises(OptionError, "Poly(x+2, x, modulus=3, gaussian=True)")
raises(OptionError, "Poly(x+2, x, domain=ZZ, extension=[sqrt(3)])")
raises(OptionError, "Poly(x+2, x, modulus=3, extension=[sqrt(3)])")
raises(OptionError, "Poly(x+2, x, domain=ZZ, extension=True)")
raises(OptionError, "Poly(x+2, x, modulus=3, extension=True)")
raises(OptionError, "Poly(x+2, x, domain=ZZ, greedy=True)")
raises(OptionError, "Poly(x+2, x, domain=QQ, field=True)")
raises(OptionError, "Poly(x+2, x, domain=ZZ, greedy=False)")
raises(OptionError, "Poly(x+2, x, domain=QQ, field=False)")
raises(NotImplementedError, "Poly(x+1, x, modulus=3, order='grlex')")
raises(NotImplementedError, "Poly(x+1, x, order='grlex')")
raises(GeneratorsNeeded, "Poly({1: 2, 0: 1})")
raises(GeneratorsNeeded, "Poly([2, 1])")
raises(GeneratorsNeeded, "Poly((2, 1))")
raises(GeneratorsNeeded, "Poly(1)")
f = a*x**2 + b*x + c
assert Poly({2: a, 1: b, 0: c}, x) == f
assert Poly(iter([a, b, c]), x) == f
assert Poly([a, b, c], x) == f
assert Poly((a, b, c), x) == f
assert Poly(Poly(a*x + b*y, x, y), x) == Poly(a*x + b*y, x)
assert Poly(3*x**2 + 2*x + 1, domain='ZZ').all_coeffs() == [3, 2, 1]
assert Poly(3*x**2 + 2*x + 1, domain='QQ').all_coeffs() == [3, 2, 1]
assert Poly(3*x**2 + 2*x + 1, domain='RR').all_coeffs() == [3.0, 2.0, 1.0]
raises(CoercionFailed, "Poly(3*x**2/5 + 2*x/5 + 1, domain='ZZ')")
assert Poly(3*x**2/5 + 2*x/5 + 1, domain='QQ').all_coeffs() == [S(3)/5, S(2)/5, 1]
assert _epsilon_eq(Poly(3*x**2/5 + 2*x/5 + 1, domain='RR').all_coeffs(), [0.6, 0.4, 1.0])
assert Poly(3.0*x**2 + 2.0*x + 1, domain='ZZ').all_coeffs() == [3, 2, 1]
assert Poly(3.0*x**2 + 2.0*x + 1, domain='QQ').all_coeffs() == [3, 2, 1]
assert Poly(3.0*x**2 + 2.0*x + 1, domain='RR').all_coeffs() == [3.0, 2.0, 1.0]
raises(CoercionFailed, "Poly(3.1*x**2 + 2.1*x + 1, domain='ZZ')")
assert Poly(3.1*x**2 + 2.1*x + 1, domain='QQ').all_coeffs() == [S(31)/10, S(21)/10, 1]
assert Poly(3.1*x**2 + 2.1*x + 1, domain='RR').all_coeffs() == [3.1, 2.1, 1.0]
assert Poly({(2,1): 1, (1,2): 2, (1,1): 3}, x, y) == \
Poly(x**2*y + 2*x*y**2 + 3*x*y, x, y)
assert Poly(x**2 + 1, extension=I).get_domain() == QQ.algebraic_field(I)
    f = 3*x**5 - x**4 + x**3 - x**2 + 65538
    assert Poly(f, x, modulus=65537, symmetric=True) == \
        Poly(3*x**5 - x**4 + x**3 - x**2 + 1, x, modulus=65537, symmetric=True)
    assert Poly(f, x, modulus=65537, symmetric=False) == \
        Poly(3*x**5 + 65536*x**4 + x**3 + 65536*x**2 + 1, x, modulus=65537, symmetric=False)
assert Poly(x**2 + x + 1.0).get_domain() == RR
def test_Poly__args():
assert Poly(x**2 + 1).args == [x**2 + 1]
def test_Poly__gens():
assert Poly((x-p)*(x-q), x).gens == (x,)
assert Poly((x-p)*(x-q), p).gens == (p,)
assert Poly((x-p)*(x-q), q).gens == (q,)
assert Poly((x-p)*(x-q), x, p).gens == (x, p)
assert Poly((x-p)*(x-q), x, q).gens == (x, q)
assert Poly((x-p)*(x-q), x, p, q).gens == (x, p, q)
assert Poly((x-p)*(x-q), p, x, q).gens == (p, x, q)
assert Poly((x-p)*(x-q), p, q, x).gens == (p, q, x)
assert Poly((x-p)*(x-q)).gens == (x, p, q)
assert Poly((x-p)*(x-q), sort='x > p > q').gens == (x, p, q)
assert Poly((x-p)*(x-q), sort='p > x > q').gens == (p, x, q)
assert Poly((x-p)*(x-q), sort='p > q > x').gens == (p, q, x)
assert Poly((x-p)*(x-q), x, p, q, sort='p > q > x').gens == (x, p, q)
assert Poly((x-p)*(x-q), wrt='x').gens == (x, p, q)
assert Poly((x-p)*(x-q), wrt='p').gens == (p, x, q)
assert Poly((x-p)*(x-q), wrt='q').gens == (q, x, p)
assert Poly((x-p)*(x-q), wrt=x).gens == (x, p, q)
assert Poly((x-p)*(x-q), wrt=p).gens == (p, x, q)
assert Poly((x-p)*(x-q), wrt=q).gens == (q, x, p)
assert Poly((x-p)*(x-q), x, p, q, wrt='p').gens == (x, p, q)
assert Poly((x-p)*(x-q), wrt='p', sort='q > x').gens == (p, q, x)
assert Poly((x-p)*(x-q), wrt='q', sort='p > x').gens == (q, p, x)
def test_Poly_zero():
assert Poly(x).zero == Poly(0, x, domain=ZZ)
assert Poly(x/2).zero == Poly(0, x, domain=QQ)
def test_Poly_one():
assert Poly(x).one == Poly(1, x, domain=ZZ)
assert Poly(x/2).one == Poly(1, x, domain=QQ)
def test_Poly__unify():
raises(UnificationFailed, "Poly(x)._unify(y)")
K = FF(3)
raises(UnificationFailed, "Poly(x, x, modulus=3)._unify(Poly(x, x, modulus=5))")
assert Poly(x, x, modulus=3)._unify(Poly(y, y, modulus=3))[2:] == (DMP([[K(1)],[]], K), DMP([[K(1),K(0)]], K))
assert Poly(y, x, y)._unify(Poly(x, x, modulus=3))[2:] == (DMP([[K(1),K(0)]], K), DMP([[K(1)],[]], K))
assert Poly(x, x, modulus=3)._unify(Poly(y, x, y))[2:] == (DMP([[K(1)],[]], K), DMP([[K(1),K(0)]], K))
assert Poly(x+1, x)._unify(Poly(x+2, x))[2:] == (DMP([1, 1], ZZ), DMP([1, 2], ZZ))
assert Poly(x+1, x, domain='QQ')._unify(Poly(x+2, x))[2:] == (DMP([1, 1], QQ), DMP([1, 2], QQ))
assert Poly(x+1, x)._unify(Poly(x+2, x, domain='QQ'))[2:] == (DMP([1, 1], QQ), DMP([1, 2], QQ))
assert Poly(x+1, x)._unify(Poly(x+2, x, y))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
assert Poly(x+1, x, domain='QQ')._unify(Poly(x+2, x, y))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x+1, x)._unify(Poly(x+2, x, y, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x+1, x, y)._unify(Poly(x+2, x))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
assert Poly(x+1, x, y, domain='QQ')._unify(Poly(x+2, x))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x+1, x, y)._unify(Poly(x+2, x, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x+1, x, y)._unify(Poly(x+2, x, y))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
assert Poly(x+1, x, y, domain='QQ')._unify(Poly(x+2, x, y))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x+1, x, y)._unify(Poly(x+2, x, y, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x+1, x)._unify(Poly(x+2, y, x))[2:] == (DMP([[1, 1]], ZZ), DMP([[1, 2]], ZZ))
assert Poly(x+1, x, domain='QQ')._unify(Poly(x+2, y, x))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
assert Poly(x+1, x)._unify(Poly(x+2, y, x, domain='QQ'))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
assert Poly(x+1, y, x)._unify(Poly(x+2, x))[2:] == (DMP([[1, 1]], ZZ), DMP([[1, 2]], ZZ))
assert Poly(x+1, y, x, domain='QQ')._unify(Poly(x+2, x))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
assert Poly(x+1, y, x)._unify(Poly(x+2, x, domain='QQ'))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
assert Poly(x+1, x, y)._unify(Poly(x+2, y, x))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
assert Poly(x+1, x, y, domain='QQ')._unify(Poly(x+2, y, x))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x+1, x, y)._unify(Poly(x+2, y, x, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
assert Poly(x+1, y, x)._unify(Poly(x+2, x, y))[2:] == (DMP([[1, 1]], ZZ), DMP([[1, 2]], ZZ))
assert Poly(x+1, y, x, domain='QQ')._unify(Poly(x+2, x, y))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
assert Poly(x+1, y, x)._unify(Poly(x+2, x, y, domain='QQ'))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
assert Poly(a*x, x, domain='ZZ[a]')._unify(Poly(a*b*x, x, domain='ZZ(a,b)'))[2:] == \
(DMP([DMF(([[1], []], [[1]]), ZZ), DMF(([[]], [[1]]), ZZ)], ZZ.frac_field(a,b)),
DMP([DMF(([[1, 0], []], [[1]]), ZZ), DMF(([[]], [[1]]), ZZ)], ZZ.frac_field(a,b)))
assert Poly(a*x, x, domain='ZZ(a)')._unify(Poly(a*b*x, x, domain='ZZ(a,b)'))[2:] == \
(DMP([DMF(([[1], []], [[1]]), ZZ), DMF(([[]], [[1]]), ZZ)], ZZ.frac_field(a,b)),
DMP([DMF(([[1, 0], []], [[1]]), ZZ), DMF(([[]], [[1]]), ZZ)], ZZ.frac_field(a,b)))
raises(CoercionFailed, "Poly(Poly(x**2 + x**2*z, y, field=True), domain='ZZ(x)')")
def test_Poly_free_symbols():
assert Poly(x**2 + 1).free_symbols == set([x])
assert Poly(x**2 + y*z).free_symbols == set([x, y, z])
assert Poly(x**2 + y*z, x).free_symbols == set([x, y, z])
assert Poly(x**2 + sin(y*z)).free_symbols == set([x, y, z])
assert Poly(x**2 + sin(y*z), x).free_symbols == set([x, y, z])
assert Poly(x**2 + sin(y*z), x, domain=EX).free_symbols == set([x, y, z])
def test_PurePoly_free_symbols():
assert PurePoly(x**2 + 1).free_symbols == set([])
assert PurePoly(x**2 + y*z).free_symbols == set([])
assert PurePoly(x**2 + y*z, x).free_symbols == set([y, z])
assert PurePoly(x**2 + sin(y*z)).free_symbols == set([])
assert PurePoly(x**2 + sin(y*z), x).free_symbols == set([y, z])
assert PurePoly(x**2 + sin(y*z), x, domain=EX).free_symbols == set([y, z])
def test_Poly__eq__():
assert (Poly(x, x) == Poly(x, x)) == True
assert (Poly(x, x, domain=QQ) == Poly(x, x)) == True
assert (Poly(x, x) == Poly(x, x, domain=QQ)) == True
assert (Poly(x, x, domain=ZZ[a]) == Poly(x, x)) == True
assert (Poly(x, x) == Poly(x, x, domain=ZZ[a])) == True
assert (Poly(x*y, x, y) == Poly(x, x)) == False
assert (Poly(x, x, y) == Poly(x, x)) == False
assert (Poly(x, x) == Poly(x, x, y)) == False
assert (Poly(x**2 + 1, x) == Poly(y**2 + 1, y)) == False
assert (Poly(y**2 + 1, y) == Poly(x**2 + 1, x)) == False
f = Poly(x, x, domain=ZZ)
g = Poly(x, x, domain=QQ)
assert f.eq(g) == True
assert f.ne(g) == False
assert f.eq(g, strict=True) == False
assert f.ne(g, strict=True) == True
def test_PurePoly__eq__():
assert (PurePoly(x, x) == PurePoly(x, x)) == True
assert (PurePoly(x, x, domain=QQ) == PurePoly(x, x)) == True
assert (PurePoly(x, x) == PurePoly(x, x, domain=QQ)) == True
assert (PurePoly(x, x, domain=ZZ[a]) == PurePoly(x, x)) == True
assert (PurePoly(x, x) == PurePoly(x, x, domain=ZZ[a])) == True
assert (PurePoly(x*y, x, y) == PurePoly(x, x)) == False
assert (PurePoly(x, x, y) == PurePoly(x, x)) == False
assert (PurePoly(x, x) == PurePoly(x, x, y)) == False
assert (PurePoly(x**2 + 1, x) == PurePoly(y**2 + 1, y)) == True
assert (PurePoly(y**2 + 1, y) == PurePoly(x**2 + 1, x)) == True
f = PurePoly(x, x, domain=ZZ)
g = PurePoly(x, x, domain=QQ)
assert f.eq(g) == True
assert f.ne(g) == False
assert f.eq(g, strict=True) == False
assert f.ne(g, strict=True) == True
f = PurePoly(x, x, domain=ZZ)
g = PurePoly(y, y, domain=QQ)
assert f.eq(g) == True
assert f.ne(g) == False
assert f.eq(g, strict=True) == False
assert f.ne(g, strict=True) == True
def test_PurePoly_Poly():
assert isinstance(PurePoly(Poly(x**2 + 1)), PurePoly) == True
assert isinstance(Poly(PurePoly(x**2 + 1)), Poly) == True
def test_Poly_get_domain():
assert Poly(2*x).get_domain() == ZZ
assert Poly(2*x, domain='ZZ').get_domain() == ZZ
assert Poly(2*x, domain='QQ').get_domain() == QQ
assert Poly(x/2).get_domain() == QQ
raises(CoercionFailed, "Poly(x/2, domain='ZZ')")
assert Poly(x/2, domain='QQ').get_domain() == QQ
assert Poly(0.2*x).get_domain() == RR
def test_Poly_set_domain():
assert Poly(2*x + 1).set_domain(ZZ) == Poly(2*x + 1)
assert Poly(2*x + 1).set_domain('ZZ') == Poly(2*x + 1)
assert Poly(2*x + 1).set_domain(QQ) == Poly(2*x + 1, domain='QQ')
assert Poly(2*x + 1).set_domain('QQ') == Poly(2*x + 1, domain='QQ')
assert Poly(S(2)/10*x + S(1)/10).set_domain('RR') == Poly(0.2*x + 0.1)
assert Poly(0.2*x + 0.1).set_domain('QQ') == Poly(S(2)/10*x + S(1)/10)
raises(CoercionFailed, "Poly(x/2 + 1).set_domain(ZZ)")
raises(CoercionFailed, "Poly(x + 1, modulus=2).set_domain(QQ)")
raises(GeneratorsError, "Poly(x*y, x, y).set_domain(ZZ[y])")
def test_Poly_get_modulus():
assert Poly(x**2 + 1, modulus=2).get_modulus() == 2
raises(PolynomialError, "Poly(x**2 + 1).get_modulus()")
def test_Poly_set_modulus():
assert Poly(x**2 + 1, modulus=2).set_modulus(7) == Poly(x**2 + 1, modulus=7)
assert Poly(x**2 + 5, modulus=7).set_modulus(2) == Poly(x**2 + 1, modulus=2)
assert Poly(x**2 + 1).set_modulus(2) == Poly(x**2 + 1, modulus=2)
raises(CoercionFailed, "Poly(x/2 + 1).set_modulus(2)")
def test_Poly_add_ground():
assert Poly(x + 1).add_ground(2) == Poly(x + 3)
def test_Poly_sub_ground():
assert Poly(x + 1).sub_ground(2) == Poly(x - 1)
def test_Poly_mul_ground():
assert Poly(x + 1).mul_ground(2) == Poly(2*x + 2)
def test_Poly_quo_ground():
assert Poly(2*x + 4).quo_ground(2) == Poly(x + 2)
assert Poly(2*x + 3).quo_ground(2) == Poly(x + 1)
def test_Poly_exquo_ground():
assert Poly(2*x + 4).exquo_ground(2) == Poly(x + 2)
raises(ExactQuotientFailed, "Poly(2*x + 3).exquo_ground(2)")
def test_Poly_abs():
assert Poly(-x+1, x).abs() == abs(Poly(-x+1, x)) == Poly(x+1, x)
def test_Poly_neg():
assert Poly(-x+1, x).neg() == -Poly(-x+1, x) == Poly(x-1, x)
def test_Poly_add():
assert Poly(0, x).add(Poly(0, x)) == Poly(0, x)
assert Poly(0, x) + Poly(0, x) == Poly(0, x)
assert Poly(1, x).add(Poly(0, x)) == Poly(1, x)
assert Poly(1, x, y) + Poly(0, x) == Poly(1, x, y)
assert Poly(0, x).add(Poly(1, x, y)) == Poly(1, x, y)
assert Poly(0, x, y) + Poly(1, x, y) == Poly(1, x, y)
assert Poly(1, x) + x == Poly(x+1, x)
assert Poly(1, x) + sin(x) == 1+sin(x)
assert Poly(x, x) + 1 == Poly(x+1, x)
assert 1 + Poly(x, x) == Poly(x+1, x)
def test_Poly_sub():
assert Poly(0, x).sub(Poly(0, x)) == Poly(0, x)
assert Poly(0, x) - Poly(0, x) == Poly(0, x)
assert Poly(1, x).sub(Poly(0, x)) == Poly(1, x)
assert Poly(1, x, y) - Poly(0, x) == Poly(1, x, y)
assert Poly(0, x).sub(Poly(1, x, y)) == Poly(-1, x, y)
assert Poly(0, x, y) - Poly(1, x, y) == Poly(-1, x, y)
assert Poly(1, x) - x == Poly(1-x, x)
assert Poly(1, x) - sin(x) == 1-sin(x)
assert Poly(x, x) - 1 == Poly(x-1, x)
assert 1 - Poly(x, x) == Poly(1-x, x)
def test_Poly_mul():
assert Poly(0, x).mul(Poly(0, x)) == Poly(0, x)
assert Poly(0, x) * Poly(0, x) == Poly(0, x)
assert Poly(2, x).mul(Poly(4, x)) == Poly(8, x)
assert Poly(2, x, y) * Poly(4, x) == Poly(8, x, y)
assert Poly(4, x).mul(Poly(2, x, y)) == Poly(8, x, y)
assert Poly(4, x, y) * Poly(2, x, y) == Poly(8, x, y)
assert Poly(1, x) * x == Poly(x, x)
assert Poly(1, x) * sin(x) == sin(x)
assert Poly(x, x) * 2 == Poly(2*x, x)
assert 2 * Poly(x, x) == Poly(2*x, x)
def test_Poly_sqr():
assert Poly(x*y, x, y).sqr() == Poly(x**2*y**2, x, y)
def test_Poly_pow():
assert Poly(x, x).pow(10) == Poly(x**10, x)
assert Poly(x, x).pow(Integer(10)) == Poly(x**10, x)
assert Poly(2*y, x, y).pow(4) == Poly(16*y**4, x, y)
assert Poly(2*y, x, y).pow(Integer(4)) == Poly(16*y**4, x, y)
assert Poly(7*x*y, x, y)**3 == Poly(343*x**3*y**3, x, y)
assert Poly(x*y+1, x, y)**(-1) == (x*y+1)**(-1)
assert Poly(x*y+1, x, y)**x == (x*y+1)**x
def test_Poly_divmod():
f, g = Poly(x**2), Poly(x)
q, r = g, Poly(0, x)
assert divmod(f, g) == (q, r)
assert f // g == q
assert f % g == r
assert divmod(f, x) == (q, r)
assert f // x == q
assert f % x == r
q, r = Poly(0, x), Poly(2, x)
assert divmod(2, g) == (q, r)
assert 2 // g == q
assert 2 % g == r
assert Poly(x)/Poly(x) == 1
assert Poly(x**2)/Poly(x) == x
assert Poly(x)/Poly(x**2) == 1/x
def test_Poly_eq_ne():
assert (Poly(x+y, x, y) == Poly(x+y, x, y)) == True
assert (Poly(x+y, x) == Poly(x+y, x, y)) == False
assert (Poly(x+y, x, y) == Poly(x+y, x)) == False
assert (Poly(x+y, x) == Poly(x+y, x)) == True
assert (Poly(x+y, y) == Poly(x+y, y)) == True
assert (Poly(x+y, x, y) == x+y) == True
assert (Poly(x+y, x) == x+y) == True
assert (Poly(x+y, x, y) == x+y) == True
assert (Poly(x+y, x) == x+y) == True
assert (Poly(x+y, y) == x+y) == True
assert (Poly(x+y, x, y) != Poly(x+y, x, y)) == False
assert (Poly(x+y, x) != Poly(x+y, x, y)) == True
assert (Poly(x+y, x, y) != Poly(x+y, x)) == True
assert (Poly(x+y, x) != Poly(x+y, x)) == False
assert (Poly(x+y, y) != Poly(x+y, y)) == False
assert (Poly(x+y, x, y) != x+y) == False
assert (Poly(x+y, x) != x+y) == False
assert (Poly(x+y, x, y) != x+y) == False
assert (Poly(x+y, x) != x+y) == False
assert (Poly(x+y, y) != x+y) == False
assert (Poly(x, x) == sin(x)) == False
assert (Poly(x, x) != sin(x)) == True
def test_Poly_nonzero():
assert not bool(Poly(0, x)) == True
assert not bool(Poly(1, x)) == False
def test_Poly_properties():
assert Poly(0, x).is_zero == True
assert Poly(1, x).is_zero == False
assert Poly(1, x).is_one == True
assert Poly(2, x).is_one == False
assert Poly(x-1, x).is_sqf == True
assert Poly((x-1)**2, x).is_sqf == False
assert Poly(x-1, x).is_monic == True
assert Poly(2*x-1, x).is_monic == False
assert Poly(3*x+2, x).is_primitive == True
assert Poly(4*x+2, x).is_primitive == False
assert Poly(1, x).is_ground == True
assert Poly(x, x).is_ground == False
assert Poly(x+y+z+1).is_linear == True
assert Poly(x*y*z+1).is_linear == False
assert Poly(x*y+z+1).is_quadratic == True
assert Poly(x*y*z+1).is_quadratic == False
assert Poly(x*y).is_monomial == True
assert Poly(x*y+1).is_monomial == False
assert Poly(x**2 + x*y).is_homogeneous == True
assert Poly(x**3 + x*y).is_homogeneous == False
assert Poly(x).is_univariate == True
assert Poly(x*y).is_univariate == False
assert Poly(x*y).is_multivariate == True
assert Poly(x).is_multivariate == False
assert Poly(x**16 + x**14 - x**10 + x**8 - x**6 + x**2 + 1).is_cyclotomic == False
assert Poly(x**16 + x**14 - x**10 - x**8 - x**6 + x**2 + 1).is_cyclotomic == True
def test_Poly_is_irreducible():
assert Poly(x**2 + x + 1).is_irreducible == True
assert Poly(x**2 + 2*x + 1).is_irreducible == False
assert Poly(7*x + 3, modulus=11).is_irreducible == True
assert Poly(7*x**2 + 3*x + 1, modulus=11).is_irreducible == False
def test_Poly_subs():
assert Poly(x + 1).subs(x, 0) == 1
assert Poly(x + 1).subs(x, x) == Poly(x + 1)
assert Poly(x + 1).subs(x, y) == Poly(y + 1)
assert Poly(x*y, x).subs(y, x) == x**2
assert Poly(x*y, x).subs(x, y) == y**2
def test_Poly_replace():
assert Poly(x + 1).replace(x) == Poly(x + 1)
assert Poly(x + 1).replace(y) == Poly(y + 1)
raises(PolynomialError, "Poly(x + y).replace(z)")
assert Poly(x + 1).replace(x, x) == Poly(x + 1)
assert Poly(x + 1).replace(x, y) == Poly(y + 1)
assert Poly(x + y).replace(x, x) == Poly(x + y)
assert Poly(x + y).replace(x, z) == Poly(z + y, z, y)
assert Poly(x + y).replace(y, y) == Poly(x + y)
assert Poly(x + y).replace(y, z) == Poly(x + z, x, z)
raises(PolynomialError, "Poly(x + y).replace(x, y)")
raises(PolynomialError, "Poly(x + y).replace(z, t)")
assert Poly(x + y, x).replace(x, z) == Poly(z + y, z)
assert Poly(x + y, y).replace(y, z) == Poly(x + z, z)
raises(PolynomialError, "Poly(x + y, x).replace(x, y)")
raises(PolynomialError, "Poly(x + y, y).replace(y, x)")
def test_Poly_reorder():
raises(PolynomialError, "Poly(x+y).reorder(x, z)")
assert Poly(x + y, x, y).reorder(x, y) == Poly(x + y, x, y)
assert Poly(x + y, x, y).reorder(y, x) == Poly(x + y, y, x)
assert Poly(x + y, y, x).reorder(x, y) == Poly(x + y, x, y)
assert Poly(x + y, y, x).reorder(y, x) == Poly(x + y, y, x)
assert Poly(x + y, x, y).reorder(wrt=x) == Poly(x + y, x, y)
assert Poly(x + y, x, y).reorder(wrt=y) == Poly(x + y, y, x)
def test_Poly_ltrim():
f = Poly(y**2 + y*z**2, x, y, z).ltrim(y)
assert f.as_expr() == y**2 + y*z**2 and f.gens == (y, z)
raises(PolynomialError, "Poly(x*y**2 + y**2, x, y).ltrim(y)")
def test_Poly_has_only_gens():
assert Poly(x*y + 1, x, y, z).has_only_gens(x, y) == True
assert Poly(x*y + z, x, y, z).has_only_gens(x, y) == False
raises(GeneratorsError, "Poly(x*y**2 + y**2, x, y).has_only_gens(t)")
def test_Poly_to_ring():
assert Poly(2*x+1, domain='ZZ').to_ring() == Poly(2*x+1, domain='ZZ')
assert Poly(2*x+1, domain='QQ').to_ring() == Poly(2*x+1, domain='ZZ')
raises(CoercionFailed, "Poly(x/2+1).to_ring()")
raises(DomainError, "Poly(2*x+1, modulus=3).to_ring()")
def test_Poly_to_field():
assert Poly(2*x+1, domain='ZZ').to_field() == Poly(2*x+1, domain='QQ')
assert Poly(2*x+1, domain='QQ').to_field() == Poly(2*x+1, domain='QQ')
assert Poly(x/2+1, domain='QQ').to_field() == Poly(x/2+1, domain='QQ')
assert Poly(2*x+1, modulus=3).to_field() == Poly(2*x+1, modulus=3)
raises(DomainError, "Poly(2.0*x + 1.0).to_field()")
def test_Poly_to_exact():
assert Poly(2*x).to_exact() == Poly(2*x)
assert Poly(x/2).to_exact() == Poly(x/2)
assert Poly(0.1*x).to_exact() == Poly(x/10)
def test_Poly_retract():
f = Poly(x**2 + 1, x, domain=QQ[y])
assert f.retract() == Poly(x**2 + 1, x, domain='ZZ')
assert f.retract(field=True) == Poly(x**2 + 1, x, domain='QQ')
assert Poly(0, x, y).retract() == Poly(0, x, y)
def test_Poly_slice():
f = Poly(x**3 + 2*x**2 + 3*x + 4)
assert f.slice(0, 0) == Poly(0, x)
assert f.slice(0, 1) == Poly(4, x)
assert f.slice(0, 2) == Poly(3*x + 4, x)
assert f.slice(0, 3) == Poly(2*x**2 + 3*x + 4, x)
assert f.slice(0, 4) == Poly(x**3 + 2*x**2 + 3*x + 4, x)
assert f.slice(x, 0, 0) == Poly(0, x)
assert f.slice(x, 0, 1) == Poly(4, x)
assert f.slice(x, 0, 2) == Poly(3*x + 4, x)
assert f.slice(x, 0, 3) == Poly(2*x**2 + 3*x + 4, x)
assert f.slice(x, 0, 4) == Poly(x**3 + 2*x**2 + 3*x + 4, x)
def test_Poly_coeffs():
assert Poly(0, x).coeffs() == [0]
assert Poly(1, x).coeffs() == [1]
assert Poly(2*x+1, x).coeffs() == [2,1]
assert Poly(7*x**2+2*x+1, x).coeffs() == [7,2,1]
assert Poly(7*x**4+2*x+1, x).coeffs() == [7,2,1]
assert Poly(x*y**7 + 2*x**2*y**3).coeffs('lex') == [2, 1]
assert Poly(x*y**7 + 2*x**2*y**3).coeffs('grlex') == [1, 2]
def test_Poly_monoms():
assert Poly(0, x).monoms() == [(0,)]
assert Poly(1, x).monoms() == [(0,)]
assert Poly(2*x+1, x).monoms() == [(1,),(0,)]
assert Poly(7*x**2+2*x+1, x).monoms() == [(2,),(1,),(0,)]
assert Poly(7*x**4+2*x+1, x).monoms() == [(4,),(1,),(0,)]
assert Poly(x*y**7 + 2*x**2*y**3).monoms('lex') == [(2, 3), (1, 7)]
assert Poly(x*y**7 + 2*x**2*y**3).monoms('grlex') == [(1, 7), (2, 3)]
def test_Poly_terms():
assert Poly(0, x).terms() == [((0,), 0)]
assert Poly(1, x).terms() == [((0,), 1)]
assert Poly(2*x+1, x).terms() == [((1,), 2),((0,), 1)]
assert Poly(7*x**2+2*x+1, x).terms() == [((2,), 7),((1,), 2),((0,), 1)]
assert Poly(7*x**4+2*x+1, x).terms() == [((4,), 7),((1,), 2),((0,), 1)]
assert Poly(x*y**7 + 2*x**2*y**3).terms('lex') == [((2, 3), 2), ((1, 7), 1)]
assert Poly(x*y**7 + 2*x**2*y**3).terms('grlex') == [((1, 7), 1), ((2, 3), 2)]
def test_Poly_all_coeffs():
assert Poly(0, x).all_coeffs() == [0]
assert Poly(1, x).all_coeffs() == [1]
assert Poly(2*x+1, x).all_coeffs() == [2,1]
assert Poly(7*x**2+2*x+1, x).all_coeffs() == [7,2,1]
assert Poly(7*x**4+2*x+1, x).all_coeffs() == [7,0,0,2,1]
def test_Poly_all_monoms():
assert Poly(0, x).all_monoms() == [(0,)]
assert Poly(1, x).all_monoms() == [(0,)]
assert Poly(2*x+1, x).all_monoms() == [(1,),(0,)]
assert Poly(7*x**2+2*x+1, x).all_monoms() == [(2,),(1,),(0,)]
assert Poly(7*x**4+2*x+1, x).all_monoms() == [(4,),(3,),(2,),(1,),(0,)]
def test_Poly_all_terms():
assert Poly(0, x).all_terms() == [((0,), 0)]
assert Poly(1, x).all_terms() == [((0,), 1)]
assert Poly(2*x+1, x).all_terms() == [((1,), 2),((0,), 1)]
assert Poly(7*x**2+2*x+1, x).all_terms() == [((2,), 7),((1,), 2),((0,), 1)]
assert Poly(7*x**4+2*x+1, x).all_terms() == [((4,), 7),((3,),0),((2,),0),((1,), 2),((0,), 1)]
def test_Poly_termwise():
f = Poly(x**2 + 20*x + 400)
g = Poly(x**2 + 2*x + 4)
def func(monom, coeff):
(k,) = monom
return coeff//10**(2-k)
assert f.termwise(func) == g
def func(monom, coeff):
(k,) = monom
return (k,), coeff//10**(2-k)
assert f.termwise(func) == g
def test_Poly_length():
assert Poly(0, x).length() == 0
assert Poly(1, x).length() == 1
assert Poly(x, x).length() == 1
assert Poly(x+1, x).length() == 2
assert Poly(x**2+1, x).length() == 2
assert Poly(x**2+x+1, x).length() == 3
def test_Poly_as_dict():
assert Poly(0, x).as_dict() == {}
assert Poly(0, x, y, z).as_dict() == {}
assert Poly(1, x).as_dict() == {(0,): 1}
assert Poly(1, x, y, z).as_dict() == {(0,0,0): 1}
assert Poly(x**2+3, x).as_dict() == {(2,): 1, (0,): 3}
assert Poly(x**2+3, x, y, z).as_dict() == {(2,0,0): 1, (0,0,0): 3}
assert Poly(3*x**2*y*z**3+4*x*y+5*x*z).as_dict() == {(2,1,3): 3, (1,1,0): 4, (1,0,1): 5}
def test_Poly_as_expr():
assert Poly(0, x).as_expr() == 0
assert Poly(0, x, y, z).as_expr() == 0
assert Poly(1, x).as_expr() == 1
assert Poly(1, x, y, z).as_expr() == 1
assert Poly(x**2+3, x).as_expr() == x**2 + 3
assert Poly(x**2+3, x, y, z).as_expr() == x**2 + 3
assert Poly(3*x**2*y*z**3+4*x*y+5*x*z).as_expr() == 3*x**2*y*z**3 + 4*x*y + 5*x*z
f = Poly(x**2 + 2*x*y**2 - y, x, y)
assert f.as_expr() == -y + x**2 + 2*x*y**2
assert f.as_expr({x: 5}) == 25 - y + 10*y**2
assert f.as_expr({y: 6}) == -6 + 72*x + x**2
assert f.as_expr({x: 5, y: 6}) == 379
assert f.as_expr(5, 6) == 379
raises(GeneratorsError, "f.as_expr({z: 7})")
def test_Poly_lift():
assert Poly(x**4 - I*x + 17*I, x, gaussian=True).lift() == \
Poly(x**16 + 2*x**10 + 578*x**8 + x**4 - 578*x**2 + 83521, x, domain='QQ')
def test_Poly_deflate():
assert Poly(0, x).deflate() == ((1,), Poly(0, x))
assert Poly(1, x).deflate() == ((1,), Poly(1, x))
assert Poly(x, x).deflate() == ((1,), Poly(x, x))
assert Poly(x**2, x).deflate() == ((2,), Poly(x, x))
assert Poly(x**17, x).deflate() == ((17,), Poly(x, x))
assert Poly(x**2*y*z**11+x**4*z**11).deflate() == ((2,1,11), Poly(x*y*z+x**2*z))
def test_Poly_inject():
f = Poly(x**2*y + x*y**3 + x*y + 1, x)
assert f.inject() == Poly(x**2*y + x*y**3 + x*y + 1, x, y)
assert f.inject(front=True) == Poly(y**3*x + y*x**2 + y*x + 1, y, x)
def test_Poly_eject():
f = Poly(x**2*y + x*y**3 + x*y + 1, x, y)
assert f.eject(x) == Poly(x*y**3 + (x**2 + x)*y + 1, y, domain='ZZ[x]')
assert f.eject(y) == Poly(y*x**2 + (y**3 + y)*x + 1, x, domain='ZZ[y]')
raises(DomainError, "Poly(x*y, x, y, domain=ZZ[z]).eject(y)")
raises(NotImplementedError, "Poly(x*y, x, y, z).eject(y)")
def test_Poly_exclude():
assert Poly(x, x, y).exclude() == Poly(x, x)
assert Poly(x*y, x, y).exclude() == Poly(x*y, x, y)
assert Poly(1, x, y).exclude() == Poly(1, x, y)
def test_Poly__gen_to_level():
assert Poly(1, x, y)._gen_to_level(-2) == 0
assert Poly(1, x, y)._gen_to_level(-1) == 1
assert Poly(1, x, y)._gen_to_level( 0) == 0
assert Poly(1, x, y)._gen_to_level( 1) == 1
raises(PolynomialError, "Poly(1, x, y)._gen_to_level(-3)")
raises(PolynomialError, "Poly(1, x, y)._gen_to_level( 2)")
assert Poly(1, x, y)._gen_to_level(x) == 0
assert Poly(1, x, y)._gen_to_level(y) == 1
assert Poly(1, x, y)._gen_to_level('x') == 0
assert Poly(1, x, y)._gen_to_level('y') == 1
raises(PolynomialError, "Poly(1, x, y)._gen_to_level(z)")
raises(PolynomialError, "Poly(1, x, y)._gen_to_level('z')")
def test_Poly_degree():
assert Poly(0, x).degree() ==-1
assert Poly(1, x).degree() == 0
assert Poly(x, x).degree() == 1
assert Poly(0, x).degree(gen=0) ==-1
assert Poly(1, x).degree(gen=0) == 0
assert Poly(x, x).degree(gen=0) == 1
assert Poly(0, x).degree(gen=x) ==-1
assert Poly(1, x).degree(gen=x) == 0
assert Poly(x, x).degree(gen=x) == 1
assert Poly(0, x).degree(gen='x') ==-1
assert Poly(1, x).degree(gen='x') == 0
assert Poly(x, x).degree(gen='x') == 1
raises(PolynomialError, "Poly(1, x).degree(gen=1)")
raises(PolynomialError, "Poly(1, x).degree(gen=y)")
raises(PolynomialError, "Poly(1, x).degree(gen='y')")
assert Poly(1, x, y).degree() == 0
assert Poly(2*y, x, y).degree() == 0
assert Poly(x*y, x, y).degree() == 1
assert Poly(1, x, y).degree(gen=x) == 0
assert Poly(2*y, x, y).degree(gen=x) == 0
assert Poly(x*y, x, y).degree(gen=x) == 1
assert Poly(1, x, y).degree(gen=y) == 0
assert Poly(2*y, x, y).degree(gen=y) == 1
assert Poly(x*y, x, y).degree(gen=y) == 1
assert degree(1, x) == 0
assert degree(x, x) == 1
assert degree(x*y**2, gen=x) == 1
assert degree(x*y**2, gen=y) == 2
assert degree(x*y**2, x, y) == 1
assert degree(x*y**2, y, x) == 2
raises(ComputationFailed, "degree(1)")
def test_Poly_degree_list():
assert Poly(0, x).degree_list() == (-1,)
assert Poly(0, x, y).degree_list() == (-1,-1)
assert Poly(0, x, y, z).degree_list() == (-1,-1,-1)
assert Poly(1, x).degree_list() == (0,)
assert Poly(1, x, y).degree_list() == (0,0)
assert Poly(1, x, y, z).degree_list() == (0,0,0)
assert Poly(x**2*y+x**3*z**2+1).degree_list() == (3,1,2)
assert degree_list(1, x) == (0,)
assert degree_list(x, x) == (1,)
assert degree_list(x*y**2) == (1,2)
raises(ComputationFailed, "degree_list(1)")
def test_Poly_total_degree():
assert Poly(x**2*y+x**3*z**2+1).total_degree() == 5
assert Poly(x**2 + z**3).total_degree() == 3
assert Poly(x*y*z + z**4).total_degree() == 4
assert Poly(x**3 + x + 1).total_degree() == 3
def test_Poly_homogeneous_order():
assert Poly(0, x, y).homogeneous_order() == -1
assert Poly(1, x, y).homogeneous_order() == 0
assert Poly(x, x, y).homogeneous_order() == 1
assert Poly(x*y, x, y).homogeneous_order() == 2
assert Poly(x + 1, x, y).homogeneous_order() is None
assert Poly(x*y + x, x, y).homogeneous_order() is None
assert Poly(x**5 + 2*x**3*y**2 + 9*x*y**4).homogeneous_order() == 5
assert Poly(x**5 + 2*x**3*y**3 + 9*x*y**4).homogeneous_order() is None
def test_Poly_LC():
assert Poly(0, x).LC() == 0
assert Poly(1, x).LC() == 1
assert Poly(2*x**2+x, x).LC() == 2
assert Poly(x*y**7 + 2*x**2*y**3).LC('lex') == 2
assert Poly(x*y**7 + 2*x**2*y**3).LC('grlex') == 1
assert LC(x*y**7 + 2*x**2*y**3, order='lex') == 2
assert LC(x*y**7 + 2*x**2*y**3, order='grlex') == 1
def test_Poly_TC():
assert Poly(0, x).TC() == 0
assert Poly(1, x).TC() == 1
assert Poly(2*x**2+x, x).TC() == 0
def test_Poly_EC():
assert Poly(0, x).EC() == 0
assert Poly(1, x).EC() == 1
assert Poly(2*x**2+x, x).EC() == 1
assert Poly(x*y**7 + 2*x**2*y**3).EC('lex') == 1
assert Poly(x*y**7 + 2*x**2*y**3).EC('grlex') == 2
def test_Poly_nth():
assert Poly(0, x).nth(0) == 0
assert Poly(0, x).nth(1) == 0
assert Poly(1, x).nth(0) == 1
assert Poly(1, x).nth(1) == 0
assert Poly(x**8, x).nth(0) == 0
assert Poly(x**8, x).nth(7) == 0
assert Poly(x**8, x).nth(8) == 1
assert Poly(x**8, x).nth(9) == 0
assert Poly(3*x*y**2 + 1).nth(0, 0) == 1
assert Poly(3*x*y**2 + 1).nth(1, 2) == 3
def test_Poly_LM():
assert Poly(0, x).LM() == (0,)
assert Poly(1, x).LM() == (0,)
assert Poly(2*x**2+x, x).LM() == (2,)
assert Poly(x*y**7 + 2*x**2*y**3).LM('lex') == (2, 3)
assert Poly(x*y**7 + 2*x**2*y**3).LM('grlex') == (1, 7)
assert LM(x*y**7 + 2*x**2*y**3, order='lex') == x**2*y**3
assert LM(x*y**7 + 2*x**2*y**3, order='grlex') == x*y**7
def test_Poly_LM_custom_order():
f = Poly(x**2*y**3*z + x**2*y*z**3 + x*y*z + 1)
rev_lex = lambda monom: tuple(reversed(monom))
assert f.LM(order='lex') == (2, 3, 1)
assert f.LM(order=rev_lex) == (2, 1, 3)
def test_Poly_EM():
assert Poly(0, x).EM() == (0,)
assert Poly(1, x).EM() == (0,)
assert Poly(2*x**2+x, x).EM() == (1,)
assert Poly(x*y**7 + 2*x**2*y**3).EM('lex') == (1, 7)
assert Poly(x*y**7 + 2*x**2*y**3).EM('grlex') == (2, 3)
def test_Poly_LT():
assert Poly(0, x).LT() == ((0,), 0)
assert Poly(1, x).LT() == ((0,), 1)
assert Poly(2*x**2+x, x).LT() == ((2,), 2)
assert Poly(x*y**7 + 2*x**2*y**3).LT('lex') == ((2, 3), 2)
assert Poly(x*y**7 + 2*x**2*y**3).LT('grlex') == ((1, 7), 1)
assert LT(x*y**7 + 2*x**2*y**3, order='lex') == 2*x**2*y**3
assert LT(x*y**7 + 2*x**2*y**3, order='grlex') == x*y**7
def test_Poly_ET():
assert Poly(0, x).ET() == ((0,), 0)
assert Poly(1, x).ET() == ((0,), 1)
assert Poly(2*x**2+x, x).ET() == ((1,), 1)
assert Poly(x*y**7 + 2*x**2*y**3).ET('lex') == ((1, 7), 1)
assert Poly(x*y**7 + 2*x**2*y**3).ET('grlex') == ((2, 3), 2)
def test_Poly_max_norm():
assert Poly(-1, x).max_norm() == 1
assert Poly( 0, x).max_norm() == 0
assert Poly( 1, x).max_norm() == 1
def test_Poly_l1_norm():
assert Poly(-1, x).l1_norm() == 1
assert Poly( 0, x).l1_norm() == 0
assert Poly( 1, x).l1_norm() == 1
def test_Poly_clear_denoms():
coeff, poly = Poly(x + 2, x).clear_denoms()
assert coeff == 1 and poly == Poly(x + 2, x, domain='ZZ') and poly.get_domain() == ZZ
coeff, poly = Poly(x/2 + 1, x).clear_denoms()
assert coeff == 2 and poly == Poly(x + 2, x, domain='QQ') and poly.get_domain() == QQ
coeff, poly = Poly(x/2 + 1, x).clear_denoms(convert=True)
assert coeff == 2 and poly == Poly(x + 2, x, domain='ZZ') and poly.get_domain() == ZZ
coeff, poly = Poly(x/y + 1, x).clear_denoms(convert=True)
assert coeff == y and poly == Poly(x + y, x, domain='ZZ[y]') and poly.get_domain() == ZZ[y]
def test_Poly_rat_clear_denoms():
f = Poly(x**2/y + 1, x)
g = Poly(x**3 + y, x)
assert f.rat_clear_denoms(g) == \
(Poly(x**2 + y, x), Poly(y*x**3 + y**2, x))
f = f.set_domain(EX)
g = g.set_domain(EX)
assert f.rat_clear_denoms(g) == (f, g)
def test_Poly_integrate():
assert Poly(x + 1).integrate() == Poly(x**2/2 + x)
assert Poly(x + 1).integrate(x) == Poly(x**2/2 + x)
assert Poly(x + 1).integrate((x, 1)) == Poly(x**2/2 + x)
assert Poly(x*y + 1).integrate(x) == Poly(x**2*y/2 + x)
assert Poly(x*y + 1).integrate(y) == Poly(x*y**2/2 + y)
assert Poly(x*y + 1).integrate(x, x) == Poly(x**3*y/6 + x**2/2)
assert Poly(x*y + 1).integrate(y, y) == Poly(x*y**3/6 + y**2/2)
assert Poly(x*y + 1).integrate((x, 2)) == Poly(x**3*y/6 + x**2/2)
assert Poly(x*y + 1).integrate((y, 2)) == Poly(x*y**3/6 + y**2/2)
assert Poly(x*y + 1).integrate(x, y) == Poly(x**2*y**2/4 + x*y)
assert Poly(x*y + 1).integrate(y, x) == Poly(x**2*y**2/4 + x*y)
def test_Poly_diff():
assert Poly(x**2 + x).diff() == Poly(2*x + 1)
assert Poly(x**2 + x).diff(x) == Poly(2*x + 1)
assert Poly(x**2 + x).diff((x, 1)) == Poly(2*x + 1)
assert Poly(x**2*y**2 + x*y).diff(x) == Poly(2*x*y**2 + y)
assert Poly(x**2*y**2 + x*y).diff(y) == Poly(2*x**2*y + x)
assert Poly(x**2*y**2 + x*y).diff(x, x) == Poly(2*y**2, x, y)
assert Poly(x**2*y**2 + x*y).diff(y, y) == Poly(2*x**2, x, y)
assert Poly(x**2*y**2 + x*y).diff((x, 2)) == Poly(2*y**2, x, y)
assert Poly(x**2*y**2 + x*y).diff((y, 2)) == Poly(2*x**2, x, y)
assert Poly(x**2*y**2 + x*y).diff(x, y) == Poly(4*x*y + 1)
assert Poly(x**2*y**2 + x*y).diff(y, x) == Poly(4*x*y + 1)
def test_Poly_eval():
assert Poly(0, x).eval(7) == 0
assert Poly(1, x).eval(7) == 1
assert Poly(x, x).eval(7) == 7
assert Poly(0, x).eval(0, 7) == 0
assert Poly(1, x).eval(0, 7) == 1
assert Poly(x, x).eval(0, 7) == 7
assert Poly(0, x).eval(x, 7) == 0
assert Poly(1, x).eval(x, 7) == 1
assert Poly(x, x).eval(x, 7) == 7
assert Poly(0, x).eval('x', 7) == 0
assert Poly(1, x).eval('x', 7) == 1
assert Poly(x, x).eval('x', 7) == 7
raises(PolynomialError, "Poly(1, x).eval(1, 7)")
raises(PolynomialError, "Poly(1, x).eval(y, 7)")
raises(PolynomialError, "Poly(1, x).eval('y', 7)")
assert Poly(123, x, y).eval(7) == Poly(123, y)
assert Poly(2*y, x, y).eval(7) == Poly(2*y, y)
assert Poly(x*y, x, y).eval(7) == Poly(7*y, y)
assert Poly(123, x, y).eval(x, 7) == Poly(123, y)
assert Poly(2*y, x, y).eval(x, 7) == Poly(2*y, y)
assert Poly(x*y, x, y).eval(x, 7) == Poly(7*y, y)
assert Poly(123, x, y).eval(y, 7) == Poly(123, x)
assert Poly(2*y, x, y).eval(y, 7) == Poly(14, x)
assert Poly(x*y, x, y).eval(y, 7) == Poly(7*x, x)
assert Poly(x*y + y, x, y).eval({x: 7}) == Poly(8*y, y)
assert Poly(x*y + y, x, y).eval({y: 7}) == Poly(7*x + 7, x)
assert Poly(x*y + y, x, y).eval({x: 6, y: 7}) == 49
assert Poly(x*y + y, x, y).eval({x: 7, y: 6}) == 48
assert Poly(x*y + y, x, y).eval((6, 7)) == 49
assert Poly(x*y + y, x, y).eval([6, 7]) == 49
assert Poly(x + 1, domain='ZZ').eval(S(1)/2) == S(3)/2
assert Poly(x + 1, domain='ZZ').eval(sqrt(2)) == sqrt(2) + 1
raises(ValueError, "Poly(x*y + y, x, y).eval((6, 7, 8))")
raises(DomainError, "Poly(x+1, domain='ZZ').eval(S(1)/2, auto=False)")
def test_Poly___call__():
f = Poly(2*x*y + 3*x + y + 2*z)
assert f(2) == Poly(5*y + 2*z + 6)
assert f(2, 5) == Poly(2*z + 31)
assert f(2, 5, 7) == 45
def test_parallel_poly_from_expr():
assert parallel_poly_from_expr([x-1, x**2-1], x)[0] == [Poly(x-1, x), Poly(x**2-1, x)]
assert parallel_poly_from_expr([Poly(x-1, x), x**2-1], x)[0] == [Poly(x-1, x), Poly(x**2-1, x)]
assert parallel_poly_from_expr([x-1, Poly(x**2-1, x)], x)[0] == [Poly(x-1, x), Poly(x**2-1, x)]
assert parallel_poly_from_expr([Poly(x-1, x), Poly(x**2-1, x)], x)[0] == [Poly(x-1, x), Poly(x**2-1, x)]
assert parallel_poly_from_expr([x-1, x**2-1], x, y)[0] == [Poly(x-1, x, y), Poly(x**2-1, x, y)]
assert parallel_poly_from_expr([Poly(x-1, x), x**2-1], x, y)[0] == [Poly(x-1, x, y), Poly(x**2-1, x, y)]
assert parallel_poly_from_expr([x-1, Poly(x**2-1, x)], x, y)[0] == [Poly(x-1, x, y), Poly(x**2-1, x, y)]
assert parallel_poly_from_expr([Poly(x-1, x), Poly(x**2-1, x)], x, y)[0] == [Poly(x-1, x, y), Poly(x**2-1, x, y)]
assert parallel_poly_from_expr([x-1, x**2-1])[0] == [Poly(x-1, x), Poly(x**2-1, x)]
assert parallel_poly_from_expr([Poly(x-1, x), x**2-1])[0] == [Poly(x-1, x), Poly(x**2-1, x)]
assert parallel_poly_from_expr([x-1, Poly(x**2-1, x)])[0] == [Poly(x-1, x), Poly(x**2-1, x)]
assert parallel_poly_from_expr([Poly(x-1, x), Poly(x**2-1, x)])[0] == [Poly(x-1, x), Poly(x**2-1, x)]
assert parallel_poly_from_expr([1, x**2-1])[0] == [Poly(1, x), Poly(x**2-1, x)]
assert parallel_poly_from_expr([1, x**2-1])[0] == [Poly(1, x), Poly(x**2-1, x)]
assert parallel_poly_from_expr([1, Poly(x**2-1, x)])[0] == [Poly(1, x), Poly(x**2-1, x)]
assert parallel_poly_from_expr([1, Poly(x**2-1, x)])[0] == [Poly(1, x), Poly(x**2-1, x)]
assert parallel_poly_from_expr([x**2-1, 1])[0] == [Poly(x**2-1, x), Poly(1, x)]
assert parallel_poly_from_expr([x**2-1, 1])[0] == [Poly(x**2-1, x), Poly(1, x)]
assert parallel_poly_from_expr([Poly(x**2-1, x), 1])[0] == [Poly(x**2-1, x), Poly(1, x)]
assert parallel_poly_from_expr([Poly(x**2-1, x), 1])[0] == [Poly(x**2-1, x), Poly(1, x)]
assert parallel_poly_from_expr([Poly(x, x, y), Poly(y, x, y)], x, y, order='lex')[0] == \
[Poly(x, x, y, domain='ZZ'), Poly(y, x, y, domain='ZZ')]
raises(PolificationFailed, "parallel_poly_from_expr([0, 1])")
def test_pdiv():
f, g = x**2 - y**2, x - y
q, r = x + y, 0
F, G, Q, R = [ Poly(h, x, y) for h in (f, g, q, r) ]
assert F.pdiv(G) == (Q, R)
assert F.prem(G) == R
assert F.pquo(G) == Q
assert F.pexquo(G) == Q
assert pdiv(f, g) == (q, r)
assert prem(f, g) == r
assert pquo(f, g) == q
assert pexquo(f, g) == q
assert pdiv(f, g, x, y) == (q, r)
assert prem(f, g, x, y) == r
assert pquo(f, g, x, y) == q
assert pexquo(f, g, x, y) == q
assert pdiv(f, g, (x,y)) == (q, r)
assert prem(f, g, (x,y)) == r
assert pquo(f, g, (x,y)) == q
assert pexquo(f, g, (x,y)) == q
assert pdiv(F, G) == (Q, R)
assert prem(F, G) == R
assert pquo(F, G) == Q
assert pexquo(F, G) == Q
assert pdiv(f, g, polys=True) == (Q, R)
assert prem(f, g, polys=True) == R
assert pquo(f, g, polys=True) == Q
assert pexquo(f, g, polys=True) == Q
assert pdiv(F, G, polys=False) == (q, r)
assert prem(F, G, polys=False) == r
assert pquo(F, G, polys=False) == q
assert pexquo(F, G, polys=False) == q
raises(ComputationFailed, "pdiv(4, 2)")
raises(ComputationFailed, "prem(4, 2)")
raises(ComputationFailed, "pquo(4, 2)")
raises(ComputationFailed, "pexquo(4, 2)")
def test_div():
f, g = x**2 - y**2, x - y
q, r = x + y, 0
F, G, Q, R = [ Poly(h, x, y) for h in (f, g, q, r) ]
assert F.div(G) == (Q, R)
assert F.rem(G) == R
assert F.quo(G) == Q
assert F.exquo(G) == Q
assert div(f, g) == (q, r)
assert rem(f, g) == r
assert quo(f, g) == q
assert exquo(f, g) == q
assert div(f, g, x, y) == (q, r)
assert rem(f, g, x, y) == r
assert quo(f, g, x, y) == q
assert exquo(f, g, x, y) == q
assert div(f, g, (x,y)) == (q, r)
assert rem(f, g, (x,y)) == r
assert quo(f, g, (x,y)) == q
assert exquo(f, g, (x,y)) == q
assert div(F, G) == (Q, R)
assert rem(F, G) == R
assert quo(F, G) == Q
assert exquo(F, G) == Q
assert div(f, g, polys=True) == (Q, R)
assert rem(f, g, polys=True) == R
assert quo(f, g, polys=True) == Q
assert exquo(f, g, polys=True) == Q
assert div(F, G, polys=False) == (q, r)
assert rem(F, G, polys=False) == r
assert quo(F, G, polys=False) == q
assert exquo(F, G, polys=False) == q
raises(ComputationFailed, "div(4, 2)")
raises(ComputationFailed, "rem(4, 2)")
raises(ComputationFailed, "quo(4, 2)")
raises(ComputationFailed, "exquo(4, 2)")
f, g = x**2 + 1, 2*x - 4
qz, rz = 0, x**2 + 1
qq, rq = x/2 + 1, 5
assert div(f, g) == (qq, rq)
assert div(f, g, auto=True) == (qq, rq)
assert div(f, g, auto=False) == (qz, rz)
assert div(f, g, domain=ZZ) == (qz, rz)
assert div(f, g, domain=QQ) == (qq, rq)
assert div(f, g, domain=ZZ, auto=True) == (qq, rq)
assert div(f, g, domain=ZZ, auto=False) == (qz, rz)
assert div(f, g, domain=QQ, auto=True) == (qq, rq)
assert div(f, g, domain=QQ, auto=False) == (qq, rq)
assert rem(f, g) == rq
assert rem(f, g, auto=True) == rq
assert rem(f, g, auto=False) == rz
assert rem(f, g, domain=ZZ) == rz
assert rem(f, g, domain=QQ) == rq
assert rem(f, g, domain=ZZ, auto=True) == rq
assert rem(f, g, domain=ZZ, auto=False) == rz
assert rem(f, g, domain=QQ, auto=True) == rq
assert rem(f, g, domain=QQ, auto=False) == rq
assert quo(f, g) == qq
assert quo(f, g, auto=True) == qq
assert quo(f, g, auto=False) == qz
assert quo(f, g, domain=ZZ) == qz
assert quo(f, g, domain=QQ) == qq
assert quo(f, g, domain=ZZ, auto=True) == qq
assert quo(f, g, domain=ZZ, auto=False) == qz
assert quo(f, g, domain=QQ, auto=True) == qq
assert quo(f, g, domain=QQ, auto=False) == qq
f, g, q = x**2, 2*x, x/2
assert exquo(f, g) == q
assert exquo(f, g, auto=True) == q
raises(ExactQuotientFailed, "exquo(f, g, auto=False)")
raises(ExactQuotientFailed, "exquo(f, g, domain=ZZ)")
assert exquo(f, g, domain=QQ) == q
assert exquo(f, g, domain=ZZ, auto=True) == q
raises(ExactQuotientFailed, "exquo(f, g, domain=ZZ, auto=False)")
assert exquo(f, g, domain=QQ, auto=True) == q
assert exquo(f, g, domain=QQ, auto=False) == q
f, g = Poly(x**2), Poly(x)
q, r = f.div(g)
assert q.get_domain().is_ZZ and r.get_domain().is_ZZ
r = f.rem(g)
assert r.get_domain().is_ZZ
q = f.quo(g)
assert q.get_domain().is_ZZ
q = f.exquo(g)
assert q.get_domain().is_ZZ
def test_gcdex():
f, g = 2*x, x**2 - 16
s, t, h = x/32, -Rational(1,16), 1
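# gcdex(f, g) returns (s, t, h) with s*f + t*g == h == gcd(f, g); here
# (x/32)*(2*x) - (1/16)*(x**2 - 16) == 1, and half_gcdex drops the t cofactor.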
F, G, S, T, H = [ Poly(u, x, domain='QQ') for u in (f, g, s, t, h) ]
assert F.half_gcdex(G) == (S, H)
assert F.gcdex(G) == (S, T, H)
assert F.invert(G) == S
assert half_gcdex(f, g) == (s, h)
assert gcdex(f, g) == (s, t, h)
assert invert(f, g) == s
assert half_gcdex(f, g, x) == (s, h)
assert gcdex(f, g, x) == (s, t, h)
assert invert(f, g, x) == s
assert half_gcdex(f, g, (x,)) == (s, h)
assert gcdex(f, g, (x,)) == (s, t, h)
assert invert(f, g, (x,)) == s
assert half_gcdex(F, G) == (S, H)
assert gcdex(F, G) == (S, T, H)
assert invert(F, G) == S
assert half_gcdex(f, g, polys=True) == (S, H)
assert gcdex(f, g, polys=True) == (S, T, H)
assert invert(f, g, polys=True) == S
assert half_gcdex(F, G, polys=False) == (s, h)
assert gcdex(F, G, polys=False) == (s, t, h)
assert invert(F, G, polys=False) == s
assert half_gcdex(100, 2004) == (-20, 4)
assert gcdex(100, 2004) == (-20, 1, 4)
assert invert(3, 7) == 5
raises(DomainError, "half_gcdex(x + 1, 2*x + 1, auto=False)")
raises(DomainError, "gcdex(x + 1, 2*x + 1, auto=False)")
raises(DomainError, "invert(x + 1, 2*x + 1, auto=False)")
def test_revert():
f = Poly(1 - x**2/2 + x**4/24 - x**6/720)
g = Poly(61*x**6/720 + 5*x**4/24 + x**2/2 + 1)
assert f.revert(8) == g
def test_subresultants():
f, g, h = x**2 - 2*x + 1, x**2 - 1, 2*x - 2
F, G, H = Poly(f), Poly(g), Poly(h)
assert F.subresultants(G) == [F, G, H]
assert subresultants(f, g) == [f, g, h]
assert subresultants(f, g, x) == [f, g, h]
assert subresultants(f, g, (x,)) == [f, g, h]
assert subresultants(F, G) == [F, G, H]
assert subresultants(f, g, polys=True) == [F, G, H]
assert subresultants(F, G, polys=False) == [f, g, h]
raises(ComputationFailed, "subresultants(4, 2)")
def test_resultant():
f, g, h = x**2 - 2*x + 1, x**2 - 1, 0
F, G = Poly(f), Poly(g)
assert F.resultant(G) == h
assert resultant(f, g) == h
assert resultant(f, g, x) == h
assert resultant(f, g, (x,)) == h
assert resultant(F, G) == h
assert resultant(f, g, polys=True) == h
assert resultant(F, G, polys=False) == h
f, g, h = x - a, x - b, a - b
F, G, H = Poly(f), Poly(g), Poly(h)
assert F.resultant(G) == H
assert resultant(f, g) == h
assert resultant(f, g, x) == h
assert resultant(f, g, (x,)) == h
assert resultant(F, G) == H
assert resultant(f, g, polys=True) == H
assert resultant(F, G, polys=False) == h
raises(ComputationFailed, "resultant(4, 2)")
def test_discriminant():
f, g = x**3 + 3*x**2 + 9*x - 13, -11664
F = Poly(f)
assert F.discriminant() == g
assert discriminant(f) == g
assert discriminant(f, x) == g
assert discriminant(f, (x,)) == g
assert discriminant(F) == g
assert discriminant(f, polys=True) == g
assert discriminant(F, polys=False) == g
f, g = a*x**2 + b*x + c, b**2 - 4*a*c
F, G = Poly(f), Poly(g)
assert F.discriminant() == G
assert discriminant(f) == g
assert discriminant(f, x, a, b, c) == g
assert discriminant(f, (x, a, b, c)) == g
assert discriminant(F) == G
assert discriminant(f, polys=True) == G
assert discriminant(F, polys=False) == g
raises(ComputationFailed, "discriminant(4)")
def test_gcd_list():
F = [x**3 - 1, x**2 - 1, x**2 - 3*x + 2]
assert gcd_list(F) == x - 1
assert gcd_list(F, polys=True) == Poly(x - 1)
assert gcd_list([]) == 0
assert gcd_list([1, 2]) == 1
assert gcd_list([4, 6, 8]) == 2
gcd = gcd_list([], x)
assert gcd.is_Number and gcd is S.Zero
gcd = gcd_list([], x, polys=True)
assert gcd.is_Poly and gcd.is_zero
raises(ComputationFailed, "gcd_list([], polys=True)")
def test_lcm_list():
F = [x**3 - 1, x**2 - 1, x**2 - 3*x + 2]
assert lcm_list(F) == x**5 - x**4 - 2*x**3 - x**2 + x + 2
assert lcm_list(F, polys=True) == Poly(x**5 - x**4 - 2*x**3 - x**2 + x + 2)
assert lcm_list([]) == 1
assert lcm_list([1, 2]) == 2
assert lcm_list([4, 6, 8]) == 24
lcm = lcm_list([], x)
assert lcm.is_Number and lcm is S.One
lcm = lcm_list([], x, polys=True)
assert lcm.is_Poly and lcm.is_one
raises(ComputationFailed, "lcm_list([], polys=True)")
def test_gcd():
f, g = x**3 - 1, x**2 - 1
s, t = x**2 + x + 1, x + 1
h, r = x - 1, x**4 + x**3 - x - 1
F, G, S, T, H, R = [ Poly(u) for u in (f, g, s, t, h, r) ]
assert F.cofactors(G) == (H, S, T)
assert F.gcd(G) == H
assert F.lcm(G) == R
assert cofactors(f, g) == (h, s, t)
assert gcd(f, g) == h
assert lcm(f, g) == r
assert cofactors(f, g, x) == (h, s, t)
assert gcd(f, g, x) == h
assert lcm(f, g, x) == r
assert cofactors(f, g, (x,)) == (h, s, t)
assert gcd(f, g, (x,)) == h
assert lcm(f, g, (x,)) == r
assert cofactors(F, G) == (H, S, T)
assert gcd(F, G) == H
assert lcm(F, G) == R
assert cofactors(f, g, polys=True) == (H, S, T)
assert gcd(f, g, polys=True) == H
assert lcm(f, g, polys=True) == R
assert cofactors(F, G, polys=False) == (h, s, t)
assert gcd(F, G, polys=False) == h
assert lcm(F, G, polys=False) == r
f, g = 1.0*x**2 - 1.0, 1.0*x - 1.0
h, s, t = g, 1.0*x + 1.0, 1.0
assert cofactors(f, g) == (h, s, t)
assert gcd(f, g) == h
assert lcm(f, g) == f
f, g = 1.0*x**2 - 1.0, 1.0*x - 1.0
h, s, t = g, 1.0*x + 1.0, 1.0
assert cofactors(f, g) == (h, s, t)
assert gcd(f, g) == h
assert lcm(f, g) == f
assert cofactors(8, 6) == (2, 4, 3)
assert gcd(8, 6) == 2
assert lcm(8, 6) == 24
f, g = x**2 - 3*x - 4, x**3 - 4*x**2 + x - 4
l = x**4 - 3*x**3 - 3*x**2 - 3*x - 4
h, s, t = x - 4, x + 1, x**2 + 1
assert cofactors(f, g, modulus=11) == (h, s, t)
assert gcd(f, g, modulus=11) == h
assert lcm(f, g, modulus=11) == l
f, g = x**2 + 8*x + 7, x**3 + 7*x**2 + x + 7
l = x**4 + 8*x**3 + 8*x**2 + 8*x + 7
h, s, t = x + 7, x + 1, x**2 + 1
assert cofactors(f, g, modulus=11, symmetric=False) == (h, s, t)
assert gcd(f, g, modulus=11, symmetric=False) == h
assert lcm(f, g, modulus=11, symmetric=False) == l
def test_terms_gcd():
assert terms_gcd(1) == 1
assert terms_gcd(1, x) == 1
assert terms_gcd(x - 1) == x - 1
assert terms_gcd(-x - 1) == -x - 1
assert terms_gcd(2*x + 3) == 2*x + 3
assert terms_gcd(6*x + 4) == Mul(2, 3*x + 2, evaluate=False)
assert terms_gcd(x**3*y + x*y**3) == x*y*(x**2 + y**2)
assert terms_gcd(2*x**3*y + 2*x*y**3) == 2*x*y*(x**2 + y**2)
assert terms_gcd(x**3*y/2 + x*y**3/2) == x*y/2*(x**2 + y**2)
assert terms_gcd(x**3*y + 2*x*y**3) == x*y*(x**2 + 2*y**2)
assert terms_gcd(2*x**3*y + 4*x*y**3) == 2*x*y*(x**2 + 2*y**2)
assert terms_gcd(2*x**3*y/3 + 4*x*y**3/5) == 2*x*y/15*(5*x**2 + 6*y**2)
assert terms_gcd(2.0*x**3*y + 4.1*x*y**3) == x*y*(2.0*x**2 + 4.1*y**2)
def test_trunc():
f, g = x**5 + 2*x**4 + 3*x**3 + 4*x**2 + 5*x + 6, x**5 - x**4 + x**2 - x
F, G = Poly(f), Poly(g)
assert F.trunc(3) == G
assert trunc(f, 3) == g
assert trunc(f, 3, x) == g
assert trunc(f, 3, (x,)) == g
assert trunc(F, 3) == G
assert trunc(f, 3, polys=True) == G
assert trunc(F, 3, polys=False) == g
f, g = 6*x**5 + 5*x**4 + 4*x**3 + 3*x**2 + 2*x + 1, -x**4 + x**3 - x + 1
F, G = Poly(f), Poly(g)
assert F.trunc(3) == G
assert trunc(f, 3) == g
assert trunc(f, 3, x) == g
assert trunc(f, 3, (x,)) == g
assert trunc(F, 3) == G
assert trunc(f, 3, polys=True) == G
assert trunc(F, 3, polys=False) == g
f = Poly(x**2 + 2*x + 3, modulus=5)
assert f.trunc(2) == Poly(x**2 + 1, modulus=5)
def test_monic():
f, g = 2*x - 1, x - S(1)/2
F, G = Poly(f, domain='QQ'), Poly(g)
assert F.monic() == G
assert monic(f) == g
assert monic(f, x) == g
assert monic(f, (x,)) == g
assert monic(F) == G
assert monic(f, polys=True) == G
assert monic(F, polys=False) == g
raises(ComputationFailed, "monic(4)")
assert monic(2*x**2 + 6*x + 4, auto=False) == x**2 + 3*x + 2
raises(ExactQuotientFailed, "monic(2*x + 6*x + 1, auto=False)")
assert monic(2.0*x**2 + 6.0*x + 4.0) == 1.0*x**2 + 3.0*x + 2.0
assert monic(2*x**2 + 3*x + 4, modulus=5) == x**2 - x + 2
def test_content():
f, F = 4*x + 2, Poly(4*x + 2)
assert F.content() == 2
assert content(f) == 2
raises(ComputationFailed, "content(4)")
f = Poly(2*x, modulus=3)
assert f.content() == 1
def test_primitive():
f, g = 4*x + 2, 2*x + 1
F, G = Poly(f), Poly(g)
assert F.primitive() == (2, G)
assert primitive(f) == (2, g)
assert primitive(f, x) == (2, g)
assert primitive(f, (x,)) == (2, g)
assert primitive(F) == (2, G)
assert primitive(f, polys=True) == (2, G)
assert primitive(F, polys=False) == (2, g)
raises(ComputationFailed, "primitive(4)")
f = Poly(2*x, modulus=3)
g = Poly(2.0*x, domain=RR)
assert f.primitive() == (1, f)
assert g.primitive() == (1.0, g)
def test_compose():
f = x**12+20*x**10+150*x**8+500*x**6+625*x**4-2*x**3-10*x+9
g = x**4 - 2*x + 9
h = x**3 + 5*x
F, G, H = map(Poly, (f, g, h))
assert G.compose(H) == F
assert compose(g, h) == f
assert compose(g, h, x) == f
assert compose(g, h, (x,)) == f
assert compose(G, H) == F
assert compose(g, h, polys=True) == F
assert compose(G, H, polys=False) == f
assert F.decompose() == [G, H]
assert decompose(f) == [g, h]
assert decompose(f, x) == [g, h]
assert decompose(f, (x,)) == [g, h]
assert decompose(F) == [G, H]
assert decompose(f, polys=True) == [G, H]
assert decompose(F, polys=False) == [g, h]
raises(ComputationFailed, "compose(4, 2)")
raises(ComputationFailed, "decompose(4)")
assert compose(x**2 - y**2, x - y, x, y) == x**2 - 2*x*y
assert compose(x**2 - y**2, x - y, y, x) == -y**2 + 2*x*y
def test_shift():
assert Poly(x**2 - 2*x + 1, x).shift(2) == Poly(x**2 + 2*x + 1, x)
def test_sturm():
f, F = x, Poly(x, domain='QQ')
g, G = 1, Poly(1, x, domain='QQ')
assert F.sturm() == [F, G]
assert sturm(f) == [f, g]
assert sturm(f, x) == [f, g]
assert sturm(f, (x,)) == [f, g]
assert sturm(F) == [F, G]
assert sturm(f, polys=True) == [F, G]
assert sturm(F, polys=False) == [f, g]
raises(ComputationFailed, "sturm(4)")
raises(DomainError, "sturm(f, auto=False)")
f = Poly(S(1024)/(15625*pi**8)*x**5 \
- S(4096)/(625*pi**8)*x**4 \
+ S(32)/(15625*pi**4)*x**3 \
- S(128)/(625*pi**4)*x**2 \
+ S(1)/62500*x \
- S(1)/625, x, domain='ZZ(pi)')
assert sturm(f) == \
[Poly(x**3 - 100*x**2 + pi**4/64*x - 25*pi**4/16, x, domain='ZZ(pi)'),
Poly(3*x**2 - 200*x + pi**4/64, x, domain='ZZ(pi)'),
Poly((S(20000)/9 - pi**4/96)*x + 25*pi**4/18, x, domain='ZZ(pi)'),
Poly((-3686400000000*pi**4 - 11520000*pi**8 - 9*pi**12)/(26214400000000 - 245760000*pi**4 + 576*pi**8), x, domain='ZZ(pi)')]
def test_gff():
f = x**5 + 2*x**4 - x**3 - 2*x**2
assert Poly(f).gff_list() == [(Poly(x), 1), (Poly(x + 2), 4)]
assert gff_list(f) == [(x, 1), (x + 2, 4)]
raises(NotImplementedError, "gff(f)")
f = x*(x - 1)**3*(x - 2)**2*(x - 4)**2*(x - 5)
assert Poly(f).gff_list() == [(Poly(x**2 - 5*x + 4), 1), (Poly(x**2 - 5*x + 4), 2), (Poly(x), 3)]
assert gff_list(f) == [(x**2 - 5*x + 4, 1), (x**2 - 5*x + 4, 2), (x, 3)]
raises(NotImplementedError, "gff(f)")
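# sqf_norm(f, extension=K) returns (s, g, r): g is f with x shifted by s times the extension
# element and r is the (square-free) norm of g over QQ; e.g. for x**2 - 2 over QQ(sqrt(3)),
# (x - sqrt(3))**2 - 2 == x**2 - 2*sqrt(3)*x + 1.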
def test_sqf_norm():
assert sqf_norm(x**2-2, extension=sqrt(3)) == \
(1, x**2 - 2*sqrt(3)*x + 1, x**4 - 10*x**2 + 1)
assert sqf_norm(x**2-3, extension=sqrt(2)) == \
(1, x**2 - 2*sqrt(2)*x - 1, x**4 - 10*x**2 + 1)
assert Poly(x**2-2, extension=sqrt(3)).sqf_norm() == \
(1, Poly(x**2 - 2*sqrt(3)*x + 1, x, extension=sqrt(3)),
Poly(x**4 - 10*x**2 + 1, x, domain='QQ'))
assert Poly(x**2-3, extension=sqrt(2)).sqf_norm() == \
(1, Poly(x**2 - 2*sqrt(2)*x - 1, x, extension=sqrt(2)),
Poly(x**4 - 10*x**2 + 1, x, domain='QQ'))
def test_sqf():
f = x**5 - x**3 - x**2 + 1
g = x**3 + 2*x**2 + 2*x + 1
h = x - 1
p = x**4 + x**3 - x - 1
F, G, H, P = map(Poly, (f, g, h, p))
assert F.sqf_part() == P
assert sqf_part(f) == p
assert sqf_part(f, x) == p
assert sqf_part(f, (x,)) == p
assert sqf_part(F) == P
assert sqf_part(f, polys=True) == P
assert sqf_part(F, polys=False) == p
assert F.sqf_list() == (1, [(G, 1), (H, 2)])
assert sqf_list(f) == (1, [(g, 1), (h, 2)])
assert sqf_list(f, x) == (1, [(g, 1), (h, 2)])
assert sqf_list(f, (x,)) == (1, [(g, 1), (h, 2)])
assert sqf_list(F) == (1, [(G, 1), (H, 2)])
assert sqf_list(f, polys=True) == (1, [(G, 1), (H, 2)])
assert sqf_list(F, polys=False) == (1, [(g, 1), (h, 2)])
assert F.sqf_list_include() == [(G, 1), (H, 2)]
raises(ComputationFailed, "sqf_part(4)")
assert sqf(1) == 1
assert sqf_list(1) == (1, [])
assert sqf((2*x**2 + 2)**7) == 128*(x**2 + 1)**7
assert sqf(f) == g*h**2
assert sqf(f, x) == g*h**2
assert sqf(f, (x,)) == g*h**2
d = x**2 + y**2
assert sqf(f/d) == (g*h**2)/d
assert sqf(f/d, x) == (g*h**2)/d
assert sqf(f/d, (x,)) == (g*h**2)/d
assert sqf(x - 1) == x - 1
assert sqf(-x - 1) == -x - 1
assert sqf(x - 1) == x - 1
assert sqf(6*x - 10) == Mul(2, 3*x - 5, evaluate=False)
assert sqf((6*x - 10)/(3*x - 6)) == S(2)/3*((3*x - 5)/(x - 2))
assert sqf(Poly(x**2 - 2*x + 1)) == (x - 1)**2
f = 3 + x - x*(1 + x) + x**2
assert sqf(f) == 3
f = (x**2 + 2*x + 1)**20000000000
assert sqf(f) == (x + 1)**40000000000
assert sqf_list(f) == (1, [(x + 1, 40000000000)])
def test_factor():
f = x**5 - x**3 - x**2 + 1
u = x + 1
v = x - 1
w = x**2 + x + 1
F, U, V, W = map(Poly, (f, u, v, w))
assert F.factor_list() == (1, [(U, 1), (V, 2), (W, 1)])
assert factor_list(f) == (1, [(u, 1), (v, 2), (w, 1)])
assert factor_list(f, x) == (1, [(u, 1), (v, 2), (w, 1)])
assert factor_list(f, (x,)) == (1, [(u, 1), (v, 2), (w, 1)])
assert factor_list(F) == (1, [(U, 1), (V, 2), (W, 1)])
assert factor_list(f, polys=True) == (1, [(U, 1), (V, 2), (W, 1)])
assert factor_list(F, polys=False) == (1, [(u, 1), (v, 2), (w, 1)])
assert F.factor_list_include() == [(U, 1), (V, 2), (W, 1)]
assert factor_list(1) == (1, [])
assert factor_list(6) == (6, [])
assert factor_list(sqrt(3), x) == (1, [(3, S.Half)])
assert factor_list((-1)**x, x) == (1, [(-1, x)])
assert factor_list((2*x)**y, x) == (1, [(2, y), (x, y)])
assert factor_list(sqrt(x*y),x) == (1, [(x*y, S.Half)])
assert factor(1) == 1
assert factor(6) == 6
assert factor_list(3*x) == (3, [(x, 1)])
assert factor_list(3*x**2) == (3, [(x, 2)])
assert factor(3*x) == 3*x
assert factor(3*x**2) == 3*x**2
assert factor((2*x**2 + 2)**7) == 128*(x**2 + 1)**7
assert factor(f) == u*v**2*w
assert factor(f, x) == u*v**2*w
assert factor(f, (x,)) == u*v**2*w
g, p, q, r = x**2 - y**2, x - y, x + y, x**2 + 1
assert factor(f/g) == (u*v**2*w)/(p*q)
assert factor(f/g, x) == (u*v**2*w)/(p*q)
assert factor(f/g, (x,)) == (u*v**2*w)/(p*q)
p = Symbol('p', positive=True)
i = Symbol('i', integer=True)
r = Symbol('r', real=True)
assert factor(sqrt(x*y)).is_Pow == True
assert factor(sqrt(3*x**2 - 3)) == sqrt(3)*sqrt((x - 1)*(x + 1))
assert factor(sqrt(3*x**2 + 3)) == sqrt(3)*sqrt(x**2 + 1)
assert factor((y*x**2 - y)**i) == y**i*(x - 1)**i*(x + 1)**i
assert factor((y*x**2 + y)**i) == y**i*(x**2 + 1)**i
assert factor((y*x**2 - y)**t) == (y*(x - 1)*(x + 1))**t
assert factor((y*x**2 + y)**t) == (y*(x**2 + 1))**t
f = sqrt(expand((r**2 + 1)*(p + 1)*(p - 1)*(p - 2)**3))
g = sqrt((p - 2)**3*(p - 1))*sqrt(p + 1)*sqrt(r**2 + 1)
assert factor(f) == g
assert factor(g) == g
f = sqrt(expand((x - 1)**5*(r**2 + 1)))
g = sqrt(r**2 + 1)*(x - 1)**(S(5)/2)
assert factor(f) == g
assert factor(g) == g
f = Poly(sin(1)*x + 1, x, domain=EX)
assert f.factor_list() == (1, [(f, 1)])
f = x**4 + 1
assert factor(f) == f
assert factor(f, extension=I) == (x**2 - I)*(x**2 + I)
assert factor(f, gaussian=True) == (x**2 - I)*(x**2 + I)
assert factor(f, extension=sqrt(2)) == (x**2 + sqrt(2)*x + 1)*(x**2 - sqrt(2)*x + 1)
f = x**2 + 2*sqrt(2)*x + 2
assert factor(f, extension=sqrt(2)) == (x + sqrt(2))**2
assert factor(f**3, extension=sqrt(2)) == (x + sqrt(2))**6
assert factor(x**2 - 2*y**2, extension=sqrt(2)) == \
(x + sqrt(2)*y)*(x - sqrt(2)*y)
assert factor(2*x**2 - 4*y**2, extension=sqrt(2)) == \
2*((x + sqrt(2)*y)*(x - sqrt(2)*y))
assert factor(x - 1) == x - 1
assert factor(-x - 1) == -x - 1
assert factor(x**11 + x + 1, modulus=65537, symmetric=True) == \
(x**2 + x + 1)*(x**9 - x**8 + x**6 - x**5 + x**3 - x** 2 + 1)
assert factor(x**11 + x + 1, modulus=65537, symmetric=False) == \
(x**2 + x + 1)*(x**9 + 65536*x**8 + x**6 + 65536*x**5 + x**3 + 65536*x** 2 + 1)
f = x/pi + x*sin(x)/pi
g = y/(pi**2 + 2*pi + 1) + y*sin(x)/(pi**2 + 2*pi + 1)
assert factor(f) == x*(sin(x) + 1)/pi
assert factor(g) == y*(sin(x) + 1)/(pi + 1)**2
assert factor(Eq(x**2 + 2*x + 1, x**3 + 1)) == Eq((x + 1)**2, (x + 1)*(x**2 - x + 1))
f = (x**2 - 1)/(x**2 + 4*x + 4)
assert factor(f) == (x + 1)*(x - 1)/(x + 2)**2
assert factor(f, x) == (x + 1)*(x - 1)/(x + 2)**2
f = 3 + x - x*(1 + x) + x**2
assert factor(f) == 3
assert factor(f, x) == 3
assert factor(1/(x**2 + 2*x + 1/x) - 1) == -((1 - x + 2*x**2 + x**3)/(1 + 2*x**2 + x**3))
assert factor(f, expand=False) == f
raises(PolynomialError, "factor(f, x, expand=False)")
raises(FlagError, "factor(x**2 - 1, polys=True)")
assert factor([x, Eq(x**2 - y**2, Tuple(x**2 - z**2, 1/x + 1/y))]) == \
[x, Eq((x - y)*(x + y), Tuple((x - z)*(x + z), (x + y)/x/y))]
assert not isinstance(Poly(x**3 + x + 1).factor_list()[1][0][0], PurePoly) == True
assert isinstance(PurePoly(x**3 + x + 1).factor_list()[1][0][0], PurePoly) == True
def test_factor_large():
f = (x**2 + 4*x + 4)**10000000*(x**2 + 1)*(x**2 + 2*x + 1)**1234567
g = ((x**2 + 2*x + 1)**3000*y**2 + (x**2 + 2*x + 1)**3000*2*y + (x**2 + 2*x + 1)**3000)
assert factor(f) == (x + 2)**20000000*(x**2 + 1)*(x + 1)**2469134
assert factor(g) == (x + 1)**6000*(y + 1)**2
assert factor_list(f) == (1, [(x + 1, 2469134), (x + 2, 20000000), (x**2 + 1, 1)])
assert factor_list(g) == (1, [(y + 1, 2), (x + 1, 6000)])
f = (x**2 - y**2)**200000*(x**7 + 1)
g = (x**2 + y**2)**200000*(x**7 + 1)
assert factor(f) == \
(x + 1)*(x - y)**200000*(x + y)**200000*(x**6 - x**5 + x**4 - x**3 + x**2 - x + 1)
assert factor(g, gaussian=True) == \
(x + 1)*(x - I*y)**200000*(x + I*y)**200000*(x**6 - x**5 + x**4 - x**3 + x**2 - x + 1)
assert factor_list(f) == \
(1, [(x + 1, 1), (x - y, 200000), (x + y, 200000), (x**6 - x**5 + x**4 - x**3 + x**2 - x + 1, 1)])
assert factor_list(g, gaussian=True) == \
(1, [(x + 1, 1), (x - I*y, 200000), (x + I*y, 200000), (x**6 - x**5 + x**4 - x**3 + x**2 - x + 1, 1)])
@XFAIL
def test_factor_noeval():
assert factor(6*x - 10) == 2*(3*x - 5)
assert factor((6*x - 10)/(3*x - 6)) == S(2)/3*((3*x - 5)/(x - 2))
def test_intervals():
assert intervals(0) == []
assert intervals(1) == []
assert intervals(x, sqf=True) == [(0, 0)]
assert intervals(x) == [((0, 0), 1)]
assert intervals(x**128) == [((0, 0), 128)]
assert intervals([x**2, x**4]) == [((0, 0), {0: 2, 1: 4})]
f = Poly((2*x/5 - S(17)/3)*(4*x + S(1)/257))
assert f.intervals(sqf=True) == [(-1, 0), (14, 15)]
assert f.intervals() == [((-1, 0), 1), ((14, 15), 1)]
assert f.intervals(fast=True, sqf=True) == [(-1, 0), (14, 15)]
assert f.intervals(fast=True) == [((-1, 0), 1), ((14, 15), 1)]
assert f.intervals(eps=S(1)/10) == f.intervals(eps=0.1) == \
[((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
assert f.intervals(eps=S(1)/100) == f.intervals(eps=0.01) == \
[((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
assert f.intervals(eps=S(1)/1000) == f.intervals(eps=0.001) == \
[((-S(1)/1005, 0), 1), ((S(85)/6, S(85)/6), 1)]
assert f.intervals(eps=S(1)/10000) == f.intervals(eps=0.0001) == \
[((-S(1)/1028, -S(1)/1028), 1), ((S(85)/6, S(85)/6), 1)]
f = (2*x/5 - S(17)/3)*(4*x + S(1)/257)
assert intervals(f, sqf=True) == [(-1, 0), (14, 15)]
assert intervals(f) == [((-1, 0), 1), ((14, 15), 1)]
assert intervals(f, eps=S(1)/10) == intervals(f, eps=0.1) == \
[((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
assert intervals(f, eps=S(1)/100) == intervals(f, eps=0.01) == \
[((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
assert intervals(f, eps=S(1)/1000) == intervals(f, eps=0.001) == \
[((-S(1)/1005, 0), 1), ((S(85)/6, S(85)/6), 1)]
assert intervals(f, eps=S(1)/10000) == intervals(f, eps=0.0001) == \
[((-S(1)/1028, -S(1)/1028), 1), ((S(85)/6, S(85)/6), 1)]
f = Poly((x**2 - 2)*(x**2-3)**7*(x+1)*(7*x+3)**3)
assert f.intervals() == \
[((-2, -S(3)/2), 7), ((-S(3)/2, -1), 1),
((-1, -1), 1), ((-1, 0), 3),
((1, S(3)/2), 1), ((S(3)/2, 2), 7)]
assert intervals([x**5 - 200, x**5 - 201]) == \
[((S(75)/26, S(101)/35), {0: 1}), ((S(283)/98, S(26)/9), {1: 1})]
assert intervals([x**5 - 200, x**5 - 201], fast=True) == \
[((S(75)/26, S(101)/35), {0: 1}), ((S(283)/98, S(26)/9), {1: 1})]
assert intervals([x**2 - 200, x**2 - 201]) == \
[((-S(71)/5, -S(85)/6), {1: 1}), ((-S(85)/6, -14), {0: 1}), ((14, S(85)/6), {0: 1}), ((S(85)/6, S(71)/5), {1: 1})]
assert intervals([x+1, x+2, x-1, x+1, 1, x-1, x-1, (x-2)**2]) == \
[((-2, -2), {1: 1}), ((-1, -1), {0: 1, 3: 1}), ((1, 1), {2: 1, 5: 1, 6: 1}), ((2, 2), {7: 2})]
f, g, h = x**2 - 2, x**4 - 4*x**2 + 4, x - 1
assert intervals(f, inf=S(7)/4, sqf=True) == []
assert intervals(f, inf=S(7)/5, sqf=True) == [(S(7)/5, S(3)/2)]
assert intervals(f, sup=S(7)/4, sqf=True) == [(-2, -1), (1, S(3)/2)]
assert intervals(f, sup=S(7)/5, sqf=True) == [(-2, -1)]
assert intervals(g, inf=S(7)/4) == []
assert intervals(g, inf=S(7)/5) == [((S(7)/5, S(3)/2), 2)]
assert intervals(g, sup=S(7)/4) == [((-2, -1), 2), ((1, S(3)/2), 2)]
assert intervals(g, sup=S(7)/5) == [((-2, -1), 2)]
assert intervals([g, h], inf=S(7)/4) == []
assert intervals([g, h], inf=S(7)/5) == [((S(7)/5, S(3)/2), {0: 2})]
assert intervals([g, h], sup=S(7)/4) == [((-2, -1), {0: 2}), ((1, 1), {1: 1}), ((1, S(3)/2), {0: 2})]
assert intervals([g, h], sup=S(7)/5) == [((-2, -1), {0: 2}), ((1, 1), {1: 1})]
assert intervals([x+2, x**2 - 2]) == \
[((-2, -2), {0: 1}), ((-2, -1), {1: 1}), ((1, 2), {1: 1})]
assert intervals([x+2, x**2 - 2], strict=True) == \
[((-2, -2), {0: 1}), ((-S(3)/2, -1), {1: 1}), ((1, 2), {1: 1})]
f = 7*z**4 - 19*z**3 + 20*z**2 + 17*z + 20
assert intervals(f) == []
real_part, complex_part = intervals(f, all=True, sqf=True)
assert real_part == []
assert all(re(a) < re(r) < re(b) and im(a) < im(r) < im(b) for (a, b), r in zip(complex_part, nroots(f)))
assert complex_part == [(-S(40)/7 - 40*I/7, 0), (-S(40)/7, 40*I/7),
(-40*I/7, S(40)/7), (0, S(40)/7 + 40*I/7)]
real_part, complex_part = intervals(f, all=True, sqf=True, eps=S(1)/10)
assert real_part == []
assert all(re(a) < re(r) < re(b) and im(a) < im(r) < im(b) for (a, b), r in zip(complex_part, nroots(f)))
raises(ValueError, "intervals(x**2 - 2, eps=10**-100000)")
raises(ValueError, "Poly(x**2 - 2).intervals(eps=10**-100000)")
raises(ValueError, "intervals([x**2 - 2, x**2 - 3], eps=10**-100000)")
def test_refine_root():
f = Poly(x**2 - 2)
assert f.refine_root(1, 2, steps=0) == (1, 2)
assert f.refine_root(-2, -1, steps=0) == (-2, -1)
assert f.refine_root(1, 2, steps=None) == (1, S(3)/2)
assert f.refine_root(-2, -1, steps=None) == (-S(3)/2, -1)
assert f.refine_root(1, 2, steps=1) == (1, S(3)/2)
assert f.refine_root(-2, -1, steps=1) == (-S(3)/2, -1)
assert f.refine_root(1, 2, steps=1, fast=True) == (1, S(3)/2)
assert f.refine_root(-2, -1, steps=1, fast=True) == (-S(3)/2, -1)
assert f.refine_root(1, 2, eps=S(1)/100) == (S(24)/17, S(17)/12)
assert f.refine_root(1, 2, eps=1e-2) == (S(24)/17, S(17)/12)
raises(PolynomialError, "(f**2).refine_root(1, 2, check_sqf=True)")
raises(RefinementFailed, "(f**2).refine_root(1, 2)")
raises(RefinementFailed, "(f**2).refine_root(2, 3)")
f = x**2 - 2
assert refine_root(f, 1, 2, steps=1) == (1, S(3)/2)
assert refine_root(f, -2, -1, steps=1) == (-S(3)/2, -1)
assert refine_root(f, 1, 2, steps=1, fast=True) == (1, S(3)/2)
assert refine_root(f, -2, -1, steps=1, fast=True) == (-S(3)/2, -1)
assert refine_root(f, 1, 2, eps=S(1)/100) == (S(24)/17, S(17)/12)
assert refine_root(f, 1, 2, eps=1e-2) == (S(24)/17, S(17)/12)
raises(PolynomialError, "refine_root(1, 7, 8, eps=S(1)/100)")
raises(ValueError, "Poly(f).refine_root(1, 2, eps=10**-100000)")
raises(ValueError, "refine_root(f, 1, 2, eps=10**-100000)")
def test_count_roots():
assert count_roots(x**2 - 2) == 2
assert count_roots(x**2 - 2, inf=-oo) == 2
assert count_roots(x**2 - 2, sup=+oo) == 2
assert count_roots(x**2 - 2, inf=-oo, sup=+oo) == 2
assert count_roots(x**2 - 2, inf=-2) == 2
assert count_roots(x**2 - 2, inf=-1) == 1
assert count_roots(x**2 - 2, sup=1) == 1
assert count_roots(x**2 - 2, sup=2) == 2
assert count_roots(x**2 - 2, inf=-1, sup=1) == 0
assert count_roots(x**2 - 2, inf=-2, sup=2) == 2
assert count_roots(x**2 - 2, inf=-1, sup=1) == 0
assert count_roots(x**2 - 2, inf=-2, sup=2) == 2
assert count_roots(x**2 + 2) == 0
assert count_roots(x**2 + 2, inf=-2*I) == 2
assert count_roots(x**2 + 2, sup=+2*I) == 2
assert count_roots(x**2 + 2, inf=-2*I, sup=+2*I) == 2
assert count_roots(x**2 + 2, inf=0) == 0
assert count_roots(x**2 + 2, sup=0) == 0
assert count_roots(x**2 + 2, inf=-I) == 1
assert count_roots(x**2 + 2, sup=+I) == 1
assert count_roots(x**2 + 2, inf=+I/2, sup=+I) == 0
assert count_roots(x**2 + 2, inf=-I, sup=-I/2) == 0
raises(PolynomialError, "count_roots(1)")
def test_Poly_root():
f = Poly(2*x**3 - 7*x**2 + 4*x + 4)
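# 2*x**3 - 7*x**2 + 4*x + 4 == (2*x + 1)*(x - 2)**2, so root(0), root(1), root(2)
# are the real roots -1/2, 2, 2 taken in increasing order.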
assert f.root(0) == -S(1)/2
assert f.root(1) == 2
assert f.root(2) == 2
raises(IndexError, "f.root(3)")
assert Poly(x**5 + x + 1).root(0) == RootOf(x**3 - x**2 + 1, 0)
def test_real_roots():
assert real_roots(x) == [0]
assert real_roots(x, multiple=False) == [(0, 1)]
assert real_roots(x**3) == [0, 0, 0]
assert real_roots(x**3, multiple=False) == [(0, 3)]
assert real_roots(x*(x**3 + x + 3)) == [RootOf(x**3 + x + 3, 0), 0]
assert real_roots(x*(x**3 + x + 3), multiple=False) == [(RootOf(x**3 + x + 3, 0), 1), (0, 1)]
assert real_roots(x**3*(x**3 + x + 3)) == [RootOf(x**3 + x + 3, 0), 0, 0, 0]
assert real_roots(x**3*(x**3 + x + 3), multiple=False) == [(RootOf(x**3 + x + 3, 0), 1), (0, 3)]
f = 2*x**3 - 7*x**2 + 4*x + 4
g = x**3 + x + 1
assert Poly(f).real_roots() == [-S(1)/2, 2, 2]
assert Poly(g).real_roots() == [RootOf(g, 0)]
def test_all_roots():
f = 2*x**3 - 7*x**2 + 4*x + 4
g = x**3 + x + 1
assert Poly(f).all_roots() == [-S(1)/2, 2, 2]
assert Poly(g).all_roots() == [RootOf(g, 0), RootOf(g, 1), RootOf(g, 2)]
def test_nroots():
assert Poly(0, x).nroots() == []
assert Poly(1, x).nroots() == []
assert Poly(x**2 - 1, x).nroots() == [-1.0, 1.0]
assert Poly(x**2 + 1, x).nroots() == [-1.0*I, 1.0*I]
roots, error = Poly(x**2 - 1, x).nroots(error=True)
assert roots == [-1.0, 1.0] and error < 1e25;
roots, error = Poly(x**2 + 1, x).nroots(error=True)
assert roots == [-1.0*I, 1.0*I] and error < 1e25;
roots, error = Poly(x**2/3 - S(1)/3, x).nroots(error=True)
assert roots == [-1.0, 1.0] and error < 1e25;
roots, error = Poly(x**2/3 + S(1)/3, x).nroots(error=True)
assert roots == [-1.0*I, 1.0*I] and error < 1e25;
assert Poly(x**2 + 2*I, x).nroots() == [-1.0 + 1.0*I, 1.0 - 1.0*I]
assert Poly(x**2 + 2*I, x, extension=I).nroots() == [-1.0 + 1.0*I, 1.0 - 1.0*I]
assert Poly(0.2*x + 0.1).nroots() == [-0.5]
roots = nroots(x**5 + x + 1, n=5)
eps = Float("1e-5")
assert re(roots[0]).epsilon_eq(-0.75487, eps) is True
assert im(roots[0]) == 0.0
assert re(roots[1]) == -0.5
assert im(roots[1]).epsilon_eq(-0.86602, eps) is True
assert re(roots[2]) == -0.5
assert im(roots[2]).epsilon_eq(+0.86602, eps) is True
assert re(roots[3]).epsilon_eq(+0.87743, eps) is True
assert im(roots[3]).epsilon_eq(-0.74486, eps) is True
assert re(roots[4]).epsilon_eq(+0.87743, eps) is True
assert im(roots[4]).epsilon_eq(+0.74486, eps) is True
eps = Float("1e-6")
assert re(roots[0]).epsilon_eq(-0.75487, eps) is False
assert im(roots[0]) == 0.0
assert re(roots[1]) == -0.5
assert im(roots[1]).epsilon_eq(-0.86602, eps) is False
assert re(roots[2]) == -0.5
assert im(roots[2]).epsilon_eq(+0.86602, eps) is False
assert re(roots[3]).epsilon_eq(+0.87743, eps) is False
assert im(roots[3]).epsilon_eq(-0.74486, eps) is False
assert re(roots[4]).epsilon_eq(+0.87743, eps) is False
assert im(roots[4]).epsilon_eq(+0.74486, eps) is False
raises(DomainError, "Poly(x + y, x).nroots()")
raises(MultivariatePolynomialError, "Poly(x + y).nroots()")
assert nroots(x**2 - 1) == [-1.0, 1.0]
roots, error = nroots(x**2 - 1, error=True)
assert roots == [-1.0, 1.0] and error < 1e25;
assert nroots(x + I) == [-1.0*I]
assert nroots(x + 2*I) == [-2.0*I]
raises(PolynomialError, "nroots(0)")
def test_ground_roots():
f = x**6 - 4*x**4 + 4*x**3 - x**2
assert Poly(f).ground_roots() == {S(1): 2, S(0): 2}
assert ground_roots(f) == {S(1): 2, S(0): 2}
def test_nth_power_roots_poly():
f = x**4 - x**2 + 1
f_2 = (x**2 - x + 1)**2
f_3 = (x**2 + 1)**2
f_4 = (x**2 + x + 1)**2
f_12 = (x - 1)**4
assert nth_power_roots_poly(f, 1) == f
raises(ValueError, "nth_power_roots_poly(f, 0)")
raises(ValueError, "nth_power_roots_poly(f, x)")
assert factor(nth_power_roots_poly(f, 2)) == f_2
assert factor(nth_power_roots_poly(f, 3)) == f_3
assert factor(nth_power_roots_poly(f, 4)) == f_4
assert factor(nth_power_roots_poly(f, 12)) == f_12
raises(MultivariatePolynomialError, "nth_power_roots_poly(x + y, 2, x, y)")
def test_cancel():
assert cancel(0) == 0
assert cancel(7) == 7
assert cancel(x) == x
assert cancel(oo) == oo
assert cancel((2, 3)) == (1, 2, 3)
assert cancel((1, 0), x) == (1, 1, 0)
assert cancel((0, 1), x) == (1, 0, 1)
f, g, p, q = 4*x**2-4, 2*x-2, 2*x+2, 1
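# cancel((f, g)) returns (coeff, numer, denom) with the common factor removed:
# (4*x**2 - 4)/(2*x - 2) == (2*x + 2)/1 after cancelling 2*(x - 1).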
F, G, P, Q = [ Poly(u, x) for u in (f, g, p, q) ]
assert F.cancel(G) == (1, P, Q)
assert cancel((f, g)) == (1, p, q)
assert cancel((f, g), x) == (1, p, q)
assert cancel((f, g), (x,)) == (1, p, q)
assert cancel((F, G)) == (1, P, Q)
assert cancel((f, g), polys=True) == (1, P, Q)
assert cancel((F, G), polys=False) == (1, p, q)
f = (x**2 - 2)/(x + sqrt(2))
assert cancel(f) == f
assert cancel(f, greedy=False) == x - sqrt(2)
f = (x**2 - 2)/(x - sqrt(2))
assert cancel(f) == f
assert cancel(f, greedy=False) == x + sqrt(2)
assert cancel((x**2/4 - 1, x/2 - 1)) == (S(1)/2, x + 2, 1)
assert cancel((x**2-y)/(x-y)) == 1/(x - y)*(x**2 - y)
assert cancel((x**2-y**2)/(x-y), x) == x + y
assert cancel((x**2-y**2)/(x-y), y) == x + y
assert cancel((x**2-y**2)/(x-y)) == x + y
assert cancel((x**3-1)/(x**2-1)) == (x**2+x+1)/(x+1)
assert cancel((x**3/2-S(1)/2)/(x**2-1)) == (x**2+x+1)/(2*x+2)
assert cancel((exp(2*x) + 2*exp(x) + 1)/(exp(x) + 1)) == exp(x) + 1
f = Poly(x**2 - a**2, x)
g = Poly(x - a, x)
F = Poly(x + a, x)
G = Poly(1, x)
assert cancel((f, g)) == (1, F, G)
f = x**3 + (sqrt(2) - 2)*x**2 - (2*sqrt(2) + 3)*x - 3*sqrt(2)
g = x**2 - 2
assert cancel((f, g), extension=True) == (1, x**2 - 2*x - 3, x - sqrt(2))
f = Poly(-2*x + 3, x)
g = Poly(-x**9 + x**8 + x**6 - x**5 + 2*x**2 - 3*x + 1, x)
assert cancel((f, g)) == (1, -f, -g)
f = Poly(y, y, domain='ZZ(x)')
g = Poly(1, y, domain='ZZ[x]')
assert f.cancel(g) == (1, Poly(y, y, domain='ZZ(x)'), Poly(1, y, domain='ZZ(x)'))
assert f.cancel(g, include=True) == (Poly(y, y, domain='ZZ(x)'), Poly(1, y, domain='ZZ(x)'))
f = Poly(5*x*y + x, y, domain='ZZ(x)')
g = Poly(2*x**2*y, y, domain='ZZ(x)')
assert f.cancel(g, include=True) == (Poly(5*y + 1, y, domain='ZZ(x)'), Poly(2*x*y, y, domain='ZZ(x)'))
def test_reduced():
f = 2*x**4 + y**2 - x**2 + y**3
G = [x**3 - x, y**3 - y]
Q = [2*x, 1]
r = x**2 + y**2 + y
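# reduced(f, G) returns quotients Q and a remainder r with f == sum(q*g) + r;
# here 2*x*(x**3 - x) + 1*(y**3 - y) + (x**2 + y**2 + y) reproduces f.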
assert reduced(f, G) == (Q, r)
assert reduced(f, G, x, y) == (Q, r)
H = groebner(G)
assert H.reduce(f) == (Q, r)
Q = [Poly(2*x, x, y), Poly(1, x, y)]
r = Poly(x**2 + y**2 + y, x, y)
assert _strict_eq(reduced(f, G, polys=True), (Q, r))
assert _strict_eq(reduced(f, G, x, y, polys=True), (Q, r))
H = groebner(G, polys=True)
assert _strict_eq(H.reduce(f), (Q, r))
f = 2*x**3 + y**3 + 3*y
G = groebner([x**2 + y**2 - 1, x*y - 2])
Q = [x**2 - x*y**3/2 + x*y/2 + y**6/4 - y**4/2 + y**2/4, -y**5/4 + y**3/2 + 3*y/4]
r = 0
assert reduced(f, G) == (Q, r)
assert G.reduce(f) == (Q, r)
assert reduced(f, G, auto=False)[1] != 0
assert G.reduce(f, auto=False)[1] != 0
assert G.contains(f) == True
assert G.contains(f + 1) == False
assert reduced(1, [1], x) == ([1], 0)
raises(ComputationFailed, "reduced(1, [1])")
def test_groebner():
assert groebner([], x, y, z) == []
assert groebner([x**2 + 1, y**4*x + x**3],
x, y, order='lex') == [1 + x**2, -1 + y**4]
assert groebner([x**2 + 1, y**4*x + x**3, x*y*z**3],
x, y, z, order='grevlex') == [-1 + y**4, z**3, 1 + x**2]
assert groebner([x**2 + 1, y**4*x + x**3], x, y, order='lex', polys=True) == \
[Poly(1 + x**2, x, y), Poly(-1 + y**4, x, y)]
assert groebner([x**2 + 1, y**4*x + x**3, x*y*z**3], x, y, z, order='grevlex', polys=True) == \
[Poly(-1 + y**4, x, y, z), Poly(z**3, x, y, z), Poly(1 + x**2, x, y, z)]
assert groebner([x**3 - 1, x**2 - 1]) == [x - 1]
assert groebner([Eq(x**3, 1), Eq(x**2, 1)]) == [x - 1]
F = [3*x**2 + y*z - 5*x - 1, 2*x + 3*x*y + y**2, x - 3*y + x*z - 2*z**2]
f = z**9 - x**2*y**3 - 3*x*y**2*z + 11*y*z**2 + x**2*z**2 - 5
G = groebner(F, x, y, z, modulus=7, symmetric=False)
assert G == [1 + x + y + 3*z + 2*z**2 + 2*z**3 + 6*z**4 + z**5,
1 + 3*y + y**2 + 6*z**2 + 3*z**3 + 3*z**4 + 3*z**5 + 4*z**6,
1 + 4*y + 4*z + y*z + 4*z**3 + z**4 + z**6,
6 + 6*z + z**2 + 4*z**3 + 3*z**4 + 6*z**5 + 3*z**6 + z**7]
Q, r = reduced(f, G, x, y, z, modulus=7, symmetric=False, polys=True)
assert sum([ q*g for q, g in zip(Q, G)]) + r == Poly(f, modulus=7)
F = [x*y - 2*y, 2*y**2 - x**2]
assert groebner(F, x, y, order='grevlex') == \
[y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y]
assert groebner(F, y, x, order='grevlex') == \
[x**3 - 2*x**2, -x**2 + 2*y**2, x*y - 2*y]
assert groebner(F, order='grevlex', field=True) == \
[y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y]
assert groebner([1], x) == [1]
raises(DomainError, "groebner([x**2 + 2.0*y], x, y)")
raises(ComputationFailed, "groebner([1])")
assert groebner([x**2 - 1, x**3 + 1], method='buchberger') == [x + 1]
assert groebner([x**2 - 1, x**3 + 1], method='f5b') == [x + 1]
raises(ValueError, "groebner([x, y], method='unknown')")
def test_fglm():
F = [a + b + c + d, a*b + a*d + b*c + b*d, a*b*c + a*b*d + a*c*d + b*c*d, a*b*c*d - 1]
G = groebner(F, a, b, c, d, order='grlex')
assert G.fglm('lex') == [
4*a + 3*d**9 - 4*d**5 - 3*d,
4*b + 4*c - 3*d**9 + 4*d**5 + 7*d,
4*c**2 + 3*d**10 - 4*d**6 - 3*d**2,
4*c*d**4 + 4*c - d**9 + 4*d**5 + 5*d,
d**12 - d**8 - d**4 + 1,
]
F = [9*x**8 + 36*x**7 - 32*x**6 - 252*x**5 - 78*x**4 + 468*x**3 + 288*x**2 - 108*x + 9,
-72*t*x**7 - 252*t*x**6 + 192*t*x**5 + 1260*t*x**4 + 312*t*x**3 - 404*t*x**2 - 576*t*x + \
108*t - 72*x**7 - 256*x**6 + 192*x**5 + 1280*x**4 + 312*x**3 - 576*x + 96]
G = groebner(F, t, x, order='grlex')
assert G.fglm('lex') == [
203577793572507451707*t + 627982239411707112*x**7 - 666924143779443762*x**6 - \
10874593056632447619*x**5 + 5119998792707079562*x**4 + 72917161949456066376*x**3 + \
20362663855832380362*x**2 - 142079311455258371571*x + 183756699868981873194,
9*x**8 + 36*x**7 - 32*x**6 - 252*x**5 - 78*x**4 + 468*x**3 + 288*x**2 - 108*x + 9,
]
F = [x**2 - x - 3*y + 1, -2*x + y**2 + y - 1]
G = groebner(F, x, y, order='lex')
assert G.fglm('grlex') == [
x**2 - x - 3*y + 1,
y**2 - 2*x + y - 1,
]
def test_is_zero_dimensional():
assert is_zero_dimensional([x, y], x, y) == True
assert is_zero_dimensional([x**3 + y**2], x, y) == False
assert is_zero_dimensional([x, y, z], x, y, z) == True
assert is_zero_dimensional([x, y, z], x, y, z, t) == False
F = [x*y - z, y*z - x, x*y - y]
assert is_zero_dimensional(F, x, y, z) == True
F = [x**2 - 2*x*z + 5, x*y**2 + y*z**3, 3*y**2 - 8*z**2]
assert is_zero_dimensional(F, x, y, z) == True
def test_GroebnerBasis():
F = [x*y - 2*y, 2*y**2 - x**2]
G = groebner(F, x, y, order='grevlex')
H = [y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y]
P = [ Poly(h, x, y) for h in H ]
assert isinstance(G, GroebnerBasis) == True
assert len(G) == 3
assert G[0] == H[0] and not G[0].is_Poly
assert G[1] == H[1] and not G[1].is_Poly
assert G[2] == H[2] and not G[2].is_Poly
assert G[1:] == H[1:] and not any(g.is_Poly for g in G[1:])
assert G[:2] == H[:2] and not any(g.is_Poly for g in G[1:])
assert G.exprs == H
assert G.polys == P
assert G.gens == (x, y)
assert G.domain == ZZ
assert G.order == grevlex
assert G == H
assert G == tuple(H)
assert G == P
assert G == tuple(P)
assert G != []
G = groebner(F, x, y, order='grevlex', polys=True)
assert G[0] == P[0] and G[0].is_Poly
assert G[1] == P[1] and G[1].is_Poly
assert G[2] == P[2] and G[2].is_Poly
assert G[1:] == P[1:] and all(g.is_Poly for g in G[1:])
assert G[:2] == P[:2] and all(g.is_Poly for g in G[1:])
def test_poly():
assert poly(x) == Poly(x, x)
assert poly(y) == Poly(y, y)
assert poly(x + y) == Poly(x + y, x, y)
assert poly(x + sin(x)) == Poly(x + sin(x), x, sin(x))
assert poly(x + y, wrt=y) == Poly(x + y, y, x)
assert poly(x + sin(x), wrt=sin(x)) == Poly(x + sin(x), sin(x), x)
assert poly(x*y + 2*x*z**2 + 17) == Poly(x*y + 2*x*z**2 + 17, x, y, z)
assert poly(2*(y + z)**2 - 1) == Poly(2*y**2 + 4*y*z + 2*z**2 - 1, y, z)
assert poly(x*(y + z)**2 - 1) == Poly(x*y**2 + 2*x*y*z + x*z**2 - 1, x, y, z)
assert poly(2*x*(y + z)**2 - 1) == Poly(2*x*y**2 + 4*x*y*z + 2*x*z**2 - 1, x, y, z)
assert poly(2*(y + z)**2 - x - 1) == Poly(2*y**2 + 4*y*z + 2*z**2 - x - 1, x, y, z)
assert poly(x*(y + z)**2 - x - 1) == Poly(x*y**2 + 2*x*y*z + x*z**2 - x - 1, x, y, z)
assert poly(2*x*(y + z)**2 - x - 1) == Poly(2*x*y**2 + 4*x*y*z + 2*x*z**2 - x - 1, x, y, z)
assert poly(x*y + (x + y)**2 + (x + z)**2) == \
Poly(2*x*z + 3*x*y + y**2 + z**2 + 2*x**2, x, y, z)
assert poly(x*y*(x + y)*(x + z)**2) == \
Poly(x**3*y**2 + x*y**2*z**2 + y*x**2*z**2 + 2*z*x**2*y**2 + 2*y*z*x**3 + y*x**4, x, y, z)
assert poly(Poly(x + y + z, y, x, z)) == Poly(x + y + z, y, x, z)
assert poly((x + y)**2, x) == Poly(x**2 + 2*x*y + y**2, x, domain=ZZ[y])
assert poly((x + y)**2, y) == Poly(x**2 + 2*x*y + y**2, y, domain=ZZ[x])
assert poly(1, x) == Poly(1, x)
raises(GeneratorsNeeded, "poly(1)")
def test_keep_coeff():
u = Mul(2, x + 1, evaluate=False)
assert _keep_coeff(S(1), x) == x
assert _keep_coeff(S(-1), x) == -x
assert _keep_coeff(S(1), 2*x) == 2*x
assert _keep_coeff(S(2), x/2) == x
assert _keep_coeff(S(2), sin(x)) == 2*sin(x)
assert _keep_coeff(S(2), x + 1) == u
assert _keep_coeff(x, 1/x) == 1
assert _keep_coeff(x + 1, S(2)) == u
| 35.509738 | 133 | 0.515984 |
79477928937d76b96e26230c09cd2a208eb87e1e | 4,007 | py | Python | util/dataset.py | shpotes/License-Plate-Recognition | fed40cb1299ec6885bd8bebc47cb1db5d3ccd50a | ["MIT"] | null | null | null | util/dataset.py | shpotes/License-Plate-Recognition | fed40cb1299ec6885bd8bebc47cb1db5d3ccd50a | ["MIT"] | null | null | null | util/dataset.py | shpotes/License-Plate-Recognition | fed40cb1299ec6885bd8bebc47cb1db5d3ccd50a | ["MIT"] | null | null | null |
import os, random, cv2
import numpy as np
def one_hot(class_numbers, num_classes=None):
    if num_classes is None:
        num_classes = np.max(class_numbers) + 1
    return np.eye(num_classes, dtype=float)[class_numbers]
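# Illustration (not part of the original file): one_hot([0, 2], num_classes=3) gives
#   [[1., 0., 0.],
#    [0., 0., 1.]]  -- row i is the one-hot encoding of class_numbers[i]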
class DataSet:
    def __init__(self, img_size, train, data_dir='.', validation=True):
        self.class_names = []
        self.filenames = []
        self.class_names = []
        self.num_classes = 0
        self.validation_filenames = []
        self.img_size = img_size
        self.img_shape = (img_size, img_size)
        self.img_size_flat = img_size ** 2
        for folder in next(os.walk(data_dir))[1]:
            if '.' == folder[0]:
                continue
            if 'validation' in folder:
                for file in os.listdir(data_dir + folder):
                    self.validation_filenames.append(data_dir + folder + '/' + file)
                continue
            self.class_names.append(folder)
            self.num_classes += 1
            for file in os.listdir(data_dir + folder):
                self.filenames.append(data_dir + folder + '/' + file)
        self.class_names.sort()
        random.shuffle(self.filenames)
        self._num_data = len(self.filenames)
        self.num_train = int(self._num_data * train)
        self.num_test = self._num_data - self.num_train
        self.train_filenames = self.filenames[:self.num_train]
        self.test_filenames = self.filenames[self.num_train:]
        if validation:
            self.x_val = []
            self.x_val_flat = []
            self.y_val = []
            self.y_val_cls = []
            for file in self.validation_filenames:
                tmp_cls = file.split('/')[-1][0].upper()
                self.y_val_cls.append(self.class_names.index(tmp_cls))
                self.y_val.append(one_hot(self.class_names.index(tmp_cls), self.num_classes))
                img = cv2.cvtColor(cv2.resize(cv2.imread(file), self.img_shape), cv2.COLOR_BGR2GRAY)
                self.x_val.append(img)
                self.x_val_flat.append(img.flatten())
            self.x_val_flat = np.vstack(self.x_val_flat)
            self.y_val = np.vstack(self.y_val)
            self.y_val_cls = np.array(self.y_val_cls)
        self.x_train = []
        self.x_train_flat = []
        self.y_train = []
        self.y_train_cls = []
        for file in self.train_filenames:
            tmp_cls = file.split('/')[-2]
            self.y_train_cls.append(self.class_names.index(tmp_cls))
            self.y_train.append(one_hot(self.class_names.index(tmp_cls), self.num_classes))
            img = cv2.cvtColor(cv2.resize(cv2.imread(file), self.img_shape), cv2.COLOR_BGR2GRAY)
            self.x_train.append(img)
            self.x_train_flat.append(img.flatten())
        self.x_train_flat = np.vstack(self.x_train_flat)
        self.y_train = np.vstack(self.y_train)
        #print(self.y_train_cls)
        self.y_train_cls = np.array(self.y_train_cls)
        self.x_test = []
        self.x_test_flat = []
        self.y_test = []
        self.y_test_cls = []
        for file in self.test_filenames:
            tmp_cls = file.split('/')[-2]
            self.y_test_cls.append(self.class_names.index(tmp_cls))
            self.y_test.append(one_hot(self.class_names.index(tmp_cls), self.num_classes))
            img = cv2.cvtColor(cv2.resize(cv2.imread(file), self.img_shape), cv2.COLOR_BGR2GRAY)
            self.x_test.append(img)
            self.x_test_flat.append(img.flatten())
        self.x_test_flat = np.vstack(self.x_test_flat)
        self.y_test = np.vstack(self.y_test)
        self.y_test_cls = np.array(self.y_test_cls)
    def random_batch(self, batch_size=32):
        idx = np.random.randint(low=0, high=self.num_train, size=batch_size)
        x_batch = self.x_train_flat[idx]
        y_batch = self.y_train[idx]
        y_batch_cls = self.y_train_cls[idx]
        return x_batch, y_batch, y_batch_cls
| 36.427273 | 100 | 0.595957 |
79477939c3da5dc986422871422b75299ed34e6d | 1,356 | py | Python | setup.py | Guiorgy/vaksina | 917de581a8fe1b77fa69484d1a88cbf2724c0610 | ["MIT"] | 39 | 2022-01-01T19:01:52.000Z | 2022-01-25T03:05:19.000Z | setup.py | Guiorgy/vaksina | 917de581a8fe1b77fa69484d1a88cbf2724c0610 | ["MIT"] | 10 | 2022-01-01T19:17:34.000Z | 2022-01-21T16:35:31.000Z | setup.py | Guiorgy/vaksina | 917de581a8fe1b77fa69484d1a88cbf2724c0610 | ["MIT"] | 14 | 2022-01-02T11:43:02.000Z | 2022-01-17T20:17:03.000Z |
#!/usr/bin/env python3
# Copyright (c) 2022 Michael Casadevall
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from setuptools import find_packages, setup
setup(
    name="vaksina",
    version="0.1",
    packages=find_packages(exclude=("tests",)),
    install_requires=[
        "python-jose[cryptography]",
    ],
    test_suite="tests",
)
| 38.742857 | 80 | 0.753687 |
794779ff5fed2ad99b5eadb8439ca947cf4e6ca9 | 2,604 | py | Python | aliyun-python-sdk-polardb/aliyunsdkpolardb/request/v20170801/ResetAccountRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | ["Apache-2.0"] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-polardb/aliyunsdkpolardb/request/v20170801/ResetAccountRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | ["Apache-2.0"] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-polardb/aliyunsdkpolardb/request/v20170801/ResetAccountRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | ["Apache-2.0"] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkpolardb.endpoint import endpoint_data
class ResetAccountRequest(RpcRequest):
    def __init__(self):
        RpcRequest.__init__(self, 'polardb', '2017-08-01', 'ResetAccount','polardb')
        self.set_method('POST')
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
    def get_ResourceOwnerId(self):
        return self.get_query_params().get('ResourceOwnerId')
    def set_ResourceOwnerId(self,ResourceOwnerId):
        self.add_query_param('ResourceOwnerId',ResourceOwnerId)
    def get_AccountName(self):
        return self.get_query_params().get('AccountName')
    def set_AccountName(self,AccountName):
        self.add_query_param('AccountName',AccountName)
    def get_ResourceOwnerAccount(self):
        return self.get_query_params().get('ResourceOwnerAccount')
    def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
        self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
    def get_DBClusterId(self):
        return self.get_query_params().get('DBClusterId')
    def set_DBClusterId(self,DBClusterId):
        self.add_query_param('DBClusterId',DBClusterId)
    def get_OwnerAccount(self):
        return self.get_query_params().get('OwnerAccount')
    def set_OwnerAccount(self,OwnerAccount):
        self.add_query_param('OwnerAccount',OwnerAccount)
    def get_OwnerId(self):
        return self.get_query_params().get('OwnerId')
    def set_OwnerId(self,OwnerId):
        self.add_query_param('OwnerId',OwnerId)
    def get_AccountPassword(self):
        return self.get_query_params().get('AccountPassword')
    def set_AccountPassword(self,AccountPassword):
        self.add_query_param('AccountPassword',AccountPassword)
| 35.189189 | 79 | 0.774962 |
79477bc2b9069f063dda32227e48a0ec9dedad3b | 473 | py | Python | ai_ml_projects/masters_courses/image_and_vision_processing/intensityscale.py | 5aurabhpathak/src | dda72beba2aaae67542a2f10e89048e86d04cb28 | ["BSD-3-Clause"] | 1 | 2021-07-07T06:51:18.000Z | 2021-07-07T06:51:18.000Z | ai_ml_projects/masters_courses/image_and_vision_processing/intensityscale.py | 5aurabhpathak/all-I-ve-done | dda72beba2aaae67542a2f10e89048e86d04cb28 | ["BSD-3-Clause"] | null | null | null | ai_ml_projects/masters_courses/image_and_vision_processing/intensityscale.py | 5aurabhpathak/all-I-ve-done | dda72beba2aaae67542a2f10e89048e86d04cb28 | ["BSD-3-Clause"] | 1 | 2020-08-11T09:53:22.000Z | 2020-08-11T09:53:22.000Z |
#!/bin/env python3.5
from matplotlib import pyplot as pl
from numpy import max, min
def disp(im, n, t):
    pl.subplot(120 + n)
    pl.title(t)
    pl.axis('off')
    pl.imshow(im, pl.get_cmap('Greys_r'))
im = pl.imread('data/cameraman.tif').astype('uint16')
print('In=\n',im)
disp(im,1, 'input')
out = (30 + im * 150/(max(im) - min(im))).astype('uint8')
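# linear rescale: spread the input intensity range over a span of 150 grey levels and
# shift it up by 30, then cast down to uint8 for display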
print('out=\n', out,'\nMaximum intensity',max(out),'\nMinimum intensity', min(out))
disp(out,2,'output')
pl.show()
| 24.894737 | 83 | 0.638478 |
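For comparison, a general min-max rescaling helper (a sketch, not part of the original script) maps intensities onto an explicit target range instead of the fixed offset of 30 and stretch factor of 150 used above:

import numpy as np

def rescale_intensity(im, low=0, high=255):
    """Linearly map the intensities of im onto [low, high]."""
    im = im.astype('float64')
    span = im.max() - im.min()
    if span == 0:
        # Constant image: avoid division by zero and return a flat result.
        return np.full(im.shape, low, dtype='uint8')
    out = (im - im.min()) / span * (high - low) + low
    return out.astype('uint8')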
79477c512d217ca41158a4712a0d2403fe26c484 | 1,700 | py | Python | ZipFileLearn/zip.file.py | subash-kc/2022-01-04-Python | 5ce51e4265bcd860a4e62423edef6ec9cd1437b4 | [
"MIT"
] | 1 | 2022-01-14T18:03:42.000Z | 2022-01-14T18:03:42.000Z | ZipFileLearn/zip.file.py | subash-kc/2022-01-04-Python | 5ce51e4265bcd860a4e62423edef6ec9cd1437b4 | [
"MIT"
] | null | null | null | ZipFileLearn/zip.file.py | subash-kc/2022-01-04-Python | 5ce51e4265bcd860a4e62423edef6ec9cd1437b4 | [
"MIT"
] | null | null | null | # standard library imports
import os # low-level, operating system agnostic commands (i.e. supports most OSes)
import zipfile # tools to create, read, write, append, and list a ZIP file
# function to search for all files within a directory, and add them to our ZIP
def zipdir(dirpath, zipfileobj):
"""does the work of writing data into our zipfile"""
    # os.walk() returns a 3-tuple
    # that's a fancy way of saying it returns 3 things
    # always in the order... root, dirs, files
    # so the following line says: given that you will return to us root, dirs and files...
for root, dirs, files in os.walk(dirpath):
for file in files: # we only want to loop across the file component
            print(os.path.join(root, file)) # create an absolute path of where the file lives
zipfileobj.write(os.path.join(root, file)) ## adds files to our zipfileobject that was passed in
return None # when we are done, no need to return any value
def main():
"""called at runtime"""
    # ask the user for the directory to be archived
dirpath = input("What directory are we archiving today? ")
## If the directory exists, then we can begin archiving it
if os.path.isdir(dirpath):
zippedfn = input("What should we call the finished archive? ")
## zippedfn is the zipped file for the archive
with zipfile.ZipFile(zippedfn, "w", zipfile.ZIP_DEFLATED) as zipfileobj:
# create a zip file object -- we are making a new zip file
zipdir(dirpath, zipfileobj) # call to our function
else:
print("Run the script again when you have a valid directory to zip.")
# this line calls our main function
main() | 50 | 108 | 0.684118 |
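A quick way to sanity-check the resulting archive, as a standard-library-only sketch (the archive name below is a placeholder):

import zipfile

with zipfile.ZipFile("example_archive.zip") as zf:   # placeholder file name
    for name in zf.namelist():                       # list every stored member
        print(name)
    print("first corrupt member:", zf.testzip())     # None means all CRCs check out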
79477d323a9ad6facbdf21a306a568159ce97378 | 2,827 | py | Python | splt_panel.py | shachar700/SplatoonImageGenerator | 1b2aefbfac8f7ec6608f64084c01f241895cdaaa | [
"MIT"
] | 1 | 2021-08-25T05:16:10.000Z | 2021-08-25T05:16:10.000Z | splt_panel.py | shachar700/SplatoonImageGenerator | 1b2aefbfac8f7ec6608f64084c01f241895cdaaa | [
"MIT"
] | null | null | null | splt_panel.py | shachar700/SplatoonImageGenerator | 1b2aefbfac8f7ec6608f64084c01f241895cdaaa | [
"MIT"
] | 1 | 2021-08-25T10:24:43.000Z | 2021-08-25T10:24:43.000Z | import bpy
class SPLT_PT_Panel(bpy.types.Panel):
# bl_idname = "splatoonpanel"
bl_label = "Splatoon Tools"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_category = "Splatoon Tools"
def draw(self, context):
layout = self.layout
box = layout.box()
box.label(text="Basic Settings")
box.prop(context.window_manager, "objectselection_props")
box.operator("object.rotate_and_scale")
box.operator("object.position_model")
box.operator("object.position_camera")
box.operator("object.check_rotation")
box.operator("object.fix_material")
box.operator("object.addhdri")
layout.separator()
box = layout.box()
box.label(text="Advanced Settings")
box.prop(context.window_manager, "x_rotations")
row = box.row()
row.prop(context.window_manager, "x_resolution")
row.prop(context.window_manager, "y_resolution")
box.prop(context.window_manager, "output_format")
layout.separator()
box = layout.box()
box.prop(context.window_manager, "output_folder")
box.operator("object.render_wiki")
layout.separator()
box = layout.box()
box.operator("wm.url_open", text="Manual",
icon='HELP', emboss=True).url = "https://splatoonwiki.org/wiki/Inkipedia:3D_Models"
class SPLT_PT_warning_panel(bpy.types.Panel):
bl_label = "Install Dependencies"
bl_category = "Splatoon Tools"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
@classmethod
def poll(self, context):
return not context.window_manager.dependencies_installed
def draw(self, context):
layout = self.layout
lines = [f"Please install the missing dependencies for the PIL and numpy add-ons.",
f"1. Open the preferences (Edit > Preferences > Add-ons).",
f"2. Search for the Splatoon add-on.",
f"3. Open the details section of the add-on.",
f"4. Click on the Install button.",
f" This will download and install the missing Python packages, if Blender has the required",
f" permissions.",
f"",
f"If you're attempting to run the add-on from the text editor, you won't see the options described",
f"above. Please install the add-on properly through the preferences.",
f"1. Open the add-on preferences (Edit > Preferences > Add-ons).",
f"2. Press the \"Install\" button.",
f"3. Search for the add-on file.",
f"4. Confirm the selection by pressing the \"Install Add-on\" button in the file browser."]
for line in lines:
layout.label(text=line)
| 35.3375 | 117 | 0.611249 |
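For these panels to show up in Blender's sidebar they still have to be registered; a minimal sketch, assuming it lives in the same module as the classes above (the add-on's real register() presumably also defines the WindowManager properties used in draw()):

import bpy

classes = (SPLT_PT_Panel, SPLT_PT_warning_panel)

def register():
    for cls in classes:
        bpy.utils.register_class(cls)

def unregister():
    for cls in reversed(classes):
        bpy.utils.unregister_class(cls)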
79477f2450e5cd2dfc0f02eb54afde0b3970bd40 | 1,539 | py | Python | hooks/post_gen_project.py | movadaml/cookiecutter-flask | e2897cd231d5ff3433409e859d12f3838a9f5100 | [
"MIT"
] | null | null | null | hooks/post_gen_project.py | movadaml/cookiecutter-flask | e2897cd231d5ff3433409e859d12f3838a9f5100 | [
"MIT"
] | null | null | null | hooks/post_gen_project.py | movadaml/cookiecutter-flask | e2897cd231d5ff3433409e859d12f3838a9f5100 | [
"MIT"
] | null | null | null | """Post gen hook to ensure that the generated project
has only one package-management setup, either pipenv or pip."""
import logging
import os
import shutil
import sys
_logger = logging.getLogger()
def clean_extra_package_management_files():
"""Removes either requirements files and folder or the Pipfile."""
use_pipenv = "{{cookiecutter.use_pipenv}}"
use_heroku = "{{cookiecutter.use_heroku}}"
to_delete = []
if use_pipenv == "True":
to_delete = to_delete + ["requirements.txt", "requirements"]
else:
to_delete.append("Pipfile")
if use_heroku == "False":
to_delete = to_delete + ["Procfile", "app.json"]
try:
for file_or_dir in to_delete:
if os.path.isfile(file_or_dir):
os.remove(file_or_dir)
else:
shutil.rmtree(file_or_dir)
shutil.copy(".env.example", ".env")
open("dev.db", 'a').close()
except OSError as e:
_logger.warning("While attempting to remove file(s) an error occurred")
_logger.warning(f"Error: {e}")
sys.exit(1)
def create_namespace_folder():
module_name = "{{cookiecutter.app_name}}"
if "." in module_name:
namespace, package_name = module_name.split(".")
old = os.path.join(module_name)
new = os.path.join(namespace)
os.makedirs(new)
new = os.path.join(namespace, package_name)
shutil.move(old, new)
if __name__ == "__main__":
clean_extra_package_management_files()
create_namespace_folder() | 30.78 | 79 | 0.640026 |
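To illustrate what create_namespace_folder does, a stand-alone sketch with a made-up module name (not part of the hook itself):

import os
import shutil

module_name = "acme.blog"                    # stand-in for {{cookiecutter.app_name}}
namespace, package_name = module_name.split(".")

os.makedirs(module_name, exist_ok=True)      # simulate the generated "acme.blog/" folder
os.makedirs(namespace, exist_ok=True)
shutil.move(module_name, os.path.join(namespace, package_name))
# Result: "acme.blog/" has been moved to "acme/blog/", giving a namespace layout.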
79477fd0c8ac9b26098445b3efc5f6b23b296c86 | 998 | py | Python | kubernetes_asyncio/test/test_v1_limit_range_item.py | hubo1016/kubernetes_asyncio | d57e9e9be11f6789e1ce8d5b161acb64d29acf35 | [
"Apache-2.0"
] | 1 | 2021-01-13T09:28:57.000Z | 2021-01-13T09:28:57.000Z | kubernetes_asyncio/test/test_v1_limit_range_item.py | hubo1016/kubernetes_asyncio | d57e9e9be11f6789e1ce8d5b161acb64d29acf35 | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/test/test_v1_limit_range_item.py | hubo1016/kubernetes_asyncio | d57e9e9be11f6789e1ce8d5b161acb64d29acf35 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.12.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import kubernetes_asyncio.client
from kubernetes_asyncio.client.models.v1_limit_range_item import V1LimitRangeItem # noqa: E501
from kubernetes_asyncio.client.rest import ApiException
class TestV1LimitRangeItem(unittest.TestCase):
"""V1LimitRangeItem unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1LimitRangeItem(self):
"""Test V1LimitRangeItem"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes_asyncio.client.models.v1_limit_range_item.V1LimitRangeItem() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 24.341463 | 119 | 0.724449 |
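One possible way to fill in the FIXME above, as a sketch; the exact constructor keywords are an assumption based on the generated model for this API version:

from kubernetes_asyncio.client.models.v1_limit_range_item import V1LimitRangeItem

# Hypothetical example values for a per-container limit range entry.
item = V1LimitRangeItem(
    type='Container',
    default={'cpu': '500m', 'memory': '256Mi'},
    default_request={'cpu': '250m', 'memory': '128Mi'},
    max={'cpu': '1', 'memory': '1Gi'},
    min={'cpu': '100m', 'memory': '64Mi'},
)
assert item.type == 'Container'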
794780d5bddf288a820cb853bd9c3e7290707489 | 522 | py | Python | applications/migrations/0024_application_hear_about.py | hackkosice/registration | 086103571638706e80b9215ab9f7d57e18ebf13e | [
"MIT"
] | 4 | 2019-02-04T21:21:34.000Z | 2021-03-25T01:49:21.000Z | applications/migrations/0024_application_hear_about.py | hackkosice/registration | 086103571638706e80b9215ab9f7d57e18ebf13e | [
"MIT"
] | null | null | null | applications/migrations/0024_application_hear_about.py | hackkosice/registration | 086103571638706e80b9215ab9f7d57e18ebf13e | [
"MIT"
] | 1 | 2018-11-19T10:17:19.000Z | 2018-11-19T10:17:19.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2018-11-07 21:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('applications', '0023_remove_application_lennyface'),
]
operations = [
migrations.AddField(
model_name='application',
name='hear_about',
field=models.CharField(default='', max_length=200),
preserve_default=False,
),
]
| 23.727273 | 63 | 0.630268 |