filename | text
---|---|
the-stack_106_13095
|
from functions import *
in_dfs2_list = []
in_ascii = input("ASCII template: ").replace('"', '')
while True:
in_dfs2 = input("Dfs2 file path (press 'enter' to quit): ").replace('"', '')
if in_dfs2 == '':
break
if os.path.isfile(in_dfs2) is False:
print("The path entered is not a file.")
quit()
# Printing the header of the current dfs2 file
print(Dfs2(in_dfs2).read())
item_number = input('Item number to be converted (leave blank to pick first): ')
if item_number == '':
item_number = 0
# Asking whether the output files should contain only integer values
is_int = input("Do you want to force integer values in the result files? (type 'y' to confirm): ").lower()
if is_int == 'y':
is_int = True
else:
is_int = False
# Adding arguments to a tuple
list_of_parameters = (in_dfs2, int(item_number), is_int)
# Appending the argument tuple to the list of conversions
in_dfs2_list.append(list_of_parameters)
# Script termination if no dfs2 file is provided for conversion
if not in_dfs2_list:
print("Nothing to do. The user has not entered a file path.")
quit()
# Creating a folder
out_folder = os.path.join(os.path.dirname(in_dfs2_list[0][0]), 'dfs2_to_ascii')
try:
os.mkdir(out_folder)
except FileExistsError:
print(f"The destination folder already exists \n{out_folder}")
else:
print(f"A new folder has been created \n{out_folder}")
# Execution of the function
for list_of_arguments in in_dfs2_list:
print(f"\nInput dfs2 file: {list_of_arguments[0]}")
out_ascii = os.path.join(out_folder, os.path.split(list_of_arguments[0])[1].replace('.dfs2', '.asc'))
print(f"Output ascii file: {out_ascii}")
dfs2_to_ascii(in_ascii, out_ascii, *list_of_arguments)
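# Note on the call above: each tuple collected in `in_dfs2_list` is unpacked with `*`, so
# `dfs2_to_ascii` (defined in the imported `functions` module, not shown here) receives
# (ascii_template, out_path, dfs2_path, item_number, is_int). For example, with a
# hypothetical list_of_arguments = ('input.dfs2', 0, False) the call is equivalent to:
#   dfs2_to_ascii(in_ascii, out_ascii, 'input.dfs2', 0, False)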
|
the-stack_106_13099
|
# Copyright (c) 2017 crocoite contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Command line interface
"""
import argparse, sys, signal, asyncio, os, json
from traceback import TracebackException
from enum import IntEnum
from yarl import URL
from http.cookies import SimpleCookie
import pkg_resources
try:
import manhole
manhole.install (patch_fork=False, oneshot_on='USR1')
except ModuleNotFoundError:
pass
from . import behavior, browser
from .controller import SinglePageController, \
ControllerSettings, StatsHandler, LogHandler, \
RecursiveController, DepthLimit, PrefixLimit
from .devtools import Passthrough, Process
from .warc import WarcHandler
from .logger import Logger, JsonPrintConsumer, DatetimeConsumer, \
WarcHandlerConsumer, Level
from .devtools import Crashed
def absurl (s):
""" argparse: Absolute URL """
u = URL (s)
if u.is_absolute ():
return u
raise argparse.ArgumentTypeError ('Must be absolute')
def cookie (s):
""" argparse: Cookie """
c = SimpleCookie (s)
# for some reason the constructor does not raise an exception if the cookie
# supplied is invalid. It’ll simply be empty.
if len (c) != 1:
raise argparse.ArgumentTypeError ('Invalid cookie')
# we want a single Morsel
return next (iter (c.values ()))
def cookiejar (f):
""" argparse: Cookies from file """
cookies = []
try:
with open (f, 'r') as fd:
for l in fd:
l = l.lstrip ()
if l and not l.startswith ('#'):
cookies.append (cookie (l))
except FileNotFoundError:
raise argparse.ArgumentTypeError (f'Cookie jar "{f}" does not exist')
return cookies
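def _validator_demo():
    """Illustrative sketch only: how the argparse type-validators above behave.
    The real parsers are built in single()/recursive() below."""
    demo = argparse.ArgumentParser()
    demo.add_argument('--url', type=absurl)
    demo.add_argument('-b', '--cookie', type=cookie, action='append', default=[])
    ns = demo.parse_args(['--url', 'https://example.com/', '-b', 'foo=bar'])
    assert ns.url.is_absolute()
    assert ns.cookie[0].key == 'foo' and ns.cookie[0].value == 'bar'
    # A relative URL or a malformed cookie raises argparse.ArgumentTypeError,
    # which argparse reports to the user as a usage error.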
class SingleExitStatus(IntEnum):
""" Exit status for single-shot command line """
Ok = 0
Fail = 1
BrowserCrash = 2
Navigate = 3
def single ():
parser = argparse.ArgumentParser(description='crocoite helper tools to fetch individual pages.')
parser.add_argument('--browser', help='DevTools URL', type=absurl, metavar='URL')
parser.add_argument('--timeout', default=1*60*60, type=int, help='Maximum time for archival', metavar='SEC')
parser.add_argument('--idle-timeout', default=30, type=int, help='Maximum idle seconds (i.e. no requests)', dest='idleTimeout', metavar='SEC')
parser.add_argument('--behavior', help='Enable behavior script',
dest='enabledBehaviorNames',
default=list (behavior.availableMap.keys ()),
choices=list (behavior.availableMap.keys ()),
metavar='NAME', nargs='*')
parser.add_argument('--warcinfo', help='Add extra information to warcinfo record',
metavar='JSON', type=json.loads)
# re-using curl’s short/long switch names whenever possible
parser.add_argument('-k', '--insecure',
action='store_true',
help='Disable certificate validation')
parser.add_argument ('-b', '--cookie', type=cookie, metavar='SET-COOKIE',
action='append', default=[], help='Cookies in Set-Cookie format.')
parser.add_argument ('-c', '--cookie-jar', dest='cookieJar',
type=cookiejar, metavar='FILE',
default=pkg_resources.resource_filename (__name__, 'data/cookies.txt'),
help='Cookie jar file, read-only.')
parser.add_argument('url', help='Website URL', type=absurl, metavar='URL')
parser.add_argument('output', help='WARC filename', metavar='FILE')
args = parser.parse_args ()
logger = Logger (consumer=[DatetimeConsumer (), JsonPrintConsumer ()])
ret = SingleExitStatus.Fail
service = Process ()
if args.browser:
service = Passthrough (args.browser)
settings = ControllerSettings (
idleTimeout=args.idleTimeout,
timeout=args.timeout,
insecure=args.insecure,
cookies=args.cookieJar + args.cookie,
)
with open (args.output, 'wb') as fd, WarcHandler (fd, logger) as warcHandler:
logger.connect (WarcHandlerConsumer (warcHandler))
handler = [StatsHandler (), LogHandler (logger), warcHandler]
b = list (map (lambda x: behavior.availableMap[x], args.enabledBehaviorNames))
controller = SinglePageController (url=args.url, settings=settings,
service=service, handler=handler, behavior=b, logger=logger,
warcinfo=args.warcinfo)
try:
loop = asyncio.get_event_loop()
run = asyncio.ensure_future (controller.run ())
stop = lambda signum: run.cancel ()
loop.add_signal_handler (signal.SIGINT, stop, signal.SIGINT)
loop.add_signal_handler (signal.SIGTERM, stop, signal.SIGTERM)
loop.run_until_complete(run)
loop.close()
ret = SingleExitStatus.Ok
except Crashed:
ret = SingleExitStatus.BrowserCrash
except asyncio.CancelledError:
# don’t log this one
pass
except browser.NavigateError:
ret = SingleExitStatus.Navigate
except Exception as e:
ret = SingleExitStatus.Fail
logger.error ('cli exception',
uuid='7fd69858-ecaa-4225-b213-8ab880aa3cc5',
traceback=list (TracebackException.from_exception (e).format ()))
finally:
r = handler[0].stats
logger.info ('stats', context='cli', uuid='24d92d16-770e-4088-b769-4020e127a7ff', **r)
logger.info ('exit', context='cli', uuid='9b1bd603-f7cd-4745-895a-5b894a5166f2', status=ret)
return ret
def parsePolicy (recursive, url):
if recursive is None:
return DepthLimit (0)
elif recursive.isdigit ():
return DepthLimit (int (recursive))
elif recursive == 'prefix':
return PrefixLimit (url)
raise argparse.ArgumentTypeError ('Unsupported recursion mode')
def recursive ():
logger = Logger (consumer=[DatetimeConsumer (), JsonPrintConsumer ()])
parser = argparse.ArgumentParser(description='Save website to WARC using Google Chrome.')
parser.add_argument('-j', '--concurrency',
help='Run at most N jobs concurrently', metavar='N', default=1,
type=int)
parser.add_argument('-r', '--recursion', help='Recursion policy',
metavar='POLICY')
parser.add_argument('--tempdir', help='Directory for temporary files',
metavar='DIR')
parser.add_argument('url', help='Seed URL', type=absurl, metavar='URL')
parser.add_argument('output',
help='Output file, supports templates {host}, {date} and {seqnum}',
metavar='FILE')
parser.add_argument('command',
help='Fetch command, supports templates {url} and {dest}',
metavar='CMD', nargs='*',
default=['crocoite-single', '{url}', '{dest}'])
args = parser.parse_args ()
try:
policy = parsePolicy (args.recursion, args.url)
except argparse.ArgumentTypeError as e:
parser.error (str (e))
try:
controller = RecursiveController (url=args.url, output=args.output,
command=args.command, logger=logger, policy=policy,
tempdir=args.tempdir, concurrency=args.concurrency)
except ValueError as e:
parser.error (str (e))
run = asyncio.ensure_future (controller.run ())
loop = asyncio.get_event_loop()
stop = lambda signum: run.cancel ()
loop.add_signal_handler (signal.SIGINT, stop, signal.SIGINT)
loop.add_signal_handler (signal.SIGTERM, stop, signal.SIGTERM)
try:
loop.run_until_complete(run)
except asyncio.CancelledError:
pass
finally:
loop.close()
return 0
def irc ():
import json, re
from .irc import Chromebot
logger = Logger (consumer=[DatetimeConsumer (), JsonPrintConsumer ()])
parser = argparse.ArgumentParser(description='IRC bot.')
parser.add_argument('--config', '-c', help='Config file location', metavar='PATH', default='chromebot.json')
args = parser.parse_args ()
with open (args.config) as fd:
config = json.load (fd)
s = config['irc']
blacklist = dict (map (lambda x: (re.compile (x[0], re.I), x[1]), config['blacklist'].items ()))
loop = asyncio.get_event_loop()
bot = Chromebot (
host=s['host'],
port=s['port'],
ssl=s['ssl'],
nick=s['nick'],
channels=s['channels'],
tempdir=config['tempdir'],
destdir=config['destdir'],
processLimit=config['process_limit'],
logger=logger,
blacklist=blacklist,
needVoice=config['need_voice'],
loop=loop)
stop = lambda signum: bot.cancel ()
loop.add_signal_handler (signal.SIGINT, stop, signal.SIGINT)
loop.add_signal_handler (signal.SIGTERM, stop, signal.SIGTERM)
loop.run_until_complete(bot.run ())
def dashboard ():
from .irc import Dashboard
loop = asyncio.get_event_loop()
d = Dashboard (sys.stdin, loop)
loop.run_until_complete(d.run ())
loop.run_forever()
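# The single() and recursive() entry points above share one pattern: wrap the controller
# coroutine in a task, cancel that task on SIGINT/SIGTERM, and treat CancelledError as a
# clean shutdown. A standalone sketch of the pattern (illustrative helper, not used by
# the CLI itself):
async def _run_cancellable(coro):
    loop = asyncio.get_event_loop()
    task = asyncio.ensure_future(coro)
    for signum in (signal.SIGINT, signal.SIGTERM):
        loop.add_signal_handler(signum, task.cancel)
    try:
        await task
    except asyncio.CancelledError:
        pass  # cancelled by a signal: exit quietly, as the handlers above do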
|
the-stack_106_13102
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""New implementation of Visual Studio project generation."""
import hashlib
import os
import random
from operator import attrgetter
import gyp.common
try:
cmp
except NameError:
def cmp(x, y):
return (x > y) - (x < y)
# Initialize random number generator
random.seed()
# GUIDs for project types
ENTRY_TYPE_GUIDS = {
"project": "{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}",
"folder": "{2150E333-8FDC-42A3-9474-1A3956D46DE8}",
}
# ------------------------------------------------------------------------------
# Helper functions
def MakeGuid(name, seed="msvs_new"):
"""Returns a GUID for the specified target name.
Args:
name: Target name.
seed: Seed for MD5 hash.
Returns:
A GUID-like string calculated from the name and seed.
This generates something which looks like a GUID, but depends only on the
name and seed. This means the same name/seed will always generate the same
GUID, so that projects and solutions which refer to each other can determine
each other's GUIDs explicitly. It also means that the GUID will not change
when the project for a target is rebuilt.
"""
# Calculate a MD5 signature for the seed and name.
d = hashlib.md5((str(seed) + str(name)).encode("utf-8")).hexdigest().upper()
# Convert most of the signature to GUID form (discard the rest)
guid = (
"{"
+ d[:8]
+ "-"
+ d[8:12]
+ "-"
+ d[12:16]
+ "-"
+ d[16:20]
+ "-"
+ d[20:32]
+ "}"
)
return guid
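# Worked example (values are whatever MD5 yields; only the properties matter):
#   MakeGuid("base") == MakeGuid("base")                      -> True  (deterministic)
#   MakeGuid("base") == MakeGuid("base", seed="msvs_folder")  -> False (seed changes it)
#   len(MakeGuid("base")) == 38                               -> "{" + 36 characters + "}"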
# ------------------------------------------------------------------------------
class MSVSSolutionEntry(object):
def __cmp__(self, other):
# Sort by name then guid (so things are in order on vs2008).
return cmp((self.name, self.get_guid()), (other.name, other.get_guid()))
class MSVSFolder(MSVSSolutionEntry):
"""Folder in a Visual Studio project or solution."""
def __init__(self, path, name=None, entries=None, guid=None, items=None):
"""Initializes the folder.
Args:
path: Full path to the folder.
name: Name of the folder.
entries: List of folder entries to nest inside this folder. May contain
Folder or Project objects. May be None, if the folder is empty.
guid: GUID to use for folder, if not None.
items: List of solution items to include in the folder project. May be
None, if the folder does not directly contain items.
"""
if name:
self.name = name
else:
# Use last layer.
self.name = os.path.basename(path)
self.path = path
self.guid = guid
# Copy passed lists (or set to empty lists)
self.entries = sorted(entries or [], key=attrgetter("path"))
self.items = list(items or [])
self.entry_type_guid = ENTRY_TYPE_GUIDS["folder"]
def get_guid(self):
if self.guid is None:
# Use consistent guids for folders (so things don't regenerate).
self.guid = MakeGuid(self.path, seed="msvs_folder")
return self.guid
# ------------------------------------------------------------------------------
class MSVSProject(MSVSSolutionEntry):
"""Visual Studio project."""
def __init__(
self,
path,
name=None,
dependencies=None,
guid=None,
spec=None,
build_file=None,
config_platform_overrides=None,
fixpath_prefix=None,
):
"""Initializes the project.
Args:
path: Absolute path to the project file.
name: Name of project. If None, the name will be the same as the base
name of the project file.
dependencies: List of other Project objects this project is dependent
upon, if not None.
guid: GUID to use for project, if not None.
spec: Dictionary specifying how to build this project.
build_file: Filename of the .gyp file that the vcproj file comes from.
config_platform_overrides: optional dict of configuration platforms to
used in place of the default for this target.
fixpath_prefix: the path used to adjust the behavior of _fixpath
"""
self.path = path
self.guid = guid
self.spec = spec
self.build_file = build_file
# Use project filename if name not specified
self.name = name or os.path.splitext(os.path.basename(path))[0]
# Copy passed lists (or set to empty lists)
self.dependencies = list(dependencies or [])
self.entry_type_guid = ENTRY_TYPE_GUIDS["project"]
if config_platform_overrides:
self.config_platform_overrides = config_platform_overrides
else:
self.config_platform_overrides = {}
self.fixpath_prefix = fixpath_prefix
self.msbuild_toolset = None
def set_dependencies(self, dependencies):
self.dependencies = list(dependencies or [])
def get_guid(self):
if self.guid is None:
# Set GUID from path
# TODO(rspangler): This is fragile.
# 1. We can't just use the project filename sans path, since there could
# be multiple projects with the same base name (for example,
# foo/unittest.vcproj and bar/unittest.vcproj).
# 2. The path needs to be relative to $SOURCE_ROOT, so that the project
# GUID is the same whether it's included from base/base.sln or
# foo/bar/baz/baz.sln.
# 3. The GUID needs to be the same each time this builder is invoked, so
# that we don't need to rebuild the solution when the project changes.
# 4. We should be able to handle pre-built project files by reading the
# GUID from the files.
self.guid = MakeGuid(self.name)
return self.guid
def set_msbuild_toolset(self, msbuild_toolset):
self.msbuild_toolset = msbuild_toolset
# ------------------------------------------------------------------------------
class MSVSSolution(object):
"""Visual Studio solution."""
def __init__(
self, path, version, entries=None, variants=None, websiteProperties=True
):
"""Initializes the solution.
Args:
path: Path to solution file.
version: Format version to emit.
entries: List of entries in solution. May contain Folder or Project
objects. May be None, if the folder is empty.
variants: List of build variant strings. If none, a default list will
be used.
websiteProperties: Flag to decide if the website properties section
is generated.
"""
self.path = path
self.websiteProperties = websiteProperties
self.version = version
# Copy passed lists (or set to empty lists)
self.entries = list(entries or [])
if variants:
# Copy passed list
self.variants = variants[:]
else:
# Use default
self.variants = ["Debug|Win32", "Release|Win32"]
# TODO(rspangler): Need to be able to handle a mapping of solution config
# to project config. Should we be able to handle variants being a dict,
# or add a separate variant_map variable? If it's a dict, we can't
# guarantee the order of variants since dict keys aren't ordered.
# TODO(rspangler): Automatically write to disk for now; should delay until
# node-evaluation time.
self.Write()
def Write(self, writer=gyp.common.WriteOnDiff):
"""Writes the solution file to disk.
Raises:
IndexError: An entry appears multiple times.
"""
# Walk the entry tree and collect all the folders and projects.
all_entries = set()
entries_to_check = self.entries[:]
while entries_to_check:
e = entries_to_check.pop(0)
# If this entry has been visited, nothing to do.
if e in all_entries:
continue
all_entries.add(e)
# If this is a folder, check its entries too.
if isinstance(e, MSVSFolder):
entries_to_check += e.entries
all_entries = sorted(all_entries, key=attrgetter("path"))
# Open file and print header
f = writer(self.path)
f.write(
"Microsoft Visual Studio Solution File, "
"Format Version %s\r\n" % self.version.SolutionVersion()
)
f.write("# %s\r\n" % self.version.Description())
# Project entries
sln_root = os.path.split(self.path)[0]
for e in all_entries:
relative_path = gyp.common.RelativePath(e.path, sln_root)
# msbuild does not accept an empty folder_name.
# use '.' in case relative_path is empty.
folder_name = relative_path.replace("/", "\\") or "."
f.write(
'Project("%s") = "%s", "%s", "%s"\r\n'
% (
e.entry_type_guid, # Entry type GUID
e.name, # Folder name
folder_name, # Folder name (again)
e.get_guid(), # Entry GUID
)
)
# TODO(rspangler): Need a way to configure this stuff
if self.websiteProperties:
f.write(
"\tProjectSection(WebsiteProperties) = preProject\r\n"
'\t\tDebug.AspNetCompiler.Debug = "True"\r\n'
'\t\tRelease.AspNetCompiler.Debug = "False"\r\n'
"\tEndProjectSection\r\n"
)
if isinstance(e, MSVSFolder):
if e.items:
f.write("\tProjectSection(SolutionItems) = preProject\r\n")
for i in e.items:
f.write("\t\t%s = %s\r\n" % (i, i))
f.write("\tEndProjectSection\r\n")
if isinstance(e, MSVSProject):
if e.dependencies:
f.write("\tProjectSection(ProjectDependencies) = postProject\r\n")
for d in e.dependencies:
f.write("\t\t%s = %s\r\n" % (d.get_guid(), d.get_guid()))
f.write("\tEndProjectSection\r\n")
f.write("EndProject\r\n")
# Global section
f.write("Global\r\n")
# Configurations (variants)
f.write("\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n")
for v in self.variants:
f.write("\t\t%s = %s\r\n" % (v, v))
f.write("\tEndGlobalSection\r\n")
# Sort config guids for easier diffing of solution changes.
config_guids = []
config_guids_overrides = {}
for e in all_entries:
if isinstance(e, MSVSProject):
config_guids.append(e.get_guid())
config_guids_overrides[e.get_guid()] = e.config_platform_overrides
config_guids.sort()
f.write("\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n")
for g in config_guids:
for v in self.variants:
nv = config_guids_overrides[g].get(v, v)
# Pick which project configuration to build for this solution
# configuration.
f.write(
"\t\t%s.%s.ActiveCfg = %s\r\n"
% (
g, # Project GUID
v, # Solution build configuration
nv, # Project build config for that solution config
)
)
# Enable project in this solution configuration.
f.write(
"\t\t%s.%s.Build.0 = %s\r\n"
% (
g, # Project GUID
v, # Solution build configuration
nv, # Project build config for that solution config
)
)
f.write("\tEndGlobalSection\r\n")
# TODO(rspangler): Should be able to configure this stuff too (though I've
# never seen this be any different)
f.write("\tGlobalSection(SolutionProperties) = preSolution\r\n")
f.write("\t\tHideSolutionNode = FALSE\r\n")
f.write("\tEndGlobalSection\r\n")
# Folder mappings
# Omit this section if there are no folders
if any([e.entries for e in all_entries if isinstance(e, MSVSFolder)]):
f.write("\tGlobalSection(NestedProjects) = preSolution\r\n")
for e in all_entries:
if not isinstance(e, MSVSFolder):
continue # Does not apply to projects, only folders
for subentry in e.entries:
f.write("\t\t%s = %s\r\n" % (subentry.get_guid(), e.get_guid()))
f.write("\tEndGlobalSection\r\n")
f.write("EndGlobal\r\n")
f.close()
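# A minimal sketch of how the entry classes above nest. MSVSSolution itself is not
# instantiated here because its constructor immediately writes to disk and needs a
# version object; the folder/project GUIDs alone show the deterministic behaviour.
def _sketch_entries():
    proj = MSVSProject(r"foo\bar.vcproj", name="bar")
    folder = MSVSFolder("foo", entries=[proj])
    assert proj.get_guid() == MakeGuid("bar")                        # derived from the name
    assert folder.get_guid() == MakeGuid("foo", seed="msvs_folder")  # derived from the path
    return folder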
|
the-stack_106_13105
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test Recursive Min Eigen Optimizer."""
import unittest
from test import QiskitOptimizationTestCase
from qiskit.exceptions import MissingOptionalLibraryError
from qiskit.algorithms import NumPyMinimumEigensolver
from qiskit_optimization.algorithms import (MinimumEigenOptimizer, CplexOptimizer,
RecursiveMinimumEigenOptimizer)
from qiskit_optimization.algorithms.recursive_minimum_eigen_optimizer import IntermediateResult
from qiskit_optimization.problems import QuadraticProgram
from qiskit_optimization.converters import (IntegerToBinary, InequalityToEquality,
LinearEqualityToPenalty, QuadraticProgramToQubo)
class TestRecursiveMinEigenOptimizer(QiskitOptimizationTestCase):
"""Recursive Min Eigen Optimizer Tests."""
def test_recursive_min_eigen_optimizer(self):
"""Test the recursive minimum eigen optimizer."""
try:
filename = 'op_ip1.lp'
# get minimum eigen solver
min_eigen_solver = NumPyMinimumEigensolver()
# construct minimum eigen optimizer
min_eigen_optimizer = MinimumEigenOptimizer(min_eigen_solver)
recursive_min_eigen_optimizer = RecursiveMinimumEigenOptimizer(min_eigen_optimizer,
min_num_vars=4)
# load optimization problem
problem = QuadraticProgram()
lp_file = self.get_resource_path(filename, 'algorithms/resources')
problem.read_from_lp_file(lp_file)
# solve problem with cplex
cplex = CplexOptimizer()
cplex_result = cplex.solve(problem)
# solve problem
result = recursive_min_eigen_optimizer.solve(problem)
# analyze results
self.assertAlmostEqual(cplex_result.fval, result.fval)
except MissingOptionalLibraryError as ex:
self.skipTest(str(ex))
def test_min_eigen_optimizer_history(self):
"""Tests different options for history."""
try:
filename = 'op_ip1.lp'
# load optimization problem
problem = QuadraticProgram()
lp_file = self.get_resource_path(filename, 'algorithms/resources')
problem.read_from_lp_file(lp_file)
# get minimum eigen solver
min_eigen_solver = NumPyMinimumEigensolver()
# construct minimum eigen optimizer
min_eigen_optimizer = MinimumEigenOptimizer(min_eigen_solver)
# no history
recursive_min_eigen_optimizer = \
RecursiveMinimumEigenOptimizer(min_eigen_optimizer,
min_num_vars=4,
history=IntermediateResult.NO_ITERATIONS)
result = recursive_min_eigen_optimizer.solve(problem)
self.assertIsNotNone(result.replacements)
self.assertIsNotNone(result.history)
self.assertIsNotNone(result.history[0])
self.assertEqual(len(result.history[0]), 0)
self.assertIsNone(result.history[1])
# only last iteration in the history
recursive_min_eigen_optimizer = \
RecursiveMinimumEigenOptimizer(min_eigen_optimizer,
min_num_vars=4,
history=IntermediateResult.LAST_ITERATION)
result = recursive_min_eigen_optimizer.solve(problem)
self.assertIsNotNone(result.replacements)
self.assertIsNotNone(result.history)
self.assertIsNotNone(result.history[0])
self.assertEqual(len(result.history[0]), 0)
self.assertIsNotNone(result.history[1])
# full history
recursive_min_eigen_optimizer = \
RecursiveMinimumEigenOptimizer(min_eigen_optimizer,
min_num_vars=4,
history=IntermediateResult.ALL_ITERATIONS)
result = recursive_min_eigen_optimizer.solve(problem)
self.assertIsNotNone(result.replacements)
self.assertIsNotNone(result.history)
self.assertIsNotNone(result.history[0])
self.assertGreater(len(result.history[0]), 1)
self.assertIsNotNone(result.history[1])
except MissingOptionalLibraryError as ex:
self.skipTest(str(ex))
def test_converter_list(self):
"""Test converter list"""
op = QuadraticProgram()
op.integer_var(0, 3, "x")
op.binary_var('y')
op.maximize(linear={'x': 1, 'y': 2})
op.linear_constraint(linear={'y': 1, 'x': 1}, sense='LE', rhs=3, name='xy_leq')
# construct minimum eigen optimizer
min_eigen_solver = NumPyMinimumEigensolver()
min_eigen_optimizer = MinimumEigenOptimizer(min_eigen_solver)
# a single converter
qp2qubo = QuadraticProgramToQubo()
recursive_min_eigen_optimizer = RecursiveMinimumEigenOptimizer(min_eigen_optimizer,
min_num_vars=2,
converters=qp2qubo)
result = recursive_min_eigen_optimizer.solve(op)
self.assertEqual(result.fval, 4)
# a list of converters
ineq2eq = InequalityToEquality()
int2bin = IntegerToBinary()
penalize = LinearEqualityToPenalty()
converters = [ineq2eq, int2bin, penalize]
recursive_min_eigen_optimizer = RecursiveMinimumEigenOptimizer(min_eigen_optimizer,
min_num_vars=2,
converters=converters)
result = recursive_min_eigen_optimizer.solve(op)
self.assertEqual(result.fval, 4)
# invalid converters
with self.assertRaises(TypeError):
invalid = [qp2qubo, "invalid converter"]
RecursiveMinimumEigenOptimizer(min_eigen_optimizer,
min_num_vars=2,
converters=invalid)
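# For reference, the model built in test_converter_list above is: maximize x + 2*y with
# integer x in [0, 3], binary y, and x + y <= 3. Its optimum is x = 2, y = 1 with an
# objective value of 4, which is the fval the assertions check against.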
if __name__ == '__main__':
unittest.main()
|
the-stack_106_13106
|
import os
import time
import uuid
from biomaj2galaxy import pass_context
from biomaj2galaxy.io import warn
from biomaj2galaxy.utils import check_input, get_dbkey_entry, wait_completion
import click
@click.command()
@click.argument("files", nargs=-1)
@click.option(
"-d",
"--dbkey",
help="Dbkey to use (i.e. genome build like \'hg19\')",
type=str
)
@click.option(
"-n",
"--dbkey-display-name",
help="Display name for the dbkey (default=guessed from BioMAJ env vars, ie '${dbname} (${remoterelease})')",
type=str
)
@click.option(
"-g",
"--genome-fasta",
help="Path to a fasta file corresponding to a full reference genome. It will be used in visualizations for example.",
type=str
)
@click.option(
"--genome-fasta-name",
help="Display name for the full reference genome (default=--dbkey-display-name or --dbkey)",
type=str
)
@click.option(
"-s",
"--fasta-sorting-method",
help="Method used for the sorting of genome fasta file",
type=click.Choice(['as_is', 'lexicographical', 'gatk', 'custom']),
default='as_is'
)
@click.option(
"--fasta-custom-sort-list",
help="Ordered comma separated list of sequence identifiers to use for sorting genome fasta file (requires \'-s custom\' option)",
type=str
)
@click.option(
"--fasta-custom-sort-handling",
help="How to handle non-specified identifiers (requires \'-s custom\' option)",
type=click.Choice(['discard', 'keep_append', 'keep_prepend']),
default='discard'
)
@click.option(
"--no-file-check",
help="Don't check the source files existence.\nUseful for files that are available on the Galaxy server, but not on the machine running this script.",
is_flag=True
)
@click.option(
"--star-with-gtf",
help="STAR indices were made with an annotation (i.e., --sjdbGTFfile and --sjdbOverhang were used).",
is_flag=True
)
@click.option(
"--star-version",
help="Version of STAR used to create the index (default: none)",
type=str
)
@click.option(
"--no-biomaj-env",
help="Add this flag if you don't want biomaj2galaxy to use BioMAJ env variables to guess file names.",
is_flag=True
)
@pass_context
def add(ctx, files, dbkey, dbkey_display_name, genome_fasta, genome_fasta_name, fasta_sorting_method, fasta_custom_sort_list, fasta_custom_sort_handling, no_file_check, star_with_gtf, star_version, no_biomaj_env):
"""Add data to a Galaxy data table. FILES is a list of path respecting this syntax: data_table_name:/path/to/data:Data name (e.g. "bowtie2:/db/some/where/my_genome:My supercool genome"). You can escape ':' by writing '\\:'"""
ADD_FASTA_TOOL_ID = 'toolshed.g2.bx.psu.edu/repos/devteam/data_manager_fetch_genome_dbkeys_all_fasta/data_manager_fetch_genome_all_fasta_dbkey/0.0.4'
DM_MANUAL_TOOL_ID = 'toolshed.g2.bx.psu.edu/repos/iuc/data_manager_manual/data_manager_manual/0.0.2'
# Fetch the list of known tables with their columns
tables_format = {}
tables = ctx.gi.tool_data.get_data_tables()
for t in tables:
tables_format[t['name']] = ctx.gi.tool_data.show_data_table(t['name'])['columns']
# A stupid fix for the twobit table which for some unknown reason doesn't have a 'name' column
# As this 'name' column is required for a data table, the galaxy code adds a non-existing one when it is not found in the table definition.
if t['name'] == 'twobit' and 'name' in tables_format[t['name']]:
tables_format[t['name']].remove('name')
# Define some simpler synonyms for data tables
data_table_synonyms = {
'fasta': 'all_fasta',
'bowtie': 'bowtie_indexes',
'bowtie2': 'bowtie2_indexes',
'bwa': 'bwa_indexes',
'bwa_mem': 'bwa_mem_indexes',
'tophat2': 'tophat2_indexes',
'star': 'rnastar_index2x_versioned',
}
files_info = []
for f in files:
f = f.replace("\\:", '___colon___')
f_info = f.split(':')
f_info = [x.replace('___colon___', ':') for x in f_info]
if len(f_info) < 2 or len(f_info) > 3:
raise Exception('Malformed file information "%s"' % f_info)
if f_info[0] not in tables_format:
if f_info[0] in data_table_synonyms:
f_info[0] = data_table_synonyms[f_info[0]]
else:
raise Exception('Unknown data table name "%s"' % f_info[0])
f_info[1] = check_input([f_info[1]], check_existence=(not no_file_check), use_biomaj_env=(not no_biomaj_env))[0]
if len(f_info) == 3:
files_info.append({'table': f_info[0], 'path': f_info[1], 'name': f_info[2]})
else:
files_info.append({'table': f_info[0], 'path': f_info[1]})
# Check which tables we're touching
table_counts = {}
for f_info in files_info:
if f_info['table'] not in table_counts:
table_counts[f_info['table']] = 0
table_counts[f_info['table']] += 1
# Verify dbkey
dbkey_entry = get_dbkey_entry(ctx.gi, dbkey)
dbkey_exists = dbkey_entry is not None
need_dbkey = genome_fasta or (len(table_counts) == 0)
for c in table_counts.keys():
need_dbkey = 'dbkey' in tables_format[c]
if need_dbkey:
break
create_dbkey = dbkey and not dbkey_exists and need_dbkey
if create_dbkey:
print("Need to create the dbkey '" + dbkey + "'")
elif dbkey and dbkey_exists:
print("The dbkey '" + dbkey + "' already exists")
elif not dbkey and not need_dbkey:
print("No dbkey was specified, but it is not a problem as we don't need it.")
elif not dbkey and need_dbkey:
raise Exception("ERROR: You must specify a dbkey to perform the action(s) you requested.")
# Prepare a default display name that will be used if not specified
if not dbkey_display_name:
if 'dbname' in os.environ and 'remoterelease' in os.environ:
dbkey_display_name = "%s (%s)" % (os.environ['dbname'], os.environ['remoterelease'])
default_name = dbkey_display_name
if not default_name and dbkey_entry:
print("Trying to use dbkey_entry name: " + dbkey_entry[1])
default_name = dbkey_entry[1]
if not default_name:
default_name = dbkey
for f_info in files_info:
if 'name' not in f_info:
if not default_name:
f_info['name'] = os.path.basename(f_info['path'])
else:
f_info['name'] = default_name
# Add the genome fasta if asked
if genome_fasta:
if not create_dbkey:
# delete the existing dbkey to force the recomputing of len file
print("Deleting the dbkey entry before recreating it (needed to recompute the len file).")
ctx.gi.tool_data.delete_data_table('__dbkeys__', "\t".join(dbkey_entry))
if not genome_fasta_name:
genome_fasta_name = default_name
genome_fasta_abs = check_input([genome_fasta], check_existence=(not no_file_check), use_biomaj_env=(not no_biomaj_env))[0]
# the dbkey is not (or not longer) existing: create it while adding the ref genome to force the computing of the len file
print("Adding a new genome using fasta file '%s' -> '%s'" % (genome_fasta_name, genome_fasta_abs))
params = {}
params['dbkey_source|dbkey_source_selector'] = 'new'
params['dbkey_source|dbkey'] = dbkey
params['dbkey_source|dbkey_name'] = default_name
params['sequence_name'] = genome_fasta_name
params['reference_source|reference_source_selector'] = 'directory'
params['reference_source|fasta_filename'] = genome_fasta_abs
params['reference_source|create_symlink'] = 'true'
params['sorting|sort_selector'] = fasta_sorting_method
if fasta_sorting_method == 'custom':
params['sorting|handle_not_listed|handle_not_listed_selector'] = fasta_custom_sort_handling
n = 0
for i in fasta_custom_sort_list.split(','):
params['sorting|sequence_identifiers_' + str(n) + '|identifier'] = i  # n must be converted to str before concatenation
n += 1
fetch_res = ctx.gi.tools.run_tool(None, ADD_FASTA_TOOL_ID, params)
datasetid = fetch_res['outputs'][0]['id']
jobid = None
if 'jobs' in fetch_res:
jobid = fetch_res['jobs'][0]['id']
wait_completion(ctx.gi, datasetid, jobid)
elif create_dbkey: # Create the dbkey without ref genome (no len computing)
print("Will create the dbkey '" + dbkey + "'")
files_info.append({'table': '__dbkeys__', 'name': dbkey_display_name})
table_counts['__dbkeys__'] = 1
# Now add all associated data
manual_dm_params = {}
index_entry = 0
for f_info in files_info:
if 'path' in f_info:
print("Adding a new entry to table '%s': '%s' -> '%s'" % (f_info['table'], f_info['name'], f_info['path']))
else:
print("Adding a new entry to table '%s': '%s' -> No path" % (f_info['table'], f_info['name']))
vals = {
'dbkey': dbkey,
'name': f_info['name'],
'path': f_info['path'] if 'path' in f_info else '',
'db_path': f_info['path'] if 'path' in f_info else '', # diamond data table
'url': f_info['path'] if 'path' in f_info else '',
'with-gtf': '1' if star_with_gtf else '0', # rnastar data table, old data table
'with_gene_model': '1' if star_with_gtf else '0', # rnastar data table, recent data table
'version': star_version if star_version else '0', # rnastar data table, recent data table
'len_path': f_info['path'] if 'path' in f_info else '', # __dbkeys__data table
}
if dbkey and table_counts[f_info['table']] == 1: # The id must be unique, only use dbkey if adding only one blastdb
vals['value'] = dbkey
else:
vals['value'] = dbkey + "_" + str(uuid.uuid4()) # Let it be generated
col_index = 0
for col in tables_format[f_info['table']]:
if col not in vals:
warn("Skipping unknown column named '%s' in table '%s'." % (col, f_info['table']))
continue
manual_dm_params['data_tables_%s|columns_%s|data_table_column_name' % (index_entry, col_index)] = col
manual_dm_params['data_tables_%s|columns_%s|data_table_column_value' % (index_entry, col_index)] = vals[col]
manual_dm_params['data_tables_%s|columns_%s|is_path|is_path_selector' % (index_entry, col_index)] = 'no'
col_index += 1
manual_dm_params['data_tables_%s|data_table_name' % (index_entry)] = f_info['table']
index_entry += 1
fetch_res = ctx.gi.tools.run_tool(None, DM_MANUAL_TOOL_ID, manual_dm_params)
datasetid = fetch_res['outputs'][0]['id']
jobid = None
if 'jobs' in fetch_res:
jobid = fetch_res['jobs'][0]['id']
wait_completion(ctx.gi, datasetid, jobid)
# Reload all tables just in case
time.sleep(1) # Reloading too soon might not work for some strange reason
for table in table_counts:
print("Reloading table '%s'" % table)
ctx.gi.tool_data.reload_data_table(table)
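# The FILES parsing near the top of add() relies on a small escaping trick: a literal ':'
# inside a path or name is written '\:', temporarily replaced by a placeholder, split on
# ':', then restored. A standalone sketch of that round-trip (illustrative only):
def _split_file_spec(spec):
    parts = spec.replace("\\:", '___colon___').split(':')
    return [p.replace('___colon___', ':') for p in parts]
# _split_file_spec('bowtie2:/db/my_genome:My genome') -> ['bowtie2', '/db/my_genome', 'My genome']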
|
the-stack_106_13107
|
# Copyright (c) 2020 Matthew Earl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
import asyncio
import errno
import logging
import multiprocessing
import os
import pickle
import socket
import subprocess
import threading
import gym
import numpy as np
from .. import client
from .. import demo
from .. import progress
logger = logging.getLogger(__name__)
_TIME_LIMIT = 35.
_QUAKE_EXE = os.path.expanduser("~/quakespasm/quakespasm/Quake/quakespasm")
_QUAKE_OPTION_ARGS = [
'-protocol', '15',
'-dedicated', '1',
'-basedir', os.path.expanduser('~/.quakespasm'),
#'+host_framerate', '0.013888',
'+host_framerate', '0.1',
'+sys_ticrate', '0.0',
'+sync_movements', '1',
'+nomonsters', '1',
'+map', 'e1m1',
]
def _get_quake_args(port_num):
return [_QUAKE_EXE, '-port', str(port_num)] + _QUAKE_OPTION_ARGS
def _get_player_origins(demo_file):
dv = demo.DemoView(demo_file, fetch_model_positions=False)
times = np.arange(0, dv.end_time, 0.05)
origins = np.stack([dv.get_view_at_time(t)[1] for t in times])
return origins, times
def _get_free_udp_port(start_port_num, num_ports):
for port_num in range(start_port_num, start_port_num + num_ports):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
sock.bind(("localhost", port_num))
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
else:
return port_num
finally:
sock.close()
class AsyncEnv(multiprocessing.Process):
def __init__(self):
super().__init__()
self.parent_conn, self.child_conn = multiprocessing.Pipe()
self._server_proc = None
self._client = None
async def _handle_rpc(self):
loop = asyncio.get_running_loop()
while True:
func, args = await loop.run_in_executor(None, lambda: self.child_conn.recv())
return_val = await getattr(self, func)(*args)
self.child_conn.send(return_val)
async def _run_coro(self):
# TODO: Obtain a file lock around checking the port and creating the server, to avoid race conditions, and get
# rid of the os.getpid() hack.
import random
port = 26000 + random.randint(0, 1000 - 1)
port = _get_free_udp_port(port, 1000)
server_proc = await asyncio.create_subprocess_exec(
*_get_quake_args(port),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
stdin=subprocess.PIPE)
self._client = await client.AsyncClient.connect("localhost", port)
logger.info("Connected to %s %s", "localhost", port)
try:
self._before_spawn()
await self._client.wait_until_spawn()
self._reset_per_episode_state()
await self._handle_rpc()
finally:
await self._client.disconnect()
server_proc.terminate()
await server_proc.wait()
def run(self):
async def create_and_run_task():
self._coro = asyncio.create_task(self._run_coro())
try:
await self._coro
except asyncio.CancelledError:
pass
asyncio.run(create_and_run_task())
async def step(self, a):
raise NotImplementedError
async def _get_initial_observation(self):
raise NotImplementedError
def _reset_per_episode_state(self):
raise NotImplementedError
def _on_episode_end(self):
raise NotImplementedError
def _before_spawn(self):
raise NotImplementedError
async def reset(self):
self._on_episode_end()
self._before_spawn()
spawn_fut = self._client.wait_until_spawn()
await self._client.send_command("kill")
await spawn_fut
obs = await self._get_initial_observation()
self._reset_per_episode_state()
return obs
async def close(self):
self._coro.cancel()
class AsyncEnvAdaptor(gym.Env):
"""Turn an async env into a gym env."""
def __init__(self, async_env):
self._async_env_proc = async_env
self.action_space = self._async_env_proc.action_space
self.observation_space = self._async_env_proc.observation_space
self._rpc_lock = threading.Lock()
self._async_env_proc.start()
self._paths = []
self._current_path = None
def _make_rpc_call(self, method, args):
with self._rpc_lock:
self._async_env_proc.parent_conn.send((method, args))
result = self._async_env_proc.parent_conn.recv()
return result
def step(self, a):
obs, reward, done, info = self._make_rpc_call('step', (a,))
if self._current_path is None:
self._current_path = []
self._paths.append(self._current_path)
self._current_path.append(info)
return obs, reward, done, info
def reset(self):
self._current_path = None
return self._make_rpc_call('reset', ())
def render(self):
return self._make_rpc_call('render', ())
def close(self):
self._make_rpc_call('close', ())
self._async_env_proc.join()
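# How the adaptor above talks to the child process: every gym call is serialised over a
# multiprocessing.Pipe and dispatched by AsyncEnv._handle_rpc() to the coroutine of the
# same name. The round-trip, in outline:
#
#   parent_conn.send(('step', (action,)))    # _make_rpc_call, in the gym process
#   func, args = child_conn.recv()           # _handle_rpc, awaited via run_in_executor
#   child_conn.send(await self.step(*args))  # child replies with the step result
#   result = parent_conn.recv()              # _make_rpc_call unblocks and returns it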
class AsyncGuidedEnv(AsyncEnv):
key_to_dir = [(0, -1000), # 0: Forward
(1000, -1000), # 1: Forward-right
(1000, 0), # 2: Right
(1000, 1000), # 3: Back-right
(0, 1000), # 4: Back
(-1000, 1000), # 5: Back-left
(-1000, 0), # 6: Left
(-1000, -1000)] # 7: Forward-left
action_space = gym.spaces.Discrete(8)
observation_space = gym.spaces.Box(-np.inf, np.inf, shape=(11,), dtype=np.float32)
center_print_rewards = {
}
center_print_progress = {
}
movement_rewards = {
25: 3000, # bridge button
62: 3000, # spiral button 1
60: 3000, # spiral button 2
61: 3000, # spiral button 3
}
movement_progress = {
25: 4088,
62: 5795, # spiral button 1
60: 6273, # spiral button 2
61: 6734, # spiral button 3
}
def __init__(self, demo_file):
super().__init__()
guide_origins, _ = _get_player_origins(demo_file)
self._pm = progress.ProgressMap(guide_origins, 250)
self._highest_reward = None
self._total_reward = None
self._demo = None
def _action_to_move_args(self, a):
return (0, 0, 0, *self.key_to_dir[a], 0, 0, 0)
async def step(self, a):
self._client.move(*self._action_to_move_args(a))
await self._client.wait_for_movement(self._client.view_entity)
obs, reward, done, info = await self._get_step_return_and_update()
self._total_reward += reward
return obs, reward, done, info
def _before_spawn(self):
self._demo = self._client.record_demo()
def _on_episode_end(self):
self._demo.stop_recording()
if self._highest_reward is None or self._total_reward > self._highest_reward:
self._highest_reward = self._total_reward
with open(f"demos/reward_{self._highest_reward:08.02f}.demo.pickle", "wb") as f:
pickle.dump(self._demo, f)
def _reset_per_episode_state(self):
#self._prev_progress = 5464.89 # spiral
#self._prev_progress = 3688.33527211 # draw bridge
self._prev_progress = 0.
self._prev_dist = 0.
self._old_pos = None
self._center_prints_seen = set()
self._moved = set()
self._total_reward = 0.
def _limit_progress_by_center_prints(self, progress):
for k, v in self.center_print_progress.items():
if k not in self._center_prints_seen:
progress = min(progress, v)
return progress
def _limit_progress_by_moved(self, progress):
for k, v in self.movement_progress.items():
moved = (self._client.origins[k] != (0., 0., 0.))
if not moved:
progress = min(progress, v)
return progress
def _get_movement_rewards(self):
reward = 0
for k, v in self.movement_rewards.items():
if (k not in self._moved and
self._client.origins[k] != (0., 0., 0.)):
reward += v
self._moved |= {k}
return reward
async def _get_center_print_reward(self):
print_queue = self._client.center_print_queue
reward = 0
while print_queue.qsize():
string = await print_queue.get()
for k, v in self.center_print_rewards.items():
if k in string:
logger.info("Center print reward: %r %s", k, v)
reward += v
self._center_prints_seen |= {k}
return reward
async def _get_step_return_and_update(self):
pos = np.array(self._client.player_origin)
if self._old_pos is not None:
vel = pos - self._old_pos
else:
vel = np.zeros_like(pos)
(closest_point,), (progress,) = self._pm.get_progress(np.array([pos]))
progress = self._limit_progress_by_center_prints(progress)
progress = self._limit_progress_by_moved(progress)
if self._client.level_finished:
logger.warning("LEVEL FINISHED %s", self._client.time)
progress = self._pm.get_distance()
closest_point = self._pm.get_pos(progress)
dir_ = self._pm.get_dir(progress)
offset = pos - closest_point
dist = np.linalg.norm(offset)
obs = np.concatenate([pos, vel,
[k in self._moved
for k in self.movement_progress],
[self._client.time]])
#obs = np.concatenate([offset, vel, dir_,
# [progress],
# [len(self._moved)],
# [len(self._center_prints_seen)],
# [self._client.time]])
reward = ((progress - self._prev_progress) +
self._get_movement_rewards() +
await self._get_center_print_reward() -
1. * (dist - self._prev_dist))
if self._client.level_finished:
reward += 100
done = self._client.time > _TIME_LIMIT
info = {'time': self._client.time,
'pos': pos,
'vel': vel,
'progress': progress,
'offset': offset,
'dir': dir_,
'obs': obs,
'reward': reward,
'moved': list(self._moved),
#'center_prints_seen': list(self._center_prints_seen),
'finished': self._client.level_finished}
self._old_pos = pos
self._prev_progress = progress
self._prev_dist = dist
return obs, reward, done, info
async def _get_initial_observation(self):
obs, reward, done, info = await self._get_step_return_and_update()
return obs
gym.envs.registration.register(
id='pyquake-v0',
entry_point='pyquake.rl.env:AsyncEnvAdaptor',
)
|
the-stack_106_13108
|
from syntax import *
# ------------------------ EVALUATION ------------------------
class NoRuleApplies(RuntimeError):
pass
def isval(term):
if type(term) is TmTrue:
return True
elif isinstance(term, TmFalse):
return True
elif isinstance(term, TmAbs):
return True
elif isinstance(term, TmRecord):
return all(map(lambda t: isval(t[1]), term.fields))
else:
return False
class Evaluate(Visitor):
def visit_TmApp(term, ctx):
if isval(term.left) and isval(term.right):
return termSubstTop(term.right, term.left.term)
elif isval(term.left):
right = evaluate1(term.right, ctx)
return TmApp(term.info, term.left, right)
else:
left = evaluate1(term.left, ctx)
return TmApp(term.info, left, term.right)
def visit_TmIf(term, ctx):
if isinstance(term.term_condition, TmTrue):
return term.term_then
elif isinstance(term.term_condition, TmFalse):
return term.term_else
else:
new_term_condition = evaluate1(term.term_condition, ctx)  # single evaluation step, mirroring visit_TmApp
return TmIf(
term.info,
new_term_condition,
term.term_then, term.term_else)
def visit_TmRecord(term, ctx):
for (num, field) in enumerate(term.fields):
f_term = evaluate1(field[1], ctx)
term.fields[num] = (field[0], f_term)
return term
def visit_TmProj(term, ctx):
if type(term.term) is TmRecord:
if isinstance(term.name, int):
return term.term.fields[-term.name][1]
# lookup by name
# TODO: a dict could be used here to improve performance
for (name, value) in reversed(term.term.fields):
if name == term.name:
return value
raise NoRuleApplies("Not found")
else:
term.term = evaluate1(term.term, ctx)
return term
raise NoRuleApplies
def visit__(term, ctx):
raise NoRuleApplies
evaluate1 = Evaluate.visit
def evaluate(ctx, term):
try:
return evaluate(ctx, evaluate1(term, ctx))
except NoRuleApplies:
return term
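# evaluate() drives the single-step relation to a normal form: keep applying evaluate1
# until it raises NoRuleApplies, then return the last term. An equivalent iterative
# sketch of the same driver (same behaviour, without the recursion):
def evaluate_iterative(ctx, term):
    while True:
        try:
            term = evaluate1(term, ctx)
        except NoRuleApplies:
            return term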
def evalbinding(ctx, b):
type_b = type(b)
if type_b is TmAbbBind:
term = evaluate(ctx, b.term)
return TmAbbBind(term, b.type)
return b
def istyabb(ctx, i):
# FIXME: could be implemented using gettyabb
b = getbinding(ctx, i)
if isinstance(b, TyAbbBind):
return True
return False
def gettyabb(ctx, i):
b = getbinding(ctx, i)
if isinstance(b, TyAbbBind):
return b.type
raise NoRuleApplies
def computety(ctx, tyT):
if isinstance(tyT, TyVar):
return gettyabb(ctx, tyT.index)
else:
raise NoRuleApplies
def simplifyty(ctx, tyT):
# TODO: rewrite as an explicit loop
try:
tyT1 = computety(ctx, tyT)
return simplifyty(ctx, tyT1)
except NoRuleApplies:
return tyT
def tyeqv(ctx, tyS, tyT):
tyS = simplifyty(ctx, tyS)
tyT = simplifyty(ctx, tyT)
if type(tyS) == type(tyT):
if type(tyS) is TyId:
return tyS.name == tyT.name
elif type(tyS) is TyVar:
return tyS.index == tyT.index
elif type(tyS) is TyArr:
return tyeqv(ctx, tyS.left, tyT.left) and tyeqv(ctx, tyS.right, tyT.right)
elif type(tyS) is TyRecord:
if len(tyS.fields) != len(tyT.fields):
return False
# NOTE: positionally dependent
# See notes in 11.8 Records
for ((name1, tyS1), (name2,tyT1)) in zip(tyS.fields, tyT.fields):
if not tyeqv(ctx, tyS1, tyT1):
return False
return True
elif type(tyS) is TyVariant:
raise NotImplementedError(tyS, tyT)
elif type(tyS) in [TyString, TyUnit, TyFloat, TyBool, TyNat]:
return True
else:
raise NotImplementedError(tyS, tyT)
else:
return False
# ------------------------ TYPING ------------------------
class Typeof(Visitor):
def visit_TmVar(term, ctx):
return getTypeFromContext(ctx, term.index)
def visit_TmAbs(term, ctx):
addbinding(ctx, term.name, VarBind(term.type))
typeLeft = term.type
typeRight = typeof(term.term, ctx)
ctx.pop()
return TyArr(typeLeft, typeRight)
def visit_TmApp(term, ctx):
typeLeft = typeof(term.left, ctx)
typeRight = typeof(term.right, ctx)
typeLeft_ = simplifyty(ctx, typeLeft)
if isinstance(typeLeft_, TyArr):
if tyeqv(ctx, typeRight, typeLeft_.left):
return typeLeft_.right
else:
raise RuntimeError(
"Parameter type mismatch",
term.info, typeLeft, typeRight)
else:
raise RuntimeError("Arrow type expected")
def visit_TmTrue(term, ctx):
return TyBool()
def visit_TmFalse(term, ctx):
return TyBool()
def visit_TmString(term, ctx):
return TyString()
def visit_TmIf(term, ctx):
typeCond = typeof(term.term_condition, ctx)
if isinstance(typeCond, TyBool):
typeThen = typeof(term.term_then, ctx)
typeElse = typeof(term.term_else, ctx)
if type(typeThen) == type(typeElse):
return typeThen
else:
raise RuntimeError(
term.info, "arms of conditional have different types")
else:
raise RuntimeError(term.info, "guard of conditional not a boolean")
def visit_TmRecord(term, ctx):
fieldtys = [(li, typeof(ti, ctx)) for li, ti in term.fields]
return TyRecord(fieldtys)
def visit_TmProj(term, ctx):
s_term = simplifyty(ctx, typeof(term.term, ctx))
if type(s_term) is not TyRecord:
raise RuntimeError(term.info, "Expected record type")
for (name, tf) in s_term.fields:
if name == term.name:
return tf
raise RuntimeError(
term.info, "label " + str(term.name) + " not found")
def visit_TmTag(term, ctx):
s_term = simplifyty(ctx, term.type)
tyTi = typeof(term.term, ctx)
if type(s_term) is not TyVariant:
raise RuntimeError(term.info, "Annotation is not a variant type")
tyTiExpected = None
for (name, tf) in s_term.fields:
if name == term.tag:
tyTiExpected = tf
if tyTiExpected is None:
raise RuntimeError(
term.info, "label " + str(term.name) + " not found")
if tyeqv(ctx, tyTi, tyTiExpected):
return term.type
else:
raise RuntimeError(
term.info,
"field does not have expected type - expected %s in fact %s"
% (tyTiExpected, tyTi))
typeof = Typeof.visit
|
the-stack_106_13109
|
import sys
import requests
if sys.version_info[0] == 2:
from base64 import encodestring as encodebytes
else:
from base64 import encodebytes
import braintree
from braintree import version
from braintree.util.xml_util import XmlUtil
from braintree.exceptions.authentication_error import AuthenticationError
from braintree.exceptions.authorization_error import AuthorizationError
from braintree.exceptions.down_for_maintenance_error import DownForMaintenanceError
from braintree.exceptions.not_found_error import NotFoundError
from braintree.exceptions.server_error import ServerError
from braintree.exceptions.too_many_requests_error import TooManyRequestsError
from braintree.exceptions.unexpected_error import UnexpectedError
from braintree.exceptions.upgrade_required_error import UpgradeRequiredError
from braintree.exceptions.http.connection_error import ConnectionError
from braintree.exceptions.http.invalid_response_error import InvalidResponseError
from braintree.exceptions.http.timeout_error import TimeoutError
class Http(object):
@staticmethod
def is_error_status(status):
return status not in [200, 201, 422]
@staticmethod
def raise_exception_from_status(status, message=None):
if status == 401:
raise AuthenticationError()
elif status == 403:
raise AuthorizationError(message)
elif status == 404:
raise NotFoundError()
elif status == 426:
raise UpgradeRequiredError()
elif status == 429:
raise TooManyRequestsError()
elif status == 500:
raise ServerError()
elif status == 503:
raise DownForMaintenanceError()
else:
raise UnexpectedError("Unexpected HTTP_RESPONSE " + str(status))
def __init__(self, config, environment=None):
self.config = config
self.environment = environment or self.config.environment
def post(self, path, params={}):
return self.__http_do("POST", path, params)
def delete(self, path):
return self.__http_do("DELETE", path)
def get(self, path):
return self.__http_do("GET", path)
def put(self, path, params={}):
return self.__http_do("PUT", path, params)
def __http_do(self, http_verb, path, params=None):
http_strategy = self.config.http_strategy()
request_body = XmlUtil.xml_from_dict(params) if params else ''
full_path = path if path.startswith(self.config.base_url()) else (self.config.base_url() + path)
try:
status, response_body = http_strategy.http_do(http_verb, full_path, self.__headers(), request_body)
except Exception as e:
if self.config.wrap_http_exceptions:
http_strategy.handle_exception(e)
else:
raise
if Http.is_error_status(status):
Http.raise_exception_from_status(status)
else:
if len(response_body.strip()) == 0:
return {}
else:
return XmlUtil.dict_from_xml(response_body)
def http_do(self, http_verb, path, headers, request_body):
response = self.__request_function(http_verb)(
path if path.startswith(self.config.base_url()) else self.config.base_url() + path,
headers=headers,
data=request_body,
verify=self.environment.ssl_certificate,
timeout=self.config.timeout
)
return [response.status_code, response.text]
def handle_exception(self, exception):
if isinstance(exception, requests.exceptions.ConnectionError):
raise ConnectionError(exception)
elif isinstance(exception, requests.exceptions.HTTPError):
raise InvalidResponseError(exception)
elif isinstance(exception, requests.exceptions.Timeout):
raise TimeoutError(exception)
else:
raise UnexpectedError(exception)
def __request_function(self, method):
if method == "GET":
return requests.get
elif method == "POST":
return requests.post
elif method == "PUT":
return requests.put
elif method == "DELETE":
return requests.delete
def __authorization_header(self):
if self.config.has_client_credentials():
return b"Basic " + encodebytes(
self.config.client_id.encode('ascii') +
b":" +
self.config.client_secret.encode('ascii')
).replace(b"\n", b"").strip()
elif self.config.has_access_token():
return b"Bearer " + self.config.access_token.encode('ascii')
else:
return b"Basic " + encodebytes(
self.config.public_key.encode('ascii') +
b":" +
self.config.private_key.encode('ascii')
).replace(b"\n", b"").strip()
def __headers(self):
return {
"Accept": "application/xml",
"Authorization": self.__authorization_header(),
"Content-type": "application/xml",
"User-Agent": "Braintree Python " + version.Version,
"X-ApiVersion": braintree.configuration.Configuration.api_version()
}
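# __authorization_header() above builds a standard HTTP Basic credential: base64 of
# "key:secret", with the newlines that encodebytes inserts stripped out. A standalone
# sketch of the same encoding (illustrative key names, not a real credential):
def _basic_auth_header(public_key, private_key):
    raw = public_key.encode('ascii') + b":" + private_key.encode('ascii')
    return b"Basic " + encodebytes(raw).replace(b"\n", b"").strip()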
|
the-stack_106_13111
|
from urllib.request import urlopen
from bs4 import BeautifulSoup
import ssl
import tabula
import urllib.request
import PyPDF2
import pandas as pd
# Keep track of the file update dates
arq='data_atualização.txt'
def arquivoExiste(nome):
try:
a=open(nome, 'rt')
a.close()
except FileNotFoundError:
return False
else:
return True
def criarArquivo(nome):
try:
a=open(nome,'wt+') # the '+' sign is what creates the file if it does not exist
a.close()
except:
print('Houve um erro na criação do arquivo!')
else:
print(f'Arquivo {nome} criado com sucesso!')
def Acrescenta(arq,file, data):
try:
a=open(arq,'at')
except:
print('Houve um erro na abertura do arquivo!')
else:
try:
a.write(f'{file};{data}\n')
except:
print('Houve um erro ao escrever a data!')
else:
print(f'Novo registo adicionado.')
a.close()
if not arquivoExiste(arq):
criarArquivo(arq)
# Always include this code when using the urllib package
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# URL of the AT (Portuguese tax authority) debtors list
url = "https://static.portaldasfinancas.gov.pt/app/devedores_static/de-devedores.html"
print(f'retrieving:{format(url)}')
html = urlopen(url, context=ctx).read()
soup = BeautifulSoup(html, "html.parser")
#print(soup)
#print(soup.prettify())  # useful to inspect the HTML layout and then identify the anchors
anchors = soup('iframe')
#print(anchors)
lista=list()
for tag in anchors:
identif=tag.get('src')
identif=identif.replace('.html','.pdf')
#print(identif)
url=('https://static.portaldasfinancas.gov.pt/app/devedores_static/listaF'+ identif)
lista.append(url)
print(f'Número de ficheiros a extrair: {len(lista)}')
# The approach taken is to download the PDFs and then consolidate them into CSV files
print('***' * 30)
count=count_sing=count_colect=0
for i in lista:
    #print(i)  # URL being processed
filename=i[i.find('lista'):]
    print('Nome do ficheiro pdf:', filename)  # name of the PDF file
urllib.request.urlretrieve(i, filename)
    file = i[i.find('lista'):]  # same value as filename
    # Part 1 - import the PDF and turn it into a dataframe
    ## put all pages into a single dataframe so it can be processed - table[0]
try:
table = tabula.read_pdf(file, java_options="-Dfile.encoding=UTF8", pages='all', multiple_tables=True,
encoding='utf-8', output_format="dataframe")
except:
print(f'\033[31mErro no ficheiro {file}.\033[m')
print('***' * 30)
continue
#print(len(table))
    # df is a dataframe
df = table[0]
# print(type(df))
# print(df)
    # quick checks on the dataframe
# print(df.head(18))
    # Part 2 - read the PDF to find the amount owed and the file date
    file_2 = open(file, "rb")
    reader = PyPDF2.PdfFileReader(file_2)
if reader.isEncrypted:
reader.decrypt('')
page1 = reader.getPage(0)
print("N.º de páginas do pdf:", reader.numPages)
N_pages=reader.numPages
pdfData = page1.extractText()
# print(pdfData)
montante = pdfData[pdfData.find('Devedores'):pdfData.find('•')]
data = pdfData[pdfData.find('202'):pdfData.find('2021') + 10]
Acrescenta(arq, filename, data)
print('Montante:',montante)
print('Data:', data)
    # Part 3 - process the dataframe and export the CSV
df.loc[:, 'Montante'] = montante
df.loc[:, 'Data'] = data
print(f'\33[34mExtração do ficheiro {filename} concluída com sucesso!\33[m')
print('***' * 30)
count=count+1
if N_pages == 1:
df_filtered = df
else:
        df_filtered = df[df.iloc[:, 0].str.isnumeric()]  # drop the header rows
if filename[6:7]=='S':
count_sing=count_sing+1
if count_sing == 1:
Contribuintes_singulares = pd.DataFrame(df_filtered)
else:
Contribuintes_singulares = pd.concat([Contribuintes_singulares,df_filtered])
if filename[6:7]=='C':
count_colect=count_colect+1
if count_colect == 1:
Contribuintes_colectivos = df_filtered
else:
Contribuintes_colectivos = pd.concat([Contribuintes_colectivos,df_filtered])
#df = pd.read_csv (r'Contribuintes_singulares.csv')
#df1 = pd.read_csv (r'Contribuintes_colectivos.csv')
#df = pd.read_csv (r'S:\IFM\Data\Controlo_qualidade\Daniel_Silva\Devedores_AT\Contribuintes_singulares.csv')
#df1 = pd.read_csv (r'S:\IFM\Data\Controlo_qualidade\Daniel_Silva\Devedores_AT\Contribuintes_colectivos.csv')
#Contribuintes_singulares = pd.concat([Contribuintes_singulares,df])
#Contribuintes_colectivos = pd.concat([Contribuintes_colectivos,df1])
#unique values
#Contribuintes_singulares = Contribuintes_singulares.drop_duplicates()
#Contribuintes_colectivos = Contribuintes_colectivos.drop_duplicates()
#Contribuintes_singulares.to_csv("S:\IFM\Data\Controlo_qualidade\Daniel_Silva\Devedores_AT\Contribuintes_singulares.csv", encoding='utf-8-sig', index=False)
#Contribuintes_colectivos.to_csv("S:\IFM\Data\Controlo_qualidade\Daniel_Silva\Devedores_AT\Contribuintes_colectivos.csv", encoding='utf-8-sig', index=False)
Contribuintes_singulares.to_csv("Contribuintes_singulares.csv", encoding='utf-8-sig', index=False)
Contribuintes_colectivos.to_csv("Contribuintes_colectivos.csv", encoding='utf-8-sig', index=False)
print(f'\033[32m{count} dos {len(lista)} ficheiros extraídos com sucesso!\033[m')
print(f'Ficheiro:Contribuintes_singulares.csv - {len(Contribuintes_singulares)} registos adicionados.')
print(f'Ficheiro:Contribuintes_colectivos.csv - {len(Contribuintes_colectivos)} registos adicionados.')
print('***' * 30)
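# ---------------------------------------------------------------------------
# Usage sketch (not part of the original script): the two CSVs written above
# can be reloaded later for further analysis, e.g.
#
#   import pandas as pd
#   singulares = pd.read_csv('Contribuintes_singulares.csv')
#   print(singulares[['Montante', 'Data']].drop_duplicates())
# ---------------------------------------------------------------------------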
|
the-stack_106_13113
|
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" CLIP model configuration """
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"openai/clip-vit-base-patch32": "https://huggingface.co/openai/clip-vit-base-patch32/resolve/main/config.json",
# See all CLIP models at https://huggingface.co/models?filter=clip
}
class CLIPTextConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a :class:`~transformers.CLIPModel`. It is used to
    instantiate a CLIP model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the CLIP
`openai/clip-vit-base-patch32 <https://huggingface.co/openai/clip-vit-base-patch32>`__ architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
Args:
vocab_size (:obj:`int`, `optional`, defaults to 49408):
Vocabulary size of the CLIP text model. Defines the number of different tokens that can be represented by
the :obj:`inputs_ids` passed when calling :class:`~transformers.CLIPModel`.
hidden_size (:obj:`int`, `optional`, defaults to 512):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (:obj:`int`, `optional`, defaults to 2048):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (:obj:`int`, `optional`, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (:obj:`int`, `optional`, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
max_position_embeddings (:obj:`int`, `optional`, defaults to 77):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
hidden_act (:obj:`str` or :obj:`function`, `optional`, defaults to :obj:`"quick_gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string,
            :obj:`"gelu"`, :obj:`"relu"`, :obj:`"selu"`, :obj:`"gelu_new"` and :obj:`"quick_gelu"` are supported.
layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-5):
The epsilon used by the layer normalization layers.
attention_dropout (:obj:`float`, `optional`, defaults to 0.0):
The dropout ratio for the attention probabilities.
dropout (:obj:`float`, `optional`, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
initializer_range (:obj:`float`, `optional`, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (:obj:`float`, `optional`, defaults to 1):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
gradient_checkpointing (:obj:`bool`, `optional`, defaults to :obj:`False`):
If True, use gradient checkpointing to save memory at the expense of slower backward pass.
Example::
>>> from transformers import CLIPTextModel, CLIPTextConfig
>>> # Initializing a CLIPTextModel with openai/clip-vit-base-patch32 style configuration
>>> configuration = CLIPTextConfig()
        >>> # Initializing a CLIPTextModel from the openai/clip-vit-base-patch32 style configuration
>>> model = CLIPTextModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
"""
model_type = "clip_text_model"
def __init__(
self,
vocab_size=49408,
hidden_size=512,
intermediate_size=2048,
num_hidden_layers=12,
num_attention_heads=8,
max_position_embeddings=77,
hidden_act="quick_gelu",
layer_norm_eps=0.00001,
dropout=0.0,
attention_dropout=0.0,
initializer_range=0.02,
initializer_factor=1.0,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
gradient_checkpointing=False,
**kwargs
):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.dropout = dropout
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.max_position_embeddings = max_position_embeddings
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.initializer_factor = initializer_factor
self.attention_dropout = attention_dropout
self.gradient_checkpointing = gradient_checkpointing
class CLIPVisionConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a :class:`~transformers.CLIPModel`. It is used to
    instantiate a CLIP model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the CLIP
`openai/clip-vit-base-patch32 <https://huggingface.co/openai/clip-vit-base-patch32>`__ architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
Args:
hidden_size (:obj:`int`, `optional`, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (:obj:`int`, `optional`, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (:obj:`int`, `optional`, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (:obj:`int`, `optional`, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
image_size (:obj:`int`, `optional`, defaults to 224):
The size (resolution) of each image.
patch_size (:obj:`int`, `optional`, defaults to 32):
The size (resolution) of each patch.
hidden_act (:obj:`str` or :obj:`function`, `optional`, defaults to :obj:`"quick_gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string,
            :obj:`"gelu"`, :obj:`"relu"`, :obj:`"selu"`, :obj:`"gelu_new"` and :obj:`"quick_gelu"` are supported.
layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-5):
The epsilon used by the layer normalization layers.
dropout (:obj:`float`, `optional`, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (:obj:`float`, `optional`, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (:obj:`float`, `optional`, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (:obj:`float`, `optional`, defaults to 1):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
gradient_checkpointing (:obj:`bool`, `optional`, defaults to :obj:`False`):
If True, use gradient checkpointing to save memory at the expense of slower backward pass.
Example::
>>> from transformers import CLIPVisionModel, CLIPVisionConfig
>>> # Initializing a CLIPVisionModel with openai/clip-vit-base-patch32 style configuration
>>> configuration = CLIPVisionConfig()
        >>> # Initializing a CLIPVisionModel from the openai/clip-vit-base-patch32 style configuration
>>> model = CLIPVisionModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
"""
model_type = "clip_vision_model"
def __init__(
self,
hidden_size=768,
intermediate_size=3072,
num_hidden_layers=12,
num_attention_heads=12,
image_size=224,
patch_size=32,
hidden_act="quick_gelu",
layer_norm_eps=0.00001,
dropout=0.0,
attention_dropout=0.0,
initializer_range=0.02,
initializer_factor=1.0,
gradient_checkpointing=False,
**kwargs
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.dropout = dropout
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.patch_size = patch_size
self.image_size = image_size
self.initializer_range = initializer_range
self.initializer_factor = initializer_factor
self.attention_dropout = attention_dropout
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.gradient_checkpointing = gradient_checkpointing
class CLIPConfig(PretrainedConfig):
r"""
:class:`~transformers.CLIPConfig` is the configuration class to store the configuration of a
    :class:`~transformers.CLIPModel`. It is used to instantiate a CLIP model according to the specified arguments,
defining the text model and vision model configs.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
Args:
text_config_dict (:obj:`dict`, `optional`):
Dictionary of configuration options used to initialize :class:`~transformers.CLIPTextConfig`.
vision_config_dict (:obj:`dict`, `optional`):
Dictionary of configuration options used to initialize :class:`~transformers.CLIPVisionConfig`.
projection_dim (:obj:`int`, `optional`, defaults to 512):
            Dimensionality of text and vision projection layers.
kwargs (`optional`):
Dictionary of keyword arguments.
"""
model_type = "clip"
is_composition = True
def __init__(self, text_config_dict=None, vision_config_dict=None, projection_dim=512, **kwargs):
super().__init__(text_config_dict=text_config_dict, vision_config_dict=vision_config_dict, **kwargs)
if text_config_dict is None:
text_config_dict = {}
logger.info("text_config_dict is None. Initializing the CLIPTextConfig with default values.")
if vision_config_dict is None:
vision_config_dict = {}
            logger.info("vision_config_dict is None. Initializing the CLIPVisionConfig with default values.")
self.text_config = CLIPTextConfig(**text_config_dict)
self.vision_config = CLIPVisionConfig(**vision_config_dict)
self.projection_dim = projection_dim
self.initializer_factor = 1.0
@classmethod
def from_text_vision_configs(cls, text_config: CLIPTextConfig, vision_config: CLIPVisionConfig, **kwargs):
r"""
Instantiate a :class:`~transformers.CLIPConfig` (or a derived class) from clip text model configuration and
clip vision model configuration.
Returns:
:class:`CLIPConfig`: An instance of a configuration object
"""
return cls(text_config_dict=text_config.to_dict(), vision_config_dict=vision_config.to_dict(), **kwargs)
def to_dict(self):
"""
Serializes this instance to a Python dictionary. Override the default
:meth:`~transformers.PretrainedConfig.to_dict`.
Returns:
            :obj:`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
"""
output = copy.deepcopy(self.__dict__)
output["text_config"] = self.text_config.to_dict()
output["vision_config"] = self.vision_config.to_dict()
output["model_type"] = self.__class__.model_type
return output
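# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module; mirrors the Example:: blocks
# in the docstrings above). A combined CLIPConfig can be assembled from the
# two sub-configs defined in this file:
#
#   >>> text_config = CLIPTextConfig()
#   >>> vision_config = CLIPVisionConfig()
#   >>> config = CLIPConfig.from_text_vision_configs(text_config, vision_config)
#   >>> config.to_dict()["model_type"]
#   'clip'
# ---------------------------------------------------------------------------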
|
the-stack_106_13114
|
#!/usr/bin/python3.6
import requests, json
from bs4 import BeautifulSoup
import constants as cte
class Scraping:
def nationalPanorama(self):
url = 'https://xx9p7hp1p7.execute-api.us-east-1.amazonaws.com/prod/PortalSintese'
response = json.loads(requests.get(url).content)
response = response[0]
return {
'date': response['data'][-2:] + '/' + response['data'][5:-3]+ '/' + response['data'][:4],
'cases': int(response['casosAcumulado']),
'deaths': int(response['obitosAcumulado']),
'recovered': int(response['Recuperadosnovos']),
'percentageInfected': round((float(response['casosAcumulado']) / float(response['populacaoTCU2019'])) * 100, 2),
'states': self.statePanorama(),
'source': 'Ministério da Saude, 2020 (https://covid.saude.gov.br/).'
}
def statePanorama(self):
url = 'https://xx9p7hp1p7.execute-api.us-east-1.amazonaws.com/prod/PortalEstado'
response = json.loads(requests.get(url).content)
data = []
for state in response:
data.append({
'name': cte.stateName[state['nome']],
'flag': cte.stateFlag[state['nome']],
'cases': int(state['casosAcumulado']),
'deaths': int(state['obitosAcumulado']),
'percentageInfected': round((float(state['casosAcumulado']) / float(state['populacaoTCU2019'])) * 100, 2)
})
return sorted(data, key=lambda name: name['name'])
def worldPanorama(self):
URL = 'https://en.wikipedia.org/wiki/Template:COVID-19_pandemic_data#covid19-container'
response = requests.get(URL)
soup = BeautifulSoup(response.content, 'html.parser')
dataTable = soup.find_all('table', class_ = 'wikitable plainrowheaders sortable')
dataTable = dataTable[0]
self.totalCases = soup.find_all('tr', class_ = 'sorttop')
self.totalCases = self.totalCases[0].find_all('th')[2:-1]
print(self.totalCases)
key = ['cases', 'deaths']
rows = [row for row in dataTable.find_all('tr')[2:]]
locations = [row.find('a').text for row in rows[:-1]]
countryFlag = [row.find('img').get('srcset') for row in rows[:-2]]
print(len(countryFlag))
data = [ { key[i] : int(value.text[:-1].replace(',', '')) for i, value in enumerate(row.find_all('td')[:-2])} for row in rows]
totalPopulation = self.totalPopulation()
result = []
for i in range(len(countryFlag)):
if i < cte.NUMBER_COUNTRIES:
name = ''
for country in cte.countryName:
if country['name'] == locations[i]:
data[i]['name'] = country['translation']
name = country['translation']
for value in totalPopulation:
if value['name'] == name:
data[i]['percentageInfected'] = round((float(data[i]['cases']) / float(value['totalPopulation'].replace(' ', ''))) * 100, 2)
data[i]['flag'] = 'https:' + countryFlag[i].rsplit(' ', 3)[0]
result.append(data[i])
return {
'cases': int(self.totalCases[0].text[:-1].replace(',', '')),
'deaths': int(self.totalCases[1].text[:-1].replace(',', '')),
'recovered': int(self.totalCases[2].text[:-1].replace(',', '')),
'percentageInfected': round((float(self.totalCases[0].text.replace(',', '')) / float(7790000000)) * 100, 2),
'states': result
}
def totalPopulation(self):
URL = 'https://pt.wikipedia.org/wiki/Lista_de_pa%C3%ADses_por_popula%C3%A7%C3%A3o'
response = requests.get(URL)
soup = BeautifulSoup(response.content, 'html.parser')
dataTable = soup.find_all('table', class_ = 'wikitable sortable')
dataTable = dataTable[0]
key = ['name', 'totalPopulation']
rows = [row for row in dataTable.find_all('tr')[1:]]
data = [ { key[i] : value.text[:-1].replace(',', '') for i, value in enumerate(row.find_all('td')[1:3])} for row in rows]
for country in data:
if country['name'][0] == '\xa0':
country['name'] = country['name'][1:]
return data
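# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module; it needs the local
# `constants` module and live network access, so treat it as illustrative):
#
#   scraper = Scraping()
#   brazil = scraper.nationalPanorama()   # dict: date, cases, deaths, ...
#   world = scraper.worldPanorama()       # dict: global totals plus countries
#   print(brazil['date'], brazil['cases'])
# ---------------------------------------------------------------------------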
|
the-stack_106_13115
|
import numpy as np
class Track:
"""
Track containing attributes to track various objects.
Args:
frame_id (int): Camera frame id.
track_id (int): Track Id
bbox (numpy.ndarray): Bounding box pixel coordinates as (xmin, ymin, width, height) of the track.
detection_confidence (float): Detection confidence of the object (probability).
class_id (str or int): Class label id.
lost (int): Number of times the object or track was not tracked by tracker in consecutive frames.
iou_score (float): Intersection over union score.
data_output_format (str): Output format for data in tracker.
Options include ``['mot_challenge', 'visdrone_challenge']``. Default is ``mot_challenge``.
kwargs (dict): Additional key word arguments.
"""
count = 0
metadata = dict(
data_output_formats=['mot_challenge', 'visdrone_challenge']
)
def __init__(
self,
track_id,
frame_id,
bbox,
detection_confidence,
class_id=None,
lost=0,
iou_score=0.,
data_output_format='mot_challenge',
**kwargs
):
assert data_output_format in Track.metadata['data_output_formats']
Track.count += 1
self.id = track_id
self.detection_confidence_max = 0.
self.lost = 0
self.age = 0
self.update(frame_id, bbox, detection_confidence, class_id=class_id, lost=lost, iou_score=iou_score, **kwargs)
if data_output_format == 'mot_challenge':
self.output = self.get_mot_challenge_format
elif data_output_format == 'visdrone_challenge':
self.output = self.get_vis_drone_format
else:
raise NotImplementedError
def update(self, frame_id, bbox, detection_confidence, class_id=None, lost=0, iou_score=0., **kwargs):
"""
Update the track.
Args:
frame_id (int): Camera frame id.
bbox (numpy.ndarray): Bounding box pixel coordinates as (xmin, ymin, width, height) of the track.
detection_confidence (float): Detection confidence of the object (probability).
class_id (int or str): Class label id.
lost (int): Number of times the object or track was not tracked by tracker in consecutive frames.
iou_score (float): Intersection over union score.
kwargs (dict): Additional key word arguments.
"""
self.class_id = class_id
self.bbox = np.array(bbox)
self.detection_confidence = detection_confidence
self.frame_id = frame_id
self.iou_score = iou_score
if lost == 0:
self.lost = 0
else:
self.lost += lost
for k, v in kwargs.items():
setattr(self, k, v)
self.detection_confidence_max = max(self.detection_confidence_max, detection_confidence)
self.age += 1
@property
def centroid(self):
"""
Return the centroid of the bounding box.
Returns:
numpy.ndarray: Centroid (x, y) of bounding box.
"""
return np.array((self.bbox[0] + 0.5 * self.bbox[2], self.bbox[1] + 0.5 * self.bbox[3]))
def get_mot_challenge_format(self):
"""
Get the tracker data in MOT challenge format as a tuple of elements containing
`(frame, id, bb_left, bb_top, bb_width, bb_height, conf, x, y, z)`
References:
- Website : https://motchallenge.net/
Returns:
tuple: Tuple of 10 elements representing `(frame, id, bb_left, bb_top, bb_width, bb_height, conf, x, y, z)`.
"""
mot_tuple = (
self.frame_id, self.id, self.bbox[0], self.bbox[1], self.bbox[2], self.bbox[3], self.detection_confidence,
-1, -1, -1
)
return mot_tuple
def get_vis_drone_format(self):
"""
Track data output in VISDRONE Challenge format with tuple as
`(frame_index, target_id, bbox_left, bbox_top, bbox_width, bbox_height, score, object_category,
truncation, occlusion)`.
References:
- Website : http://aiskyeye.com/
- Paper : https://arxiv.org/abs/2001.06303
- GitHub : https://github.com/VisDrone/VisDrone2018-MOT-toolkit
- GitHub : https://github.com/VisDrone/
Returns:
tuple: Tuple containing the elements as `(frame_index, target_id, bbox_left, bbox_top, bbox_width, bbox_height,
score, object_category, truncation, occlusion)`.
"""
mot_tuple = (
self.frame_id, self.id, self.bbox[0], self.bbox[1], self.bbox[2], self.bbox[3],
self.detection_confidence, self.class_id, -1, -1
)
return mot_tuple
def predict(self):
"""
        Implement to predict the next state estimate of the track.
"""
        raise NotImplementedError
@staticmethod
def print_all_track_output_formats():
print(Track.metadata['data_output_formats'])
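# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): create a track from one
# detection, update it on the next frame, and print its MOT-challenge tuple.
# The bounding-box and confidence values below are made up.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    track = Track(track_id=1, frame_id=0, bbox=(10, 20, 50, 80),
                  detection_confidence=0.9, class_id=0)
    track.update(frame_id=1, bbox=(12, 22, 50, 80),
                 detection_confidence=0.95, class_id=0)
    print(track.output())   # (frame, id, bb_left, bb_top, bb_width, bb_height, conf, x, y, z)
    print(track.centroid)   # centre of the current bounding box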
|
the-stack_106_13117
|
import os
import subprocess
from struct import calcsize
try:
from mpi4py import MPI
except:
MPI = None
class Compiler_parameters(object):
def __init__(self):
self._compiler = ""
self._cppargs = []
self._ldargs = []
self._incdirs = []
self._libdirs = []
self._libs = []
self._dynlib_ext = ""
self._stclib_ext = ""
self._obj_ext = ""
self._exe_ext = ""
@property
def compiler(self):
return self._compiler
@property
def cppargs(self):
return self._cppargs
@property
def ldargs(self):
return self._ldargs
@property
def incdirs(self):
return self._incdirs
@property
def libdirs(self):
return self._libdirs
@property
def libs(self):
return self._libs
@property
def dynlib_ext(self):
return self._dynlib_ext
@property
def stclib_ext(self):
return self._stclib_ext
@property
def obj_ext(self):
return self._obj_ext
@property
def exe_ext(self):
return self._exe_ext
class GNU_parameters(Compiler_parameters):
def __init__(self, cppargs=None, ldargs=None, incdirs=None, libdirs=None, libs=None):
super(GNU_parameters, self).__init__()
if cppargs is None:
cppargs = []
if ldargs is None:
ldargs = []
if incdirs is None:
incdirs = []
if libdirs is None:
libdirs = []
if libs is None:
libs = []
libs.append("m")
Iflags = []
if isinstance(incdirs, list):
for i, dir in enumerate(incdirs):
Iflags.append("-I"+dir)
Lflags = []
if isinstance(libdirs, list):
for i, dir in enumerate(libdirs):
Lflags.append("-L"+dir)
lflags = []
if isinstance(libs, list):
for i, lib in enumerate(libs):
lflags.append("-l" + lib)
cc_env = os.getenv('CC')
mpicc = None
if MPI:
            # keep a user-supplied MPICC; otherwise probe for mpicc/mpiCC
            mpicc_env = os.getenv('MPICC')
            mpicc = mpicc_env
            if mpicc is None and os._exists("mpicc"):
                mpicc = "mpicc"
            if mpicc is None and os._exists("mpiCC"):
                mpicc = "mpiCC"
            if mpicc is not None:
                os.system("%s --version" % (mpicc))
self._compiler = mpicc if MPI and mpicc is not None else cc_env if cc_env is not None else "gcc"
opt_flags = ['-g', '-O3']
arch_flag = ['-m64' if calcsize("P") == 8 else '-m32']
self._cppargs = ['-Wall', '-fPIC', '-std=gnu11']
self._cppargs += Iflags
self._cppargs += opt_flags + cppargs + arch_flag
self._ldargs = ['-shared']
self._ldargs += Lflags
self._ldargs += lflags
self._ldargs += ldargs
if len(Lflags) > 0:
            self._ldargs += ['-Wl,-rpath=%s' % (":".join(libdirs))]
self._ldargs += arch_flag
self._incdirs = incdirs
self._libdirs = libdirs
self._libs = libs
self._dynlib_ext = "so"
self._stclib_ext = "a"
self._obj_ext = "o"
self._exe_ext = ""
class Clang_parameters(Compiler_parameters):
def __init__(self, cppargs=None, ldargs=None, incdirs=None, libdirs=None, libs=None):
super(Clang_parameters, self).__init__()
if cppargs is None:
cppargs = []
if ldargs is None:
ldargs = []
if incdirs is None:
incdirs = []
if libdirs is None:
libdirs = []
if libs is None:
libs = []
self._compiler = "cc"
self._cppargs = cppargs
self._ldargs = ldargs
self._incdirs = incdirs
self._libdirs = libdirs
self._libs = libs
self._dynlib_ext = "dynlib"
self._stclib_ext = "a"
self._obj_ext = "o"
self._exe_ext = "exe"
class MinGW_parameters(Compiler_parameters):
def __init__(self, cppargs=None, ldargs=None, incdirs=None, libdirs=None, libs=None):
super(MinGW_parameters, self).__init__()
if cppargs is None:
cppargs = []
if ldargs is None:
ldargs = []
if incdirs is None:
incdirs = []
if libdirs is None:
libdirs = []
if libs is None:
libs = []
self._compiler = "gcc"
self._cppargs = cppargs
self._ldargs = ldargs
self._incdirs = incdirs
self._libdirs = libdirs
self._libs = libs
self._dynlib_ext = "so"
self._stclib_ext = "a"
self._obj_ext = "o"
self._exe_ext = "exe"
class VS_parameters(Compiler_parameters):
def __init__(self, cppargs=None, ldargs=None, incdirs=None, libdirs=None, libs=None):
super(VS_parameters, self).__init__()
if cppargs is None:
cppargs = []
if ldargs is None:
ldargs = []
if incdirs is None:
incdirs = []
if libdirs is None:
libdirs = []
if libs is None:
libs = []
self._compiler = "cl"
self._cppargs = cppargs
self._ldargs = ldargs
self._incdirs = incdirs
self._libdirs = libdirs
self._libs = libs
self._dynlib_ext = "dll"
self._stclib_ext = "lib"
self._obj_ext = "obj"
self._exe_ext = "exe"
class CCompiler(object):
"""A compiler object for creating and loading shared libraries.
:arg cc: C compiler executable (uses environment variable ``CC`` if not provided).
:arg cppargs: A list of arguments to the C compiler (optional).
:arg ldargs: A list of arguments to the linker (optional)."""
def __init__(self, cc=None, cppargs=None, ldargs=None, incdirs=None, libdirs=None, libs=None, tmp_dir=os.getcwd()):
if cppargs is None:
cppargs = []
if ldargs is None:
ldargs = []
self._cc = os.getenv('CC') if cc is None else cc
self._cppargs = cppargs
self._ldargs = ldargs
self._dynlib_ext = ""
self._stclib_ext = ""
self._obj_ext = ""
self._exe_ext = ""
self._tmp_dir = tmp_dir
def compile(self, src, obj, log):
pass
def _create_compile_process_(self, cmd, src, log):
with open(log, 'w') as logfile:
try:
subprocess.check_call(cmd, stdout=logfile, stderr=logfile)
except OSError:
err = """OSError during compilation
Please check if compiler exists: %s""" % self._cc
raise RuntimeError(err)
except subprocess.CalledProcessError:
with open(log, 'r') as logfile2:
err = """Error during compilation:
Compilation command: %s
Source/Destination file: %s
Log file: %s
Log output: %s""" % (" ".join(cmd), src, logfile.name, logfile2.read())
raise RuntimeError(err)
return True
class CCompiler_SS(CCompiler):
"""
single-stage C-compiler; used for a SINGLE source file
"""
def __init__(self, cc=None, cppargs=None, ldargs=None, incdirs=None, libdirs=None, libs=None, tmp_dir=os.getcwd()):
super(CCompiler_SS, self).__init__(cc=cc, cppargs=cppargs, ldargs=ldargs, incdirs=incdirs, libdirs=libdirs, libs=libs, tmp_dir=tmp_dir)
def compile(self, src, obj, log):
cc = [self._cc] + self._cppargs + ['-o', obj, src] + self._ldargs
with open(log, 'w') as logfile:
logfile.write("Compiling: %s\n" % " ".join(cc))
self._create_compile_process_(cc, src, log)
class GNUCompiler_SS(CCompiler_SS):
"""A compiler object for the GNU Linux toolchain.
:arg cppargs: A list of arguments to pass to the C compiler
(optional).
:arg ldargs: A list of arguments to pass to the linker (optional)."""
def __init__(self, cppargs=None, ldargs=None, incdirs=None, libdirs=None, libs=None, tmp_dir=os.getcwd()):
c_params = GNU_parameters(cppargs, ldargs, incdirs, libdirs, libs)
super(GNUCompiler_SS, self).__init__(c_params.compiler, cppargs=c_params.cppargs, ldargs=c_params.ldargs, incdirs=c_params.incdirs, libdirs=c_params.libdirs, libs=c_params.libs, tmp_dir=tmp_dir)
self._dynlib_ext = c_params.dynlib_ext
self._stclib_ext = c_params.stclib_ext
self._obj_ext = c_params.obj_ext
self._exe_ext = c_params.exe_ext
def compile(self, src, obj, log):
lib_pathfile = os.path.basename(obj)
lib_pathdir = os.path.dirname(obj)
obj = os.path.join(lib_pathdir, lib_pathfile)
super(GNUCompiler_SS, self).compile(src, obj, log)
GNUCompiler = GNUCompiler_SS
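# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): compile a single C source
# file into a shared library with the GNU toolchain wrapper above. The file
# names are placeholders and 'kernel.c' is assumed to exist.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    compiler = GNUCompiler_SS(cppargs=['-DNDEBUG'], incdirs=['/usr/include'])
    compiler.compile(src='kernel.c', obj='kernel.so', log='kernel.log')
    print("compiler used: %s (shared-lib extension: %s)" % (compiler._cc, compiler._dynlib_ext))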
|
the-stack_106_13119
|
# Copyright (c) 2019, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from builtins import int
import os
import grpc
from oslo_log import log as logging
from ember_csi import config
from ember_csi import common
from ember_csi import constants
from ember_csi.v1_0_0 import csi_base as v1_base
from ember_csi.v1_1_0 import csi_pb2_grpc as csi
from ember_csi.v1_1_0 import csi_types as types
CONF = config.CONF
LOG = logging.getLogger(__name__)
def _add_expand_plugin_capabilities(grpc_capabilities, disabled_features):
if constants.EXPAND_FEATURE in disabled_features:
return
if constants.EXPAND_ONLINE_FEATURE not in disabled_features:
expansion_type = types.Expansion(type=types.ExpansionType.ONLINE)
else:
expansion_type = types.Expansion(type=types.ExpansionType.OFFLINE)
plugin_capability = types.Capability(volume_expansion=expansion_type)
# May have already been added by the other class (Node-Controller) if we
# are running as both (All)
if plugin_capability not in grpc_capabilities:
grpc_capabilities.append(plugin_capability)
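# Example of the helper above (explanatory comment, not in the original file):
# with disabled_features = {constants.EXPAND_ONLINE_FEATURE} the plugin still
# advertises volume expansion, but only of the OFFLINE type; with
# {constants.EXPAND_FEATURE} disabled nothing is appended at all.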
class Controller(v1_base.Controller):
CSI = csi
TYPES = types
DELETE_SNAP_RESP = types.DeleteSnapResp()
CTRL_CAPABILITIES = [types.CtrlCapabilityType.CREATE_DELETE_VOLUME,
types.CtrlCapabilityType.PUBLISH_UNPUBLISH_VOLUME,
types.CtrlCapabilityType.LIST_VOLUMES,
types.CtrlCapabilityType.GET_CAPACITY,
types.CtrlCapabilityType.CREATE_DELETE_SNAPSHOT,
types.CtrlCapabilityType.LIST_SNAPSHOTS,
types.CtrlCapabilityType.CLONE_VOLUME,
types.CtrlCapabilityType.PUBLISH_READONLY,
types.CtrlCapabilityType.EXPAND_VOLUME]
def _disable_features(self, features):
# Snapshot disabling is handled by SnapshotBase
# Clone disabling is handled by vm-base.Controller
# Expand is reported both as Controller, Node, and Plugin capabilities
# Add expand plugin capabilities if not disabled
_add_expand_plugin_capabilities(self.PLUGIN_GRPC_CAPABILITIES,
self.disabled_features)
# Expand is enabled by default. Nothing to do if not disabled
if constants.EXPAND_FEATURE not in features:
return
# Don't report the controller capability if disabled
capab = self.TYPES.CtrlCapabilityType.EXPAND_VOLUME
if capab in self.CTRL_CAPABILITIES:
self.CTRL_CAPABILITIES.remove(capab)
@common.debuggable
@common.logrpc
@common.require('volume_id', 'capacity_range')
@common.Worker.unique('volume_id')
def ControllerExpandVolume(self, request, context):
vol = self._get_vol(request.volume_id)
if not vol:
context.abort(grpc.StatusCode.NOT_FOUND,
'Volume %s does not exist' % request.volume_id)
# Validate and get requested sizes
vol_size, min_size, max_size = self._calculate_size(request, context)
if vol.size > vol_size:
context.abort(grpc.StatusCode.OUT_OF_RANGE,
'Volume cannot shrink from %s to %s' % (vol.size,
vol_size))
# We may be receiving a second call after the first one failed.
# No need to save, it will be saved when we call extend and succeed.
if vol.status == 'error':
vol._ovo.status = vol.previous_status
used = vol.status == 'in-use'
# Fail if online expansion is disabled
if used and constants.EXPAND_ONLINE_FEATURE in self.disabled_features:
context.abort(grpc.StatusCode.FAILED_PRECONDITION,
'Online expansion is disabled')
if min_size <= vol.size <= max_size:
LOG.debug('No expansion necessary in the backend, volume already '
'has %sGB size', vol.size)
else:
LOG.debug('Expanding volume %s from %s to %s',
vol.id, vol.size, vol_size)
vol.extend(vol_size)
# Return size and tell CO we need a call node expansion to finish
# if it's currently attached (will be called now), or if it's a mount
# volume even if it's detached (it will be called after it is staged).
node_expansion = bool(used or self._get_fs_type(vol))
current_size = int(vol_size * constants.GB)
return types.CtrlExpandResp(capacity_bytes=current_size,
node_expansion_required=node_expansion)
class Node(v1_base.Node):
CSI = csi
TYPES = types
NODE_CAPABILITIES = [types.NodeCapabilityType.STAGE_UNSTAGE_VOLUME,
types.NodeCapabilityType.GET_VOLUME_STATS,
types.NodeCapabilityType.EXPAND_VOLUME]
NODE_TOPOLOGY = None
EXT_FS = ('ext2', 'ext3', 'ext4')
def _disable_features(self, features):
# Expand is reported both as Controller, Node, and Plugin capabilities
# Add expand plugin capabilities if not disabled
_add_expand_plugin_capabilities(self.PLUGIN_GRPC_CAPABILITIES,
self.disabled_features)
# Expand is enabled by default, so if we don't disable it as a whole
# or disable online we have nothing to do here.
if not (constants.EXPAND_FEATURE in features
or constants.EXPAND_ONLINE_FEATURE in features):
return
# Disabled expand or just online means that the node has nothing to do
capab = self.TYPES.NodeCapabilityType.EXPAND_VOLUME
if capab in self.NODE_CAPABILITIES:
self.NODE_CAPABILITIES.remove(capab)
@common.debuggable
@common.logrpc
@common.require('volume_id', 'volume_path')
@common.Worker.unique('volume_id')
def NodeExpandVolume(self, request, context):
vol = self._get_vol(request.volume_id)
if not vol:
context.abort(grpc.StatusCode.NOT_FOUND,
'Volume %s does not exist' % request.volume_id)
vol_size = vol.size
# If the size is given, check that it makes sense
if request.HasField('capacity_range'):
v_size, min_size, max_size = self._calculate_size(request, context)
if not (min_size <= vol_size <= max_size):
context.abort(grpc.StatusCode.OUT_OF_RANGE,
"New size requested (%s) doesn't match "
"controller resized volume (%s)" %
(v_size, vol.size))
device, private_bind = self._get_vol_device(request.volume_id)
# Volume is not mounted, nothing to do (could be a second call)
if not device:
context.abort(grpc.StatusCode.FAILED_PRECONDITION,
'Volume is not mounted, cannot resize')
# TODO: Check it's the right path
# The extend call will return the size in bytes, like we want
current_size = vol.connections[0].extend()
# Extend filesystem if necessary
self._resize_fs(context, vol, private_bind)
return types.NodeExpandResp(capacity_bytes=current_size)
def _resize_fs(self, context, vol, private_bind):
fs_type = self._get_fs_type(vol)
if not fs_type:
return
# We always do mounted resizing, for available and in-use volumes, so
# we don't have to differentiate between btrfs and xfs, and ext fs.
mounts = self._get_mount(private_bind)
target = mounts[0][1]
# All volumes are mounted on the stage directory, make sure we have the
# right path
if os.path.basename(target) != self.STAGED_NAME:
LOG.warning("target didn't have the /stage ending")
target = os.path.join(target, self.STAGED_NAME)
# Our volumes don't have partitions, so we don't need to extend them.
# For ext3 we need to have the resize_inode feature enabled to be able
# to do mounted resize, which is enabled by default in /etc/mkefs.conf
if fs_type in self.EXT_FS:
command = ('resize2fs', '-f', '-F', private_bind)
elif fs_type == 'btrfs':
command = ('btrfs', 'filesystem', 'resize', 'max', target)
elif fs_type == 'xfs':
command = ('xfs_growfs', '-d', target)
else:
            context.abort(grpc.StatusCode.FAILED_PRECONDITION,
                          "Don't know how to extend %s filesystem" % fs_type)
self.sudo(*command)
class All(Controller, Node):
def __init__(self, server, persistence_config, backend_config,
ember_config=None, node_id=None, storage_nw_ip=None):
Controller.__init__(self, server,
persistence_config=persistence_config,
backend_config=backend_config,
ember_config=ember_config)
Node.__init__(self, server, node_id=node_id,
storage_nw_ip=storage_nw_ip)
|
the-stack_106_13121
|
from setuptools import setup
import os
HERE = os.path.dirname(__file__)
VERSION_FILE = os.path.join(HERE, 'VERSION.txt')
setup(name='jrnlsh',
version_config = {
'count_commits_from_version_file': True,
'template': '{tag}',
'dev_template': '{tag}.dev.{ccount}',
'dirty_template': '{tag}.dev.{ccount}',
'version_file': VERSION_FILE,
},
setup_requires=['setuptools-git-versioning'],
description='A simple shell wrapper for jrnl',
url='http://github.com/darkpixel/jrnlsh',
author='Aaron C. de Bruyn',
author_email='[email protected]',
license='MIT',
packages=['jrnlsh'],
entry_points='''
[console_scripts]
jrnlsh=jrnlsh:run_cli
''',
zip_safe=False)
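# Usage sketch (not part of the original file): with setuptools-git-versioning
# configured above, the package version is derived from the git tag and commit
# count (see the templates in version_config); after `pip install .` the
# console_scripts entry point exposes a `jrnlsh` command that runs
# jrnlsh.run_cli.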
|
the-stack_106_13122
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
"""This script requires the following environment variables:
- IAM_TOKEN
- IAM_REFRESH_TOKEN
- IAM_CLIENT_ID
- IAM_CLIENT_SECRET
- MARATHON_USER (used if Marathon cache manager is selected)
- MARATHON_PASSWD (used if Marathon cache manager is selected)
- ZOOKEEPER_HOST_LIST (used if Zookeeper cache manager is selected)
- CACHE_MANAGER [ZOOKEEPER, MARATHON, MEMORY]
"""
from __future__ import print_function
import json
import logging
import os
import subprocess
import sys
import time
from StringIO import StringIO
import requests
from urllib3._collections import HTTPHeaderDict
import pycurl
from cache import MarathonCache, MemoryCache, ZookeeperCache
if sys.version_info.major == 2:
from urlparse import urlsplit
else:
from urllib.parse import urlsplit
CONFIG_FILE_PATH = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"proxy_config.json"
)
class Container(object):
"""Simple object container to simulate JSON obj access."""
def __getattr__(self, name):
setattr(self, name, None)
return getattr(self, name)
def __repr__(self):
return str(vars(self))
class ProxyManager(object):
"""Manager of tokens."""
def __init__(self, env, cache_manager=None):
# Get all environment variables
self.iam = Container()
self.iam.token = env.get('IAM_TOKEN')
self.iam.client_id = env.get('IAM_CLIENT_ID')
self.iam.client_secret = env.get('IAM_CLIENT_SECRET')
self.marathon = Container()
self.marathon.user = env.get('MARATHON_USER'),
self.marathon.passwd = env.get('MARATHON_PASSWD')
# CACHE
self.cache_dir = '/tmp'
if cache_manager == 'ZOOKEEPER':
self.cache = ZookeeperCache(env.get('ZOOKEEPER_HOST_LIST'))
elif cache_manager == 'MARATHON':
self.cache = MarathonCache(
self.marathon.user, self.marathon.passwd)
else:
self.cache = MemoryCache()
# LOAD PROXY CONFIG FILE
with open(CONFIG_FILE_PATH) as config_file:
proxy_config = json.load(config_file)
# Configuration containers
self.config = Container()
self.config.local_cache = Container()
self.config.lock_file = Container()
self.config.tts = Container()
self.config.iam = Container()
self.config.user = Container()
# Configuration variables
self.config.local_cache.expiration_time = proxy_config.get(
'local_cache_expiration_time')
self.config.audience = proxy_config.get('audience')
self.config.lock_file.age = proxy_config.get('lock_file_age')
self.config.lock_file.path = "{}/lock".format(self.cache_dir)
self.config.tts.url = proxy_config.get('tts')
self.config.tts.output_data = '{}/output.json'.format(self.cache_dir)
self.config.iam.endpoint = proxy_config.get('iam_endpoint')
self.config.iam.token_endpoint = self.config.iam.endpoint + 'token'
self.config.iam.introspect_endpoint = self.config.iam.endpoint + 'introspect'
self.config.iam.credential_endpoint = proxy_config.get(
'credential_endpoint')
self.config.user.cert = "{}/usercert.crt".format(self.cache_dir)
self.config.user.key = "{}/userkey.key".format(self.cache_dir)
self.config.user.passwd = "{}/userpasswd.txt".format(self.cache_dir)
self.config.user.proxy = "{}/userproxy.pem".format(self.cache_dir)
self.exchanged_token = ""
def check_tts_data(self):
"""Checks and refresh tts data.
.. note::
Workflow:
- Check tts output data file
- if YES -> Check if expired
- if YES -> get_tts_data(True)
- if NO -> Token OK
- if NO -> get_exchange_token()
- if OK [returns (str) exchange_token]
- get_tts_data(exchange_token)
- if FAILS [returns int] -> Check CACHE for refresh token
- if YES -> get_tts_data(True) [True to use refresh token]
- if NO -> ERROR
"""
logging.debug("Check tts output data: %s", self.config.tts.output_data)
if os.path.exists(self.config.tts.output_data):
ctime = os.stat(self.config.tts.output_data).st_ctime
since = time.time() - ctime
logging.debug("Check expiration time: %s > %s",
since, self.config.local_cache.expiration_time)
if since > self.config.local_cache.expiration_time:
logging.debug("Token about to expire. Get tts data...")
tts_data = self.get_tts_data(True)
else:
logging.debug("Token OK.")
return True
else:
logging.debug("Token not exist, get exchange token...")
self.exchanged_token = self.get_exchange_token()
if isinstance(self.exchanged_token, int):
logging.error("Get exchange token error: %s",
self.exchanged_token)
                if self.cache.refresh_token.value is None:
logging.error("Problem with Token Server")
return False
else:
logging.error("Exchange with refresh token")
tts_data = self.get_tts_data(True)
else:
logging.debug("Token OK.")
tts_data = self.get_tts_data(self.exchanged_token)
return tts_data
def get_certificate(self):
"""Retrieve the certificate.
:returns: The given tts token
:raises requests.exceptions: possible on redirect
:raises pycurl.exceptions: during the call of iam credential endpoint
.. todo::
Manage controls (gestisci controlli)
"""
data = json.dumps({"service_id": "x509"})
logging.debug("Create headers and buffers")
headers = StringIO()
buffers = StringIO()
logging.debug("Prepare CURL")
curl = pycurl.Curl()
curl.setopt(pycurl.URL, bytes(self.config.iam.credential_endpoint))
curl.setopt(pycurl.HTTPHEADER, [
'Authorization: Bearer {}'.format(str(self.exchanged_token).split('\n', 1)[0]),
'Content-Type: application/json'
])
curl.setopt(pycurl.POST, 1)
curl.setopt(pycurl.POSTFIELDS, data)
curl.setopt(curl.WRITEFUNCTION, buffers.write)
curl.setopt(curl.HEADERFUNCTION, headers.write)
curl.setopt(curl.VERBOSE, True)
try:
logging.debug("Perform CURL call")
curl.perform()
status = curl.getinfo(curl.RESPONSE_CODE)
logging.debug("Result status: %s", status)
logging.debug("Close CURL")
curl.close()
logging.debug("Get body content")
body = buffers.getvalue()
logging.debug("Body: %s", body)
if str(status) != "303":
logging.error(
"On 'get redirected with curl': http error: %s", str(status))
return False
except pycurl.error as error:
            errno, errstr = error.args
logging.error('A pycurl error n. %s occurred: %s', errno, errstr)
return False
logging.debug("Manage redirect")
for item in headers.getvalue().split("\n"):
if "location" in item:
# Example item
# "location: https://watts-dev.data.kit.edu/api/v2/iam/credential_data/xxx"
logging.debug("Item url: %s", item)
url_path = urlsplit(item.strip().split()[1]).path
redirect = self.config.tts.url + url_path
logging.debug("Redirect location: %s", redirect)
headers = {'Authorization': 'Bearer ' +
self.exchanged_token.strip()}
response = requests.get(redirect, headers=headers)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as err:
# Whoops it wasn't a 200
logging.error(
"Error in get certificate redirect: %s", str(err))
return False
with open('/tmp/output.json', 'w') as outf:
outf.write(response.content)
else:
logging.error("No location in redirect response")
return True
def get_exchange_token(self):
"""Retrieve the access token.
Exchange the access token with the given client id and secret.
The refresh token in cached and the exchange token is kept in memory.
.. todo::
Add controls (aggiungi controlli)
"""
logging.debug("Prepare header")
data = HTTPHeaderDict()
data.add('grant_type', 'urn:ietf:params:oauth:grant-type:token-exchange')
data.add('audience', self.config.audience)
data.add('subject_token', self.iam.token)
data.add('scope', 'openid profile offline_access')
logging.debug("Call get exchanged token with data: '%s'", str(data))
response = requests.post(self.config.iam.token_endpoint, data=data, auth=(
self.iam.client_id, self.iam.client_secret), verify=True)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as err:
# Whoops it wasn't a 200
logging.error("Error in get exchange token: %s", err)
return response.status_code
result = json.loads(response.content)
logging.debug("Result: %s", result)
logging.debug("Override refresh token")
with open('/tmp/refresh_token', 'w') as outf:
outf.write(result["refresh_token"])
self.cache.refresh_token.value = result["refresh_token"]
return result["access_token"]
def introspection(self, iam_client_id, iam_client_secret, exchanged_token):
"""Get info through introspection with the given client id, secret and token.
.. todo::
Add controls (aggiungi controlli)
:param iam_client_id: param iam_client_secret:
:param exchanged_token:
:param iam_client_secret:
"""
iam_client_id = self.iam.client_id
iam_client_secret = self.iam.client_secret
data = HTTPHeaderDict()
data.add('token', exchanged_token)
response = requests.post(self.config.iam.introspect_endpoint, data=data, auth=(
iam_client_id, iam_client_secret), verify=False)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as err:
# Whoops it wasn't a 200
logging.error("Error in introspection: %s", err)
logging.error("HTTP error. Response status: %s",
response.status_code)
return response.status_code
with open('/tmp/introspection', 'w') as outf:
outf.write(response.content)
def refresh_token(self, refresh_token):
"""Request with refresh token.
.. todo::
Manage result out of the function (gestisci result fuori dalla funzione)
:param refresh_token:
"""
data = HTTPHeaderDict()
data.add('client_id', self.iam.client_id)
data.add('client_secret', self.iam.client_secret)
data.add('grant_type', 'refresh_token')
data.add('refresh_token', refresh_token)
logging.debug("Refresh token. data: '%s'", str(data))
response = requests.post(
self.config.iam.token_endpoint, data=data, verify=True)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as err:
# Whoops it wasn't a 200
logging.error("Error in refresh_token: %s", err)
logging.error("HTTP error. Response status: %s",
response.status_code)
return response.status_code
logging.debug("Response content: %s", response.content)
result = json.loads(response.content)
return result["access_token"]
def get_tts_data(self, exchange=False):
"""Get TTS data using a lock procedure.
Phases:
- get lock
- retrieve_tts_data
- release lock
:param exchange: Bool (Default value = False)
"""
logging.debug("Check lock file %s", self.config.lock_file.path)
if os.path.exists(self.config.lock_file.path):
ctime = os.stat(self.config.lock_file.path).st_ctime
age = time.time() - ctime
logging.debug("Check age of %s: %s < %s",
self.config.lock_file.path, age, self.config.lock_file.age)
if age < self.config.lock_file.age:
logging.debug("Update in progres. Go to sleep...")
time.sleep(self.config.lock_file.age - age)
else:
logging.debug("Stale lock file. Removing %s...",
self.config.lock_file.path)
os.remove(self.config.lock_file.path)
logging.debug("Update last use time of %s", self.config.lock_file.path)
open(self.config.lock_file.path, 'w+').close()
if exchange:
logging.debug("Exchange /tmp/refresh_token")
            if self.cache.refresh_token.value is None:
                with open('/tmp/refresh_token') as refresh_t_file:
refresh_token = refresh_t_file.read()
logging.debug("Refresh token")
self.exchanged_token = self.refresh_token(
refresh_token.strip())
if isinstance(self.exchanged_token, int):
logging.error("Error in refresh_token")
else:
self.exchanged_token = self.refresh_token(
self.cache.refresh_token.value)
if isinstance(self.exchanged_token, int):
logging.error("Error in refresh_token with Zookeeper")
else:
with open('/tmp/refresh_token', 'w') as outf:
outf.write(self.cache.refresh_token.value)
logging.debug("Refresh token")
if self.get_certificate():
logging.debug("Load json and prepare objects")
with open('/tmp/output.json') as tts_data_file:
tts_data = json.load(tts_data_file)
with open(self.config.user.cert, 'w+') as cur_file:
cur_file.write(
str(tts_data['credential']['entries'][0]['value']))
with open(self.config.user.key, 'w+') as cur_file:
cur_file.write(
str(tts_data['credential']['entries'][1]['value']))
with open(self.config.user.passwd, 'w+') as cur_file:
cur_file.write(
str(tts_data['credential']['entries'][2]['value']))
try:
logging.debug("Change user key mod")
os.chmod(self.config.user.key, 0o600)
except OSError as err:
logging.error(
"Permission denied to chmod passwd file: %s", err)
return False
logging.debug("Remove lock")
os.remove(self.config.lock_file.path)
return True
return False
def generate_proxy(self):
"""Generates proxy with grid-proxy-init only if there are not errors."""
if self.check_tts_data():
logging.debug("Generating proxy for %s", self.exchanged_token)
command = "grid-proxy-init -valid 160:00 -key {} -cert {} -out {} -pwstdin ".format(
self.config.user.key, self.config.user.cert, self.config.user.proxy
)
with open(self.config.user.passwd) as my_stdin:
my_passwd = my_stdin.read()
proxy_init = subprocess.Popen(
command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True
)
logging.debug("Execute proxy")
proxy_out, proxy_err = proxy_init.communicate(input=my_passwd)
logging.debug("Proxy result: %s", proxy_init.returncode)
if proxy_init.returncode > 0:
logging.error("grid-proxy-init failed for token %s",
self.exchanged_token)
logging.error("grid-proxy-init failed stdout %s", proxy_out)
logging.error("grid-proxy-init failed stderr %s", proxy_err)
else:
return self.config.user.proxy
else:
logging.error("Error occured in check_tts_data!")
def get():
"""Execute the get_proxy routine."""
logging.info("CALLING GET PROXY")
# imports tokens, id and secret
environment = {
'IAM_TOKEN': os.environ.get("IAM_TOKEN", None),
'IAM_REFRESH_TOKEN': os.environ.get("IAM_REFRESH_TOKEN", None),
'IAM_CLIENT_ID': os.environ.get("IAM_CLIENT_ID", None),
'IAM_CLIENT_SECRET': os.environ.get("IAM_CLIENT_SECRET", None),
'MARATHON_USER': os.environ.get("MARATHON_USER", None),
'MARATHON_PASSWD': os.environ.get("MARATHON_PASSWD", None),
'ZOOKEEPER_HOST_LIST': os.environ.get("ZOOKEEPER_HOST_LIST", None),
'CACHE_MANAGER': os.environ.get("CACHE_MANAGER", False)
}
# Store environment in config file
with open(CONFIG_FILE_PATH) as config_file:
proxy_config = json.load(config_file)
proxy_config['environment'] = environment
with open(CONFIG_FILE_PATH, "w") as config_file:
json.dump(proxy_config, config_file)
# Logging environment
logging.info("IAM_TOKEN = %s", environment.get('IAM_TOKEN'))
logging.info("IAM_REFRESH_TOKEN = %s",
environment.get('IAM_REFRESH_TOKEN'))
logging.info("IAM_CLIENT_= %s", environment.get('IAM_CLIENT_ID'))
logging.info("IAM_CLIENT_SECRET = %s",
environment.get('IAM_CLIENT_SECRET'))
logging.info("MARATHON_USER = %s", environment.get('MARATHON_USER'))
logging.info("MARATHON_PASSWD = %s", environment.get('MARATHON_PASSWD'))
logging.info("ZOOKEEPER_HOST_LIST = %s",
environment.get('ZOOKEEPER_HOST_LIST'))
logging.info("CACHE_MANAGER = %s", environment.get('CACHE_MANAGER'))
cache_manager = None
if environment.get('CACHE_MANAGER') == 'ZOOKEEPER' and environment.get('ZOOKEEPER_HOST_LIST') is not None:
cache_manager = 'ZOOKEEPER'
elif environment.get('CACHE_MANAGER') == 'MARATHON' and environment.get('MARATHON_USER') is not None and environment.get('MARATHON_PASSWD') is not None:
cache_manager = 'MARATHON'
elif environment.get('CACHE_MANAGER'):
# CACHE MANAGER is set and is not recognized
raise Exception("Unknown CACHE MANAGER")
proxy_manager = ProxyManager(environment, cache_manager)
proxy_file = proxy_manager.generate_proxy()
if proxy_file is not None:
header = {
'Content-Type': "application/octet-stream",
'filename': ".pem"
}
with open(proxy_file, 'rb') as file_:
data = file_.read()
return header, data
logging.error("Cannot find Proxy file: '%s'", proxy_file)
header = {
'Content-Type': "text/html"
}
return header, "<p>grid-proxy-info failed</p>"
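# ---------------------------------------------------------------------------
# Usage sketch (not part of the original script; the environment variables
# listed in the module docstring must be exported first):
#
#   headers, body = get()
#   if headers['Content-Type'] == 'application/octet-stream':
#       with open('userproxy.pem', 'wb') as out:   # file name is illustrative
#           out.write(body)
#   else:
#       print(body)   # "<p>grid-proxy-info failed</p>"
# ---------------------------------------------------------------------------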
|
the-stack_106_13124
|
from __future__ import print_function
from __future__ import division
import os
from string import Template
import cantera as ct
import numpy as np
def write(solution, factor, fname):
"""Function to write cantera solution object to inp file.
    :param solution:
        Cantera solution object
    :param factor:
        vector of size n_reactions (multiplies the pre-exponential factors)
    :param fname:
        name of the converted Chemkin mechanism file
    :return:
        Name of trimmed Mechanism file (.inp)
    >>> soln2ck.write(gas, factor, 'gas.inp')
"""
trimmed_solution = solution
input_file_name_stripped = trimmed_solution.name
cwd = os.getcwd()
# output_file_name = os.path.join(cwd,
# 'pym_' +
# input_file_name_stripped +
# '.inp')
output_file_name = os.path.join(fname)
with open(output_file_name, 'w+') as f:
#Work functions
        calories_constant = 4184.0  # 4184 J per kcal; converts activation energies from J/kmol to cal/mol
def eliminate(input_string, char_to_replace, spaces='single'):
"""
Eliminate characters from a string
:param input_string
string to be modified
:param char_to_replace
array of character strings to be removed
"""
for char in char_to_replace:
input_string = input_string.replace(char, "")
if spaces == 'double':
input_string = input_string.replace(" ", " ")
return input_string
def replace_multiple(input_string, replace_list):
"""
Replace multiple characters in a string
:param input_string
string to be modified
:param replace list
list containing items to be replaced (value replaces key)
"""
for original_character, new_character in replace_list.items():
input_string = input_string.replace(original_character,
new_character)
return input_string
def build_arrhenius(equation_object, equation_type, uf):
"""
Builds Arrhenius coefficient string
            :param equation_object
                cantera equation object
            :param equation_type:
                string of equation type
            :param uf
                per-reaction multiplier applied to the pre-exponential factor
"""
coeff_sum = sum(equation_object.reactants.values())
# weiqi: add the uf
pre_exponential_factor = equation_object.rate.pre_exponential_factor*uf
temperature_exponent = '{:.3f}'.format(equation_object.rate.temperature_exponent)
activation_energy = '{:.2f}'.format(equation_object.rate.activation_energy / calories_constant)
if equation_type == 'ElementaryReaction':
if coeff_sum == 1:
pre_exponential_factor = str(
'{:.3E}'.format(pre_exponential_factor))
if coeff_sum == 2:
pre_exponential_factor = str(
'{:.3E}'.format(pre_exponential_factor*10**3))
if coeff_sum == 3:
pre_exponential_factor = str(
'{:.3E}'.format(pre_exponential_factor*10**6))
if equation_type == 'ThreeBodyReaction':
if coeff_sum == 1:
pre_exponential_factor = str(
'{:.3E}'.format(pre_exponential_factor*10**3))
if coeff_sum == 2:
pre_exponential_factor = str(
'{:.3E}'.format(pre_exponential_factor*10**6))
if (equation_type != 'ElementaryReaction'
and equation_type != 'ThreeBodyReaction'):
pre_exponential_factor = str(
'{:.3E}'.format(pre_exponential_factor))
arrhenius = [pre_exponential_factor,
temperature_exponent,
activation_energy]
return arrhenius
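        # Note on the 10**3 / 10**6 factors above (explanatory comment, not in
        # the original file): Cantera keeps pre-exponential factors in SI units
        # (kmol, m^3, s) while Chemkin input expects mol, cm^3 and s, and each
        # additional reaction order scales the factor by 10**3 because
        # 1 m^3/kmol = 10**3 cm^3/mol. Three-body reactions carry one extra
        # implicit concentration [M], which is why their factors are shifted by
        # one more power of 10**3 than elementary reactions with the same
        # number of reactants.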
def build_modified_arrhenius(equation_object, t_range, uf):
"""
Builds Arrhenius coefficient strings for high and low temperature ranges
            :param equation_object
                cantera equation object
            :param t_range:
                simple string ('high' or 'low') to designate temperature range
            :param uf
                per-reaction multiplier applied to the pre-exponential factor
"""
coeff_sum = sum(equation_object.reactants.values())
if t_range == 'high':
pre_exponential_factor = equation_object.high_rate.pre_exponential_factor*uf
temperature_exponent = '{:.3f}'.format(equation_object.high_rate.temperature_exponent)
activation_energy = '{:.2f}'.format(equation_object.high_rate.activation_energy/calories_constant)
if coeff_sum == 1:
pre_exponential_factor = str(
'{:.3E}'.format(pre_exponential_factor))
if coeff_sum == 2:
pre_exponential_factor = str(
'{:.3E}'.format(pre_exponential_factor*10**3))
if coeff_sum == 3:
pre_exponential_factor = str(
'{:.3E}'.format(pre_exponential_factor*10**6))
arrhenius_high = [pre_exponential_factor,
temperature_exponent,
activation_energy]
return arrhenius_high
if t_range == 'low':
pre_exponential_factor = equation_object.low_rate.pre_exponential_factor*uf
temperature_exponent = '{:.3f}'.format(equation_object.low_rate.temperature_exponent)
activation_energy = '{:.2f}'.format(equation_object.low_rate.activation_energy/calories_constant)
if coeff_sum == 1:
pre_exponential_factor = str(
'{:.3E}'.format(pre_exponential_factor*10**3))
if coeff_sum == 2:
pre_exponential_factor = str(
'{:.3E}'.format(pre_exponential_factor*10**6))
if coeff_sum == 3:
pre_exponential_factor = str(
'{:.3E}'.format(pre_exponential_factor*10**9))
arrhenius_low = [pre_exponential_factor,
temperature_exponent,
activation_energy]
return arrhenius_low
def build_nasa(nasa_coeffs, row):
"""
Creates string of nasa polynomial coefficients
:param nasa_coeffs
cantera species thermo coefficients object
:param row
which row to write coefficients in
"""
line_coeffs = ''
lines = [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14]]
line_index = lines[row-2]
for ix, c in enumerate(nasa_coeffs):
if ix in line_index:
if c >= 0:
line_coeffs += ' '
line_coeffs += str('{:.8e}'.format(c))
return line_coeffs
def build_species_string():
"""
formats species string for writing
"""
species_list_string = ''
line = 1
for sp_index, sp_string in enumerate(trimmed_solution.species_names):
sp = ' '
                # get the length the string will have once the next species is added
length_new = len(sp_string)
length_string = len(species_list_string)
total = length_new + length_string + 3
#if string will go over width, wrap to new line
if total >= 70*line:
species_list_string += '\n'
line += 1
species_list_string += sp_string + ((16-len(sp_string))*sp)
return species_list_string
        def section_break(title):
            """Return a comment banner containing the given section title."""
            return ('!' + "-"*75 + '\n'
                    '! ' + title + '\n'
                    '!' + "-"*75 + '\n')
        # Write title block to file
        f.write(section_break(
            'Chemkin File converted from Solution Object by pyMARS'))
#Write phase definition to file
element_names = eliminate(str(trimmed_solution.element_names),
['[', ']', '\'', ','])
element_string = Template(
'ELEMENTS\n' +
'$element_names\n' +
'END\n')
f.write(element_string.substitute(element_names=element_names))
species_names = build_species_string()
species_string = Template(
'SPECIES\n' +
'$species_names\n'+
'END\n')
f.write(species_string.substitute(species_names=species_names))
        # Write species data to file
        f.write(section_break('Species data'))
# f.write('THERMO ALL' + '\n' +
# ' 300.000 1000.000 5000.000' +'\n')
# phase_unknown_list = []
# #write data for each species in the Solution object
# for sp_index in range(len(trimmed_solution.species_names)):
# d = 3.33564e-30 #1 debye = d coulomb-meters
# species = trimmed_solution.species(sp_index)
# name = str(trimmed_solution.species(sp_index).name)
# nasa_coeffs = trimmed_solution.species(sp_index).thermo.coeffs
# #Species attributes from trimmed solution object
# t_low = '{0:.3f}'.format(species.thermo.min_temp)
# t_max = '{0:.3f}'.format(species.thermo.max_temp)
# t_mid = '{0:.3f}'.format(species.thermo.coeffs[0])
# temp_range = str(t_low) + ' ' + str(t_max) + ' ' + t_mid
# species_comp = ''
# for atom in species.composition:
# species_comp += '{:<4}'.format(atom)
# species_comp += str(int(species.composition[atom]))
# if type(species.transport).__name__ == 'GasTransportData':
# species_phase = 'G'
# else:
# phase_unknown_list.append(name)
# species_phase = 'G'
# line_1 = (
# '{:<18}'.format(name) +
# '{:<6}'.format(' ') +
# '{:<20}'.format(species_comp) +
# '{:<4}'.format(species_phase) +
# '{:<31}'.format(temp_range) +
# '{:<1}'.format('1') +
# '\n')
# f.write(line_1)
# line_2_coeffs = build_nasa(nasa_coeffs, 2)
# line_2 = line_2_coeffs + ' 2\n'
# f.write(line_2)
# line_3_coeffs = build_nasa(nasa_coeffs, 3)
# line_3 = line_3_coeffs + ' 3\n'
# f.write(line_3)
# line_4_coeffs = build_nasa(nasa_coeffs, 4)
# line_4 = line_4_coeffs + ' 4\n'
# f.write(line_4)
# f.write('END\n')
        # Write reaction data to file
        f.write(section_break('Reaction Data'))
f.write('REACTIONS\n')
#write data for each reaction in the Solution Object
for reac_index in range(len(trimmed_solution.reaction_equations())):
# factor for the perturbation
uf = factor[reac_index]
# print( str(reac_index+1) + ' ' + trimmed_solution.reaction_equation(reac_index) + ' ' + str(uf) )
equation_string = str(trimmed_solution.reaction_equation(reac_index))
equation_string = eliminate(equation_string, ' ', 'single')
equation_object = trimmed_solution.reaction(reac_index)
equation_type = type(equation_object).__name__
m = str(reac_index+1)
coeff_sum = sum(equation_object.reactants.values())
# print(str(m) ,equation_string, equation_type, str(coeff_sum))
# print(uf)
if equation_type == 'ThreeBodyReaction':
arrhenius = build_arrhenius(equation_object, equation_type, uf)
main_line = (
'{:<51}'.format(equation_string) +
'{:>9}'.format(arrhenius[0]) +
'{:>9}'.format(arrhenius[1]) +
'{:>11}'.format(arrhenius[2]) +
'\n')
f.write(main_line)
                # trim the efficiencies list
efficiencies = equation_object.efficiencies
trimmed_efficiencies = equation_object.efficiencies
for s in efficiencies:
if s not in trimmed_solution.species_names:
del trimmed_efficiencies[s]
replace_list_2 = {
'{':'',
'}':'/',
'\'':'',
':':'/',
',':'/'}
efficiencies_string = replace_multiple(
str(trimmed_efficiencies),
replace_list_2)
secondary_line = str(efficiencies_string) + '\n'
                if efficiencies:
f.write(secondary_line)
if equation_type == 'ElementaryReaction':
arrhenius = build_arrhenius(equation_object, equation_type, uf)
main_line = (
'{:<51}'.format(equation_string) +
'{:>9}'.format(arrhenius[0]) +
'{:>9}'.format(arrhenius[1]) +
'{:>11}'.format(arrhenius[2]) +
'\n')
f.write(main_line)
if equation_type == 'FalloffReaction':
arr_high = build_modified_arrhenius(equation_object, 'high', uf)
main_line = (
'{:<51}'.format(equation_string) +
'{:>9}'.format(arr_high[0]) +
'{:>9}'.format(arr_high[1]) +
'{:>11}'.format(arr_high[2]) +
'\n')
f.write(main_line)
arr_low = build_modified_arrhenius(equation_object, 'low', uf)
second_line = (
' LOW /' +
' ' + arr_low[0] +
' ' + arr_low[1] +
' ' + arr_low[2] + '/\n')
f.write(second_line)
j = equation_object.falloff.parameters
                # if optional Troe falloff parameters are included:
try:
# third_line = (
# ' TROE/' +
# ' ' + str(j[0]) +
# ' ' + str(j[1]) +
# ' ' + str(j[2]) +
# ' ' + str(j[3]) +' /\n')
# for Juan Li H2
third_line = (
' TROE/' +
' ' + str(j[0]) +
' ' + str(j[1]) +
' ' + str(j[2]) +' /\n')
f.write(third_line)
except IndexError:
pass
                # trim the efficiencies list
efficiencies = equation_object.efficiencies
trimmed_efficiencies = equation_object.efficiencies
for s in efficiencies:
if s not in trimmed_solution.species_names:
del trimmed_efficiencies[s]
replace_list_2 = {
'{':'',
'}':'/',
'\'':'',
':':'/',
',':'/'}
efficiencies_string = replace_multiple(
str(trimmed_efficiencies),
replace_list_2)
fourth_line = str(efficiencies_string) + '\n'
                if efficiencies:
f.write(fourth_line)
            # duplicate option
if equation_object.duplicate is True:
duplicate_line = ' DUPLICATE' +'\n'
f.write(duplicate_line)
f.write('END')
return output_file_name
if __name__ == '__main__':
mech = 'konnov/chem.cti'
gas = ct.Solution(mech)
factor = np.ones(gas.n_reactions)
fname = 'test/chem.inp'
print(fname)
output_file_name = write(gas, factor, fname)
dk = 0.05
for i in range(gas.n_reactions):
factor = np.ones(gas.n_reactions)
factor[i] = 1+dk
fname = 'test/chem.inp_'+str(i)
write(gas, factor, fname)
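# Illustrative follow-up (hypothetical, not part of this script): with the
# perturbed mechanisms written above, a brute-force sensitivity coefficient of
# an observable tau (e.g. ignition delay) with respect to reaction i can be
# estimated as
#
#     S_i = (ln(tau_i) - ln(tau_0)) / ln(1 + dk)
#
# where tau_i is computed with 'test/chem.inp_<i>' and tau_0 with the
# unperturbed 'test/chem.inp'.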
|
the-stack_106_13125
|
from unittest import mock
from concurrent.futures import Future
import json
from taiga_ncurses.ui import views, signals
from taiga_ncurses.executor import Executor
from . import fixtures
# Auth
def login_view(username, password):
login_view = views.auth.LoginView("username", "password")
login_view._username_editor.set_edit_text(username)
login_view._password_editor.set_edit_text(password)
return login_view
def successful_login_response(username):
return {
'auth_token': 'eyJ1c2VyX2lkIjoxfQ:1Vmjdp:ILIJVRazEdK_pObFedQc2aZNWd0',
'color': '',
'default_language': '',
'default_timezone': '',
'description': '',
'email': '[email protected]',
'first_name': '',
'full_name': 'admin',
'id': 1,
'is_active': True,
'last_name': '',
'notify_changes_by_me': False,
'notify_level': 'all_owned_projects',
'photo': '',
'projects': [],
'username': username,
}
# Projects
def projects():
return json.loads(fixtures.PROJECTS)
def project(**kwargs):
defaults = json.loads(fixtures.PROJECT)
defaults.update(kwargs)
return defaults
def project_stats():
return json.loads(fixtures.PROJECT_STATS)
def project_issues_stats():
return json.loads(fixtures.PROJECT_ISSUES_STATS)
# Milestones
def milestone():
return json.loads(fixtures.MILESTONE)
def milestone_stats():
return json.loads(fixtures.MILESTONE_STATS)
# User Stories
def unassigned_user_stories():
return json.loads(fixtures.UNASSIGNED_USER_STORIES)
def user_stories():
return json.loads(fixtures.USER_STORIES)
def successful_create_user_story_response(subject):
return {
"tags": [],
"points": {"4": 1, "1": 1, "2": 1, "3": 1},
"total_points": 0.0,
"comment": "",
"id": 114,
"ref": 30,
"milestone": None,
"project": 1,
"owner": 1,
"status": 1,
"is_closed": False,
"order": 100,
"created_date": "2013-12-31T16:56:38.115Z",
"modified_date": "2013-12-31T16:56:38.115Z",
"finish_date": None,
"subject": subject,
"description": "",
"client_requirement": False,
"team_requirement": False,
"watchers": []
}
def successful_update_user_story_response(subject):
return {
"tags": [],
"points": {"4": 1, "1": 1, "2": 1, "3": 1},
"total_points": 0.0,
"comment": "",
"id": 114,
"ref": 30,
"milestone": None,
"project": 1,
"owner": 1,
"status": 1,
"is_closed": False,
"order": 100,
"created_date": "2013-12-31T16:56:38.115Z",
"modified_date": "2013-12-31T16:56:38.115Z",
"finish_date": None,
"subject": subject,
"description": "",
"client_requirement": False,
"team_requirement": False,
"watchers": []
}
def successful_create_user_stories_in_bulk_response():
return True
def successful_update_user_stories_order_response():
return True
def successful_delete_user_story_response():
return True
# Tasks
def tasks():
return json.loads(fixtures.MILESTONE_TASKS)
def successful_create_task_response(subject, user_story):
return {
"tags": "",
"comment": "",
"id": 35,
"user_story": user_story,
"ref": 36,
"owner": 3,
"status": 1,
"project": 1,
"milestone": 4,
"created_date": "2013-12-20T09:53:53.462Z",
"modified_date": "2013-12-26T16:54:54.931Z",
"finished_date": None,
"subject": subject,
"description": "Praesentium tempora molestias quis autem iste. Esse perspiciatis eos odio nemo, accusamus adipisci doloremque nesciunt temporibus consequatur dolore tempora dolorum, necessitatibus fugiat non veniam mollitia adipisci nesciunt quibusdam accusamus quidem quis consequuntur, error sunt fugit dolorem suscipit, rem numquam dicta nemo sapiente.",
"assigned_to": 9,
"is_iocaine": False,
"watchers": []
}
def successful_update_task_response(subject, user_story):
return {
"tags": "",
"comment": "",
"id": 35,
"user_story": user_story,
"ref": 36,
"owner": 3,
"status": 1,
"project": 1,
"milestone": 4,
"created_date": "2013-12-20T09:53:53.462Z",
"modified_date": "2013-12-26T16:54:54.931Z",
"finished_date": None,
"subject": subject,
"description": "Praesentium tempora molestias quis autem iste. Esse perspiciatis eos odio nemo, accusamus adipisci doloremque nesciunt temporibus consequatur dolore tempora dolorum, necessitatibus fugiat non veniam mollitia adipisci nesciunt quibusdam accusamus quidem quis consequuntur, error sunt fugit dolorem suscipit, rem numquam dicta nemo sapiente.",
"assigned_to": 9,
"is_iocaine": False,
"watchers": []
}
def successful_delete_task_response():
return True
# Issues
def issues():
return json.loads(fixtures.ISSUES)
def successful_create_issue_response(subject):
return {
"tags": [
"ratione",
"omnis",
"saepe",
"tempora",
"repellat"
],
"comment": "",
"is_closed": False,
"id": 1,
"ref": 2,
"owner": 2,
"status": 7,
"severity": 5,
"priority": 2,
"type": 2,
"milestone": None,
"project": 1,
"created_date": "2013-12-20T09:53:59.044Z",
"modified_date": "2013-12-20T09:53:59.609Z",
"finished_date": None,
"subject": subject,
"description": "Alias voluptatem nulla quo reiciendis dicta distinctio, quis vel facilis quae dolore rerum earum error nesciunt, ipsam itaque eius placeat doloribus voluptate sequi? Impedit iure adipisci et itaque debitis nihil vel ipsum esse ut perspiciatis. Facilis fuga exercitationem illo ipsam eveniet, tempora assumenda voluptate, tenetur saepe doloribus beatae neque quae quasi culpa reprehenderit et, totam temporibus deleniti consectetur rerum quis eaque commodi.",
"assigned_to": 1,
"watchers": []
}
def successful_update_issue_response(subject):
return {
"tags": [
"ratione",
"omnis",
"saepe",
"tempora",
"repellat"
],
"comment": "",
"is_closed": False,
"id": 1,
"ref": 2,
"owner": 2,
"status": 7,
"severity": 5,
"priority": 2,
"type": 2,
"milestone": None,
"project": 1,
"created_date": "2013-12-20T09:53:59.044Z",
"modified_date": "2013-12-20T09:53:59.609Z",
"finished_date": None,
"subject": subject,
"description": "Alias voluptatem nulla quo reiciendis dicta distinctio, quis vel facilis quae dolore rerum earum error nesciunt, ipsam itaque eius placeat doloribus voluptate sequi? Impedit iure adipisci et itaque debitis nihil vel ipsum esse ut perspiciatis. Facilis fuga exercitationem illo ipsam eveniet, tempora assumenda voluptate, tenetur saepe doloribus beatae neque quae quasi culpa reprehenderit et, totam temporibus deleniti consectetur rerum quis eaque commodi.",
"assigned_to": 1,
"watchers": []
}
def successful_delete_issue_response():
return True
# Wiki
def wiki_pages():
return json.loads(fixtures.WIKI_PAGES)
def future(value):
f = Future()
f.set_result(value)
return f
def patched_executor(login_response=future(successful_login_response("admin")),
projects=future(projects()),
project_detail=future(project()),
project_stats=future(project_stats()),
unassigned_user_stories=future(unassigned_user_stories()),
milestone=future(milestone()),
milestone_stats=future(milestone_stats()),
user_stories=future(user_stories()),
create_user_story_response=future(successful_create_user_story_response("Create us")),
update_user_story_response=future(successful_update_user_story_response("Update us")),
create_user_stories_in_bulk_response=future(
successful_create_user_stories_in_bulk_response()),
update_user_stories_order_response=future(successful_update_user_stories_order_response()),
delete_user_story_response=future(successful_delete_user_story_response()),
tasks=future(tasks()),
create_task_response=future(successful_create_task_response("Create task", 1)),
update_task_response=future(successful_update_task_response("Update task", 1)),
delete_task_response=future(successful_delete_task_response()),
project_issues_stats=future(project_issues_stats()),
issues=future(issues()),
create_issue_response=future(successful_create_issue_response("Create issue")),
update_issue_response=future(successful_update_issue_response("Update issue")),
delete_issue_response=future(successful_delete_issue_response()),
wiki_pages=future(wiki_pages())):
executor = Executor(mock.Mock())
executor.login = mock.Mock(return_value=login_response)
executor.projects = mock.Mock(return_value=projects)
executor.project_detail = mock.Mock(return_value=project_detail)
executor.project_stats = mock.Mock(return_value=project_stats)
executor.project_issues_stats = mock.Mock(return_value=project_issues_stats)
executor.user_stories = mock.Mock(return_value=user_stories)
executor.unassigned_user_stories = mock.Mock(return_value=unassigned_user_stories)
executor.create_user_story = mock.Mock(return_value=create_user_story_response)
executor.update_user_story = mock.Mock(return_value=update_user_story_response)
executor.create_user_stories_in_bulk = mock.Mock(return_value=create_user_stories_in_bulk_response)
executor.update_user_stories_order = mock.Mock(return_value=update_user_stories_order_response)
executor.delete_user_story = mock.Mock(return_value=delete_user_story_response)
executor.milestone = mock.Mock(return_value=milestone)
executor.milestone_stats = mock.Mock(return_value=milestone_stats)
executor.tasks = mock.Mock(return_value=tasks)
executor.create_task = mock.Mock(return_value=create_task_response)
executor.update_task = mock.Mock(return_value=update_task_response)
executor.delete_task = mock.Mock(return_value=delete_task_response)
executor.issues = mock.Mock(return_value=issues)
executor.create_issue = mock.Mock(return_value=create_issue_response)
executor.update_issue = mock.Mock(return_value=update_issue_response)
executor.delete_issue = mock.Mock(return_value=delete_issue_response)
executor.wiki_pages = mock.Mock(return_value=wiki_pages)
return executor
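# Illustrative usage sketch (kept as comments so importing this module stays
# side-effect free; the test name below is hypothetical):
#
# def test_login_is_mocked():
#     executor = patched_executor()
#     response = executor.login("admin", "123123").result()
#     assert response["username"] == "admin"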
|
the-stack_106_13128
|
from enum import Enum
from dataclasses import dataclass
from typing import (
Callable,
Optional,
Union,
Any
)
from starlette.datastructures import URL
from hius.routing.exceptions import ProtocolError
PROTOCOL_MAPPING = {
'http': {True: 'https', False: 'http'},
'websocket': {True: 'wss', False: 'ws'}
}
class Match(Enum):
NONE = 0
PARTIAL = 1
FULL = 2
@dataclass
class Converter:
regex: str
method: Callable[[Any], Any] = lambda v: v
def __call__(self, value: Any) -> Any:
return self.method(value)
class URLPath:
__slots__ = 'protocol', 'path', 'host',
def __init__(self,
path: str,
protocol: str = None,
host: str = None) -> None:
self.protocol = self.__check_protocol(protocol)
self.path = path
self.host = host
def __eq__(self, other: Any) -> bool:
return self.path == str(other)
def __str__(self) -> str:
return self.path
def __check_protocol(self, protocol: str) -> Optional[str]:
if protocol in ('http', 'websocket', None):
return protocol
raise ProtocolError('protocol should be "http", "websocket" or None')
def format(self, **kwargs) -> 'URLPath':
self.path = self.path.format(**kwargs)
return self
def appendleft(self, path: Union[str, 'URLPath']) -> 'URLPath':
        self.path = str(path) + self.path
return self
def make_absolute_url(self, base_url: Union[str, URL]) -> URL:
if isinstance(base_url, str):
base_url = URL(base_url)
if self.protocol:
scheme = PROTOCOL_MAPPING[self.protocol][base_url.is_secure]
else:
scheme = base_url.scheme
if self.host:
netloc = self.host
else:
netloc = base_url.netloc
path = base_url.path.rstrip('/') + self.path
return URL(scheme=scheme, netloc=netloc, path=path)
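# Illustrative usage sketch (kept as comments; the base URL below is
# hypothetical):
#
# path = URLPath('/users/{id}', protocol='http').format(id=42)
# path.make_absolute_url('https://example.com/api')
# # -> URL('https://example.com/api/users/42')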
|
the-stack_106_13129
|
from rest_framework import viewsets, mixins, status
from rest_framework.authentication import TokenAuthentication
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from core.models import Tag, Ingredient, Recipe
from recipe import serializers
class BaseRecipeAttrViewSet(viewsets.GenericViewSet,
mixins.ListModelMixin,
mixins.CreateModelMixin):
"""Base viewset for user owned recipe attributes"""
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def get_queryset(self):
"""Return objects for the current authenticated user only"""
assigned_only = bool(
int(self.request.query_params.get('assigned_only', 0))
)
queryset = self.queryset
if assigned_only:
queryset = queryset.filter(recipe__isnull=False)
return queryset.filter(
user=self.request.user
).order_by('-name').distinct()
def perform_create(self, serializer):
"""Create a new object"""
serializer.save(user=self.request.user)
class TagViewSet(BaseRecipeAttrViewSet):
"""Manage tags in the database"""
queryset = Tag.objects.all()
serializer_class = serializers.TagSerializer
class IngredientViewSet(BaseRecipeAttrViewSet):
"""Manage ingredients in the database"""
queryset = Ingredient.objects.all()
serializer_class = serializers.IngredientSerializer
class RecipeViewSet(viewsets.ModelViewSet):
"""Manage recipes in the database"""
serializer_class = serializers.RecipeSerializer
queryset = Recipe.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def _params_to_ints(self, qs):
"""Convert a list of string IDs to a list of integers"""
return [int(str_id) for str_id in qs.split(',')]
def get_queryset(self):
"""Retrieve the recipes for the authenticated user"""
tags = self.request.query_params.get('tags')
ingredients = self.request.query_params.get('ingredients')
queryset = self.queryset
if tags:
tag_ids = self._params_to_ints(tags)
queryset = queryset.filter(tags__id__in=tag_ids)
if ingredients:
ingredient_ids = self._params_to_ints(ingredients)
queryset = queryset.filter(ingredients__id__in=ingredient_ids)
return queryset.filter(user=self.request.user)
def get_serializer_class(self):
"""Return appropriate serializer class"""
if self.action == 'retrieve':
return serializers.RecipeDetailSerializer
elif self.action == 'upload_image':
return serializers.RecipeImageSerializer
return self.serializer_class
def perform_create(self, serializer):
"""Create a new recipe"""
serializer.save(user=self.request.user)
@action(methods=['POST'], detail=True, url_path='upload-image')
def upload_image(self, request, pk=None):
"""Upload an image to a recipe"""
recipe = self.get_object()
serializer = self.get_serializer(
recipe,
data=request.data
)
if serializer.is_valid():
serializer.save()
return Response(
serializer.data,
status=status.HTTP_200_OK
)
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
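# Illustrative request examples (the URL prefixes depend on the project's
# router configuration and are hypothetical here):
#
#   GET  /api/recipe/recipes/?tags=1,2&ingredients=3
#        -> the authenticated user's recipes filtered by tag/ingredient ids
#   GET  /api/recipe/tags/?assigned_only=1
#        -> only tags assigned to at least one recipe
#   POST /api/recipe/recipes/<id>/upload-image/
#        -> multipart image upload handled by RecipeImageSerializer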
|
the-stack_106_13132
|
import arcade
import os
res = os.path.dirname(os.path.abspath(__file__))
os.chdir(res)
SCREEN_WIDTH = 1366
SCREEN_HEIGHT = 768
SCREEN_TITLE = "Space Survivor"
class PauseView(arcade.View):
def __init__(self, game_view):
super().__init__()
self.game_view = game_view
self.player = arcade.Sprite()
def on_show(self):
arcade.set_background_color(arcade.color.GRAY)
def on_draw(self):
arcade.start_render()
# Draw player, for effect, on pause screen.
# The previous View (GameView) was passed in
# and saved in self.game_view.
self.player = self.game_view.player
self.player.draw()
        # draw a translucent gray filter over the player
arcade.draw_lrtb_rectangle_filled(left=self.player.left,
right=self.player.right,
top=self.player.top,
bottom=self.player.bottom,
color=arcade.color.GRAY + (200,))
arcade.draw_text("PAUSED", SCREEN_WIDTH/2, SCREEN_HEIGHT/2+50,
arcade.color.BLACK, font_size=50, anchor_x="center")
# Show tip to return or reset
arcade.draw_text("Press P to return",
SCREEN_WIDTH/2,
SCREEN_HEIGHT/2,
arcade.color.BLACK,
font_size=30,
anchor_x="center")
arcade.draw_text("Press Enter to reset or Q to quit",
SCREEN_WIDTH/2,
SCREEN_HEIGHT/2-30,
arcade.color.BLACK,
font_size=20,
anchor_x="center")
def on_key_press(self, key, _modifiers):
# resume game
if key == arcade.key.P:
self.window.show_view(self.game_view)
self.game_view.toggle_pause()
# reset game
if key == arcade.key.ENTER:
self.game_view.setup()
self.window.show_view(self.game_view)
# quit the game
if key == arcade.key.Q:
arcade.close_window()
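# Illustrative wiring sketch (GameView and its toggle_pause() method are
# assumed to exist elsewhere in this project):
#
# def on_key_press(self, key, modifiers):   # inside GameView
#     if key == arcade.key.P:
#         self.toggle_pause()
#         self.window.show_view(PauseView(self))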
|
the-stack_106_13133
|
# -*- coding: utf-8 -*-
import json
from random import randint
from openeo_grass_gis_driver.actinia_processing.base import \
check_node_parents, DataObject, GrassDataType, \
create_output_name
from openeo_grass_gis_driver.models.process_graph_schemas import \
ProcessGraphNode, ProcessGraph
from openeo_grass_gis_driver.models.process_schemas import \
Parameter, ProcessDescription, ReturnValue, ProcessExample
from .base import PROCESS_DICT, PROCESS_DESCRIPTION_DICT, Node
__license__ = "Apache License, Version 2.0"
__author__ = "Markus Metz"
__copyright__ = "Copyright 2018, Sören Gebbert, mundialis"
__maintainer__ = "Soeren Gebbert"
__email__ = "[email protected]"
PROCESS_NAME = "trim_cube"
def create_process_description():
p_data = Parameter(
description="Any openEO process object that returns raster datasets "
"or space-time raster dataset",
schema={
"type": "object",
"subtype": "raster-cube"},
optional=False)
rv = ReturnValue(description="Processed EO data.",
schema={"type": "object", "subtype": "raster-cube"})
# Example
arguments = {
"data": {"from_node": "get_data_1"}
}
node = ProcessGraphNode(process_id=PROCESS_NAME, arguments=arguments)
graph = ProcessGraph(
title="title",
description="description",
process_graph={
"trim_1": node})
examples = [
ProcessExample(
title="Simple example",
description="Simple example",
process_graph=graph)]
pd = ProcessDescription(
id=PROCESS_NAME,
description="Removes slices solely containing no-data values. "
"If the dimension is irregular categorical then slices in the middle can be removed.",
summary="Remove slices with no-data values",
parameters={
"data": p_data},
returns=rv,
examples=examples)
return json.loads(pd.to_json())
PROCESS_DESCRIPTION_DICT[PROCESS_NAME] = create_process_description()
def create_process_chain_entry(input_object: DataObject,
output_object: DataObject):
"""Create a Actinia command of the process chain
:param input_object:
:param vector_object:
:return: A Actinia process chain description
"""
rn = randint(0, 1000000)
pc = {"id": "t_rast_algebra_%i" % rn,
"module": "t.rast.algebra",
"inputs": [{"param": "expression",
"value": "%(result)s = 1 * %(input)s" %
{"result": output_object.grass_name(),
"input": input_object.grass_name()}},
{"param": "basename",
"value": output_object.grass_name()},
]}
return pc
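# Illustrative output sketch (hedged; the actual names come from
# DataObject.grass_name() and create_output_name() and are shown here as
# placeholders):
#
# {
#     "id": "t_rast_algebra_<random>",
#     "module": "t.rast.algebra",
#     "inputs": [
#         {"param": "expression",
#          "value": "<output_strds> = 1 * <input_strds>"},
#         {"param": "basename", "value": "<output_strds>"},
#     ]
# }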
def get_process_list(node: Node):
"""Analyse the process node and return the Actinia process chain and the name of the processing result
:param node: The process node
:return: (output_objects, actinia_process_list)
"""
input_objects, process_list = check_node_parents(node=node)
output_objects = []
if "data" not in node.arguments:
raise Exception("Process %s requires parameter data" % PROCESS_NAME)
input_objects = node.get_parent_by_name(parent_name="data").output_objects
if not input_objects:
raise Exception("Process %s requires an input strds" % PROCESS_NAME)
input_object = list(input_objects)[-1]
output_object = DataObject(
name=create_output_name(input_object.name, PROCESS_NAME),
datatype=GrassDataType.STRDS)
output_objects.append(output_object)
pc = create_process_chain_entry(input_object, output_object)
process_list.append(pc)
return output_objects, process_list
PROCESS_DICT[PROCESS_NAME] = get_process_list
|
the-stack_106_13134
|
import copy
import datetime
from .. import base
from .. import util
from .. import config
from .. import debuginfo
from .. import validators
from ..auth import containerauth, always_ok
from ..dao import (
APIStorageException, containerstorage, snapshot, liststorage, openfmriutils
)
import containerhandler
import listhandler
log = config.log
class SnapshotHandler(containerhandler.ContainerHandler):
use_object_id = {
'projects': True,
'sessions': True,
'acquisitions': True
}
    # These configurations are used by the SnapshotHandler class to load the storage and
# the permissions checker to handle a request.
#
# "children_cont" represents the children container.
# "list projection" is used to filter data in mongo.
# "use_object_id" implies that the container ids are converted to ObjectId
container_handler_configurations = {
'projects': {
'storage': containerstorage.ContainerStorage('project_snapshots', use_object_id=use_object_id['projects']),
'permchecker': containerauth.default_container,
'list_projection': {'metadata': 0, 'files': 0},
'children_cont': 'session_snapshots'
},
'sessions': {
'storage': containerstorage.ContainerStorage('session_snapshots', use_object_id=use_object_id['sessions']),
'permchecker': containerauth.default_container,
'parent_storage': containerstorage.ContainerStorage('project_snapshots', use_object_id=use_object_id['projects']),
'list_projection': {'metadata': 0, 'files': 0},
'children_cont': 'acquisition_snapshots'
},
'acquisitions': {
'storage': containerstorage.ContainerStorage('acquisition_snapshots', use_object_id=use_object_id['acquisitions']),
'permchecker': containerauth.default_container,
'parent_storage': containerstorage.ContainerStorage('sessions', use_object_id=use_object_id['sessions']),
'list_projection': {'metadata': 0, 'files': 0}
}
}
def post(self, *args, **kwargs):
self.abort(500, 'method not supported on snapshots')
def put(self, *args, **kwargs):
self.abort(500, 'method not supported on snapshots')
def delete(self, *args, **kwargs):
self.abort(500, 'method not supported on snapshots')
def create(self, **kwargs):
snap_id = kwargs.pop('cid', None)
if snap_id:
payload = {
'_id': snap_id
}
else:
payload = None
origin_storage = containerstorage.ContainerStorage('projects', use_object_id=True)
origin_id = self.get_param('project')
if not origin_id:
self.abort(404, 'project is required to create a snapshot')
self.config = self.container_handler_configurations['projects']
container = origin_storage.get_container(origin_id)
permchecker = self._get_permchecker(container, container)
result = permchecker(snapshot.create)('POST', _id=origin_id, payload=payload)
return {'_id': result.inserted_id}
def remove(self, cont_name, **kwargs):
if cont_name != 'projects':
self.abort(500, 'method supported only on project snapshots')
snap_id = kwargs.pop('cid')
self.config = self.container_handler_configurations[cont_name]
self.storage = self.config['storage']
container = self._get_container(snap_id)
permchecker = self._get_permchecker(container, None)
if not container:
self.abort(404, 'snapshot does not exist')
result = permchecker(snapshot.remove)('DELETE', _id=snap_id)
return {'deleted': 1}
def publish(self, cont_name, **kwargs):
if cont_name != 'projects':
self.abort(500, 'method supported only on project snapshots')
snap_id = kwargs.pop('cid')
payload_validator = validators.payload_from_schema_file(self, 'public.json')
payload = self.request.json_body
# use the validator for the POST route as the key 'value' is required
payload_validator(payload, 'POST')
self.config = self.container_handler_configurations[cont_name]
self.storage = self.config['storage']
container = self._get_container(snap_id)
if not container:
self.abort(404, 'snapshot does not exist')
permchecker = self._get_permchecker(container, container)
result = permchecker(snapshot.make_public)('PUT', _id=snap_id, payload=payload)
return result
def get_all_for_project(self, **kwargs):
proj_id = kwargs.pop('cid')
self.config = self.container_handler_configurations['projects']
self.storage = self.config['storage']
projection = self.config['list_projection']
if self.is_true('metadata'):
projection = None
# select which permission filter will be applied to the list of results.
if self.superuser_request:
permchecker = always_ok
elif self.public_request:
permchecker = containerauth.list_public_request
else:
permchecker = containerauth.list_permission_checker(self)
query = {
'original': util.ObjectId(proj_id)
}
try:
results = permchecker(self.storage.exec_op)('GET', query=query, projection=projection, public=self.public_request)
except APIStorageException as e:
self.abort(400, e.message)
if results is None:
            self.abort(404, 'Element not found in container {} {}'.format(self.storage.cont_name, proj_id))
return results
def get_acquisitions_in_project(self, cont_name, **kwargs):
assert cont_name == 'projects'
_id = kwargs.pop('cid')
self.config = self.container_handler_configurations[cont_name]
self.storage = self.config['storage']
        container = self._get_container(_id)
permchecker = self._get_permchecker(container)
try:
results = permchecker(openfmriutils.acquisitions_in_project_snapshot)('GET', _id)
except APIStorageException as e:
self.abort(400, e.message)
if results is None:
self.abort(404, 'Element not found in container {} {}'.format(cont_name, _id))
return results
def initialize_snap_list_configurations():
snap_list_handler_configurations = {}
for cont_name in ['projects', 'sessions', 'acquisitions']:
list_config = copy.copy(listhandler.list_handler_configurations[cont_name]['files'])
list_config['storage'] = liststorage.ListStorage(
cont_name[:-1] + '_snapshots',
'files',
use_object_id=list_config.get('use_object_id', False)
)
snap_list_handler_configurations[cont_name] = {
'files': list_config
}
return snap_list_handler_configurations
snap_list_handler_configurations = initialize_snap_list_configurations()
class SnapshotFileListHandler(listhandler.FileListHandler):
def __init__(self, request=None, response=None):
super(SnapshotFileListHandler, self).__init__(request, response)
self.list_handler_configurations = snap_list_handler_configurations
def post(self, **kwargs):
self.abort(400, 'operation not supported for snapshots')
def delete(self, **kwargs):
self.abort(400, 'operation not supported for snapshots')
|
the-stack_106_13137
|
import pandas as pd
import numpy as np
from sklearn.cross_validation import _BaseKFold
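# NOTE: sklearn.cross_validation was deprecated in scikit-learn 0.18 and removed
# in 0.20; this snippet targets the older API (newer releases expose _BaseKFold
# under sklearn.model_selection).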
def getTrainTimes(t1,
testTimes):
"""SNIPPET 7.1 PURGING OBSERVATION IN THE TRAINING SET
Given testTimes, find the times of the training observations.
    - t1.index: Time when the observation started.
    - t1.value: Time when the observation ended.
    - testTimes: Times of testing observations.
"""
trn = t1.copy(deep=True)
for i, j in testTimes.iteritems():
df0 = trn[(i <= trn.index) & (trn.index <= j)
].index # train starts within test
df1 = trn[(i <= trn) & (trn <= j)].index # train ends within test
df2 = trn[(trn.index <= i) & (j <= trn)].index # train envelops test
trn = trn.drop(df0.union(df1).union(df2))
return trn
def getEmbargoTimes(times,
pctEmbargo):
"""SNIPPET 7.2 EMBARGO ON TRAINING OBSERVATIONS
Get embargo time for each bar
"""
step = int(times.shape[0]*pctEmbargo)
if step == 0:
mbrg = pd.Series(times, index=times)
else:
mbrg = pd.Series(times[step:], index=times[:-step])
mbrg = mbrg.append(pd.Series(times[-1], index=times[-step:]))
return mbrg
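# Worked example: with 1,000 bars and pctEmbargo=0.01, step is 10, so the
# embargo for the bar at position i ends at the bar at position i + 10
# (clipped to the final bar for the last step observations).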
class PurgedKFold(_BaseKFold):
"""SNIPPET 7.3 CROSS-VALIDATION CLASS WHEN OBSERVATIONS OVERLAP
Extend KFold class to work with labels that span intervals
The train is purged of observations overlapping test-label intervals
Test set is assumed contiguous (shuffle=False), w/o training samples in between
"""
def __init__(self, n, n_folds=3, t1=None, pctEmbargo=0.):
if not isinstance(t1, pd.Series):
raise ValueError('Label Through Dates must be a pd.Series')
super(PurgedKFold, self).__init__(
n, n_folds, shuffle=False, random_state=None)
self.t1 = t1
self.pctEmbargo = pctEmbargo
def split(self, X, y=None, groups=None):
if (X.index == self.t1.index).sum() != len(self.t1):
raise ValueError('X and ThruDateValues must have the same index')
indices = np.arange(X.shape[0])
mbrg = int(X.shape[0]*self.pctEmbargo)
test_starts = [(i[0], i[-1]+1) for i in
np.array_split(np.arange(X.shape[0]), self.n_folds)]
for i, j in test_starts:
t0 = self.t1.index[i] # start of test set
test_indices = indices[i:j]
maxT1Idx = self.t1.index.searchsorted(self.t1[test_indices].max())
train_indices = self.t1.index.searchsorted(
self.t1[self.t1 <= t0].index)
if maxT1Idx < X.shape[0]: # right train (with embargo)
train_indices = np.concatenate(
(train_indices, indices[maxT1Idx+mbrg:]))
yield train_indices, test_indices
def cvScore(clf,
X,
y,
sample_weight,
scoring='neg_log_loss',
t1=None,
cv=None,
cvGen=None,
pctEmbargo=None):
"""SNIPPET 7.4 USING THE PurgedKFold CLASS
"""
if scoring not in ['neg_log_loss', 'accuracy']:
raise Exception('wrong scoring method.')
from sklearn.metrics import log_loss, accuracy_score
#from clfSequential import PurgedKFold
if cvGen is None:
        cvGen = PurgedKFold(n=X.shape[0], n_folds=cv, t1=t1,
pctEmbargo=pctEmbargo) # purged
score = []
for i, (train, test) in enumerate(cvGen.split(X=X)):
print(f'Fold: {i}')
fit = clf.fit(X=X.iloc[train, :], y=y.iloc[train],
sample_weight=sample_weight.iloc[train].values)
if scoring == 'neg_log_loss':
prob = fit.predict_proba(X.iloc[test, :])
score_ = -1 * \
log_loss(
y.iloc[test], prob, sample_weight=sample_weight.iloc[test].values, labels=clf.classes_)
else:
pred = fit.predict(X.iloc[test, :])
score_ = accuracy_score(
y.iloc[test], pred, sample_weight=sample_weight.iloc[test].values)
score.append(score_)
return np.array(score)
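# Illustrative usage sketch (clf, X, y, sample_weight and the label end-time
# series t1 are assumed to come from the user's own pipeline):
#
# scores = cvScore(clf, X, y, sample_weight,
#                  scoring='accuracy', t1=t1, cv=5, pctEmbargo=0.01)
# print(scores.mean(), scores.std())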
|
the-stack_106_13139
|
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_port
short_description: Add/Update/Delete ports from an OpenStack cloud.
extends_documentation_fragment: openstack
author: "Davide Agnello (@dagnello)"
version_added: "2.0"
description:
- Add, Update or Remove ports from an OpenStack cloud. A I(state) of
'present' will ensure the port is created or updated if required.
options:
network:
description:
- Network ID or name this port belongs to.
required: true
name:
description:
- Name that has to be given to the port.
required: false
default: None
fixed_ips:
description:
- Desired IP and/or subnet for this port. Subnet is referenced by
subnet_id and IP is referenced by ip_address.
required: false
default: None
admin_state_up:
description:
- Sets admin state.
required: false
default: None
mac_address:
description:
- MAC address of this port.
required: false
default: None
security_groups:
description:
- Security group(s) ID(s) or name(s) associated with the port (comma
separated string or YAML list)
required: false
default: None
no_security_groups:
description:
- Do not associate a security group with this port.
required: false
default: False
allowed_address_pairs:
description:
- "Allowed address pairs list. Allowed address pairs are supported with
dictionary structure.
e.g. allowed_address_pairs:
- ip_address: 10.1.0.12
mac_address: ab:cd:ef:12:34:56
- ip_address: ..."
required: false
default: None
extra_dhcp_opts:
description:
- "Extra dhcp options to be assigned to this port. Extra options are
supported with dictionary structure.
e.g. extra_dhcp_opts:
- opt_name: opt name1
opt_value: value1
- opt_name: ..."
required: false
default: None
device_owner:
description:
- The ID of the entity that uses this port.
required: false
default: None
device_id:
description:
- Device ID of device using this port.
required: false
default: None
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
      - Ignored. Present for backwards compatibility
required: false
'''
EXAMPLES = '''
# Create a port
- os_port:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: port1
network: foo
# Create a port with a static IP
- os_port:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: port1
network: foo
fixed_ips:
- ip_address: 10.1.0.21
# Create a port with No security groups
- os_port:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: port1
network: foo
no_security_groups: True
# Update the existing 'port1' port with multiple security groups (version 1)
- os_port:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/d
username: admin
password: admin
project_name: admin
name: port1
security_groups: 1496e8c7-4918-482a-9172-f4f00fc4a3a5,057d4bdf-6d4d-472...
# Update the existing 'port1' port with multiple security groups (version 2)
- os_port:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/d
username: admin
password: admin
project_name: admin
name: port1
security_groups:
- 1496e8c7-4918-482a-9172-f4f00fc4a3a5
- 057d4bdf-6d4d-472...
'''
RETURN = '''
id:
description: Unique UUID.
returned: success
type: string
name:
description: Name given to the port.
returned: success
type: string
network_id:
description: Network ID this port belongs in.
returned: success
type: string
security_groups:
description: Security group(s) associated with this port.
returned: success
type: list
status:
description: Port's status.
returned: success
type: string
fixed_ips:
description: Fixed ip(s) associated with this port.
returned: success
type: list
tenant_id:
description: Tenant id associated with this port.
returned: success
type: string
allowed_address_pairs:
description: Allowed address pairs with this port.
returned: success
type: list
admin_state_up:
description: Admin state up flag for this port.
returned: success
type: bool
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
def _needs_update(module, port, cloud):
"""Check for differences in the updatable values.
NOTE: We don't currently allow name updates.
"""
compare_simple = ['admin_state_up',
'mac_address',
'device_owner',
'device_id']
compare_dict = ['allowed_address_pairs',
'extra_dhcp_opts']
compare_list = ['security_groups']
for key in compare_simple:
if module.params[key] is not None and module.params[key] != port[key]:
return True
for key in compare_dict:
        if module.params[key] is not None and module.params[key] != port[key]:
return True
for key in compare_list:
if module.params[key] is not None and (set(module.params[key]) !=
set(port[key])):
return True
# NOTE: if port was created or updated with 'no_security_groups=True',
# subsequent updates without 'no_security_groups' flag or
# 'no_security_groups=False' and no specified 'security_groups', will not
# result in an update to the port where the default security group is
# applied.
if module.params['no_security_groups'] and port['security_groups'] != []:
return True
if module.params['fixed_ips'] is not None:
for item in module.params['fixed_ips']:
if 'ip_address' in item:
# if ip_address in request does not match any in existing port,
# update is required.
if not any(match['ip_address'] == item['ip_address']
for match in port['fixed_ips']):
return True
if 'subnet_id' in item:
return True
for item in port['fixed_ips']:
# if ip_address in existing port does not match any in request,
# update is required.
if not any(match.get('ip_address') == item['ip_address']
for match in module.params['fixed_ips']):
return True
return False
def _system_state_change(module, port, cloud):
state = module.params['state']
if state == 'present':
if not port:
return True
return _needs_update(module, port, cloud)
if state == 'absent' and port:
return True
return False
def _compose_port_args(module, cloud):
port_kwargs = {}
optional_parameters = ['name',
'fixed_ips',
'admin_state_up',
'mac_address',
'security_groups',
'allowed_address_pairs',
'extra_dhcp_opts',
'device_owner',
'device_id']
for optional_param in optional_parameters:
if module.params[optional_param] is not None:
port_kwargs[optional_param] = module.params[optional_param]
if module.params['no_security_groups']:
port_kwargs['security_groups'] = []
return port_kwargs
def get_security_group_id(module, cloud, security_group_name_or_id):
security_group = cloud.get_security_group(security_group_name_or_id)
if not security_group:
module.fail_json(msg="Security group: %s, was not found"
% security_group_name_or_id)
return security_group['id']
def main():
argument_spec = openstack_full_argument_spec(
network=dict(required=False),
name=dict(required=False),
fixed_ips=dict(type='list', default=None),
admin_state_up=dict(type='bool', default=None),
mac_address=dict(default=None),
security_groups=dict(default=None, type='list'),
no_security_groups=dict(default=False, type='bool'),
allowed_address_pairs=dict(type='list', default=None),
extra_dhcp_opts=dict(type='list', default=None),
device_owner=dict(default=None),
device_id=dict(default=None),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs(
mutually_exclusive=[
['no_security_groups', 'security_groups'],
]
)
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
name = module.params['name']
state = module.params['state']
try:
cloud = shade.openstack_cloud(**module.params)
if module.params['security_groups']:
# translate security_groups to UUID's if names where provided
module.params['security_groups'] = [
get_security_group_id(module, cloud, v)
for v in module.params['security_groups']
]
port = None
network_id = None
if name:
port = cloud.get_port(name)
if module.check_mode:
module.exit_json(changed=_system_state_change(module, port, cloud))
changed = False
if state == 'present':
if not port:
network = module.params['network']
if not network:
module.fail_json(
msg="Parameter 'network' is required in Port Create"
)
port_kwargs = _compose_port_args(module, cloud)
network_object = cloud.get_network(network)
if network_object:
network_id = network_object['id']
else:
module.fail_json(
msg="Specified network was not found."
)
port = cloud.create_port(network_id, **port_kwargs)
changed = True
else:
if _needs_update(module, port, cloud):
port_kwargs = _compose_port_args(module, cloud)
port = cloud.update_port(port['id'], **port_kwargs)
changed = True
module.exit_json(changed=changed, id=port['id'], port=port)
if state == 'absent':
if port:
cloud.delete_port(port['id'])
changed = True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
|
the-stack_106_13140
|
# coding=utf-8
import tensorflow as tf
from tensorflow.python.framework import ops
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
grouping_module=tf.load_op_library(os.path.join(BASE_DIR, 'tf_grouping_so.so'))
def query_ball_point(radius, nsample, xyz1, xyz2):
'''
Input:
radius: float32, ball search radius
nsample: int32, number of points selected in each ball region
xyz1: (batch_size, ndataset, 3) float32 array, input points
xyz2: (batch_size, npoint, 3) float32 array, query points
Output:
idx: (batch_size, npoint, nsample) int32 array, indices to input points
pts_cnt: (batch_size, npoint) int32 array, number of unique points in each local region
'''
#return grouping_module.query_ball_point(radius, nsample, xyz1, xyz2)
return grouping_module.query_ball_point(xyz1, xyz2, radius, nsample)
ops.NoGradient('QueryBallPoint')
def select_top_k(k, dist):
'''
Input:
k: int32, number of k SMALLEST elements selected
dist: (b,m,n) float32 array, distance matrix, m query points, n dataset points
Output:
idx: (b,m,n) int32 array, first k in n are indices to the top k
dist_out: (b,m,n) float32 array, first k in n are the top k
'''
return grouping_module.selection_sort(dist, k)
ops.NoGradient('SelectionSort')
def group_point(points, idx):
'''
Input:
points: (batch_size, ndataset, channel) float32 array, points to sample from
idx: (batch_size, npoint, nsample) int32 array, indices to points
Output:
out: (batch_size, npoint, nsample, channel) float32 array, values sampled from points
'''
return grouping_module.group_point(points, idx)
@tf.RegisterGradient('GroupPoint')
def _group_point_grad(op, grad_out):
points = op.inputs[0]
idx = op.inputs[1]
return [grouping_module.group_point_grad(points, idx, grad_out), None]
def knn_points(k, xyz1, xyz2):
'''
Input:
k: int32, number of k in k-nn search
xyz1: (batch_size, ndataset, c) float32 array, input points
xyz2: (batch_size, npoint, c) float32 array, query points
Output:
val: (batch_size, npoint, k) float32 array, L2 distances
idx: (batch_size, npoint, k) int32 array, indices to input points
'''
# b = xyz1.get_shape()[0].value
# n = xyz1.get_shape()[1].value
# c = xyz1.get_shape()[2].value
# m = xyz2.get_shape()[1].value
b = xyz1.shape[0]
n = xyz1.shape[1]
c = xyz1.shape[2]
m = xyz2.shape[1]
# print(b, n, c, m)
# print(xyz1, (b,1,n,c))
xyz1 = tf.tile(tf.reshape(xyz1, (b,1,n,c)), [1,m,1,1])
xyz2 = tf.tile(tf.reshape(xyz2, (b,m,1,c)), [1,1,n,1])
dist = tf.reduce_sum((xyz1-xyz2)**2, -1)
# print(dist, k)
outi, out = select_top_k(k, dist)
idx = tf.slice(outi, [0,0,0], [-1,-1,k])
val = tf.slice(out, [0,0,0], [-1,-1,k])
# print(idx, val)
#val, idx = tf.nn.top_k(-dist, k=k) # ONLY SUPPORT CPU
return val, idx
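# Shape example (illustrative): with xyz1 of shape (32, 2048, 3), xyz2 of shape
# (32, 512, 3) and k=16, knn_points returns val and idx of shape (32, 512, 16);
# group_point(points, idx) with points of shape (32, 2048, 64) then yields a
# (32, 512, 16, 64) tensor.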
if __name__=='__main__':
knn=True
import numpy as np
import time
np.random.seed(100)
pts = np.random.random((32,2048,3)).astype('float32')
np.random.seed(100)
tmp1 = np.random.random((32,2048,3)).astype('float32')
np.random.seed(100)
tmp2 = np.random.random((32,2048,3)).astype('float32')
with tf.device('/gpu:6'):
points = tf.constant(pts)
xyz1 = tf.constant(tmp1)
xyz2 = tf.constant(tmp2)
radius = 0.1
nsample = 16
if knn:
_, idx = knn_points(nsample, xyz1, xyz2)
grouped_points = group_point(points, idx)
else:
idx, _ = query_ball_point(radius, nsample, xyz1, xyz2)
grouped_points = group_point(points, idx)
#grouped_points_grad = tf.ones_like(grouped_points)
#points_grad = tf.gradients(grouped_points, points, grouped_points_grad)
with tf.Session('') as sess:
now = time.time()
ret = sess.run(grouped_points)
print(time.time() - now)
print(ret.shape, ret.dtype)
print(ret)
|
the-stack_106_13142
|
"""
intersections.py
------------------
Primarily mesh-plane intersections (slicing).
"""
import numpy as np
from .constants import log, tol
from . import util
from . import geometry
from . import grouping
from . import transformations
def mesh_plane(mesh,
plane_normal,
plane_origin,
return_faces=False,
cached_dots=None):
"""
    Find the intersections between a mesh and a plane,
returning a set of line segments on that plane.
Parameters
---------
mesh : Trimesh object
Source mesh to slice
plane_normal : (3,) float
Normal vector of plane to intersect with mesh
plane_origin: (3,) float
Point on plane to intersect with mesh
return_faces: bool
If True return face index each line is from
cached_dots : (n, 3) float
If an external function has stored dot
products pass them here to avoid recomputing
Returns
----------
lines : (m, 2, 3) float
List of 3D line segments in space
face_index : (m,) int
Index of mesh.faces for each line
Only returned if return_faces was True
"""
def triangle_cases(signs):
"""
Figure out which faces correspond to which intersection
case from the signs of the dot product of each vertex.
Does this by bitbang each row of signs into an 8 bit
integer.
code : signs : intersects
0 : [-1 -1 -1] : No
2 : [-1 -1 0] : No
4 : [-1 -1 1] : Yes; 2 on one side, 1 on the other
6 : [-1 0 0] : Yes; one edge fully on plane
8 : [-1 0 1] : Yes; one vertex on plane, 2 on different sides
12 : [-1 1 1] : Yes; 2 on one side, 1 on the other
14 : [0 0 0] : No (on plane fully)
16 : [0 0 1] : Yes; one edge fully on plane
20 : [0 1 1] : No
28 : [1 1 1] : No
Parameters
----------
signs: (n,3) int, all values are -1,0, or 1
Each row contains the dot product of all three vertices
in a face with respect to the plane
Returns
---------
basic: (n,) bool, which faces are in the basic intersection case
one_vertex: (n,) bool, which faces are in the one vertex case
one_edge: (n,) bool, which faces are in the one edge case
"""
signs_sorted = np.sort(signs, axis=1)
coded = np.zeros(len(signs_sorted), dtype=np.int8) + 14
for i in range(3):
coded += signs_sorted[:, i] << 3 - i
# one edge fully on the plane
        # note that we are only accepting *one* of the on-edge cases,
        # where the other vertex has a positive dot product (16) instead
        # of both on-edge cases ([6, 16])
        # this is so that for regions that are co-planar with the section plane
# we don't end up with an invalid boundary
key = np.zeros(29, dtype=np.bool)
key[16] = True
one_edge = key[coded]
# one vertex on plane, other two on different sides
key[:] = False
key[8] = True
one_vertex = key[coded]
# one vertex on one side of the plane, two on the other
key[:] = False
key[[4, 12]] = True
basic = key[coded]
return basic, one_vertex, one_edge
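    # Worked example of the bit coding above: signs [-1, 0, 1] give
    # coded = 14 + (-1 << 3) + (0 << 2) + (1 << 1) = 8, i.e. the one-vertex case.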
def handle_on_vertex(signs, faces, vertices):
# case where one vertex is on plane, two are on different sides
vertex_plane = faces[signs == 0]
edge_thru = faces[signs != 0].reshape((-1, 2))
point_intersect, valid = plane_lines(plane_origin,
plane_normal,
vertices[edge_thru.T],
line_segments=False)
lines = np.column_stack((vertices[vertex_plane[valid]],
point_intersect)).reshape((-1, 2, 3))
return lines
def handle_on_edge(signs, faces, vertices):
# case where two vertices are on the plane and one is off
edges = faces[signs == 0].reshape((-1, 2))
points = vertices[edges]
return points
def handle_basic(signs, faces, vertices):
# case where one vertex is on one side and two are on the other
unique_element = grouping.unique_value_in_row(
signs, unique=[-1, 1])
edges = np.column_stack(
(faces[unique_element],
faces[np.roll(unique_element, 1, axis=1)],
faces[unique_element],
faces[np.roll(unique_element, 2, axis=1)])).reshape(
(-1, 2))
intersections, valid = plane_lines(plane_origin,
plane_normal,
vertices[edges.T],
line_segments=False)
        # since the data has been pre-culled, any invalid intersections at all
        # means the culling was done incorrectly and the results
        # cannot be trusted
assert valid.all()
return intersections.reshape((-1, 2, 3))
# check input plane
plane_normal = np.asanyarray(plane_normal,
dtype=np.float64)
plane_origin = np.asanyarray(plane_origin,
dtype=np.float64)
if plane_origin.shape != (3,) or plane_normal.shape != (3,):
raise ValueError('Plane origin and normal must be (3,)!')
if cached_dots is not None:
dots = cached_dots
else:
# dot product of each vertex with the plane normal indexed by face
# so for each face the dot product of each vertex is a row
# shape is the same as mesh.faces (n,3)
dots = np.dot(plane_normal,
(mesh.vertices - plane_origin).T)[mesh.faces]
# sign of the dot product is -1, 0, or 1
# shape is the same as mesh.faces (n,3)
signs = np.zeros(mesh.faces.shape, dtype=np.int8)
signs[dots < -tol.merge] = -1
signs[dots > tol.merge] = 1
# figure out which triangles are in the cross section,
# and which of the three intersection cases they are in
cases = triangle_cases(signs)
# handlers for each case
handlers = (handle_basic,
handle_on_vertex,
handle_on_edge)
# the (m, 2, 3) line segments
lines = np.vstack([h(signs[c],
mesh.faces[c],
mesh.vertices)
for c, h in zip(cases, handlers)])
log.debug('mesh_cross_section found %i intersections',
len(lines))
if return_faces:
face_index = np.hstack([np.nonzero(c)[0] for c in cases])
return lines, face_index
return lines
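# Illustrative usage sketch (assumes a trimesh.Trimesh instance named mesh):
#
# lines, faces = mesh_plane(mesh,
#                           plane_normal=[0, 0, 1],
#                           plane_origin=mesh.centroid,
#                           return_faces=True)
# # lines has shape (m, 2, 3); faces has shape (m,)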
def mesh_multiplane(mesh,
plane_origin,
plane_normal,
heights):
"""
A utility function for slicing a mesh by multiple
parallel planes, which caches the dot product operation.
Parameters
-------------
mesh : trimesh.Trimesh
Geometry to be sliced by planes
plane_normal : (3,) float
Normal vector of plane
plane_origin : (3,) float
Point on a plane
heights : (m,) float
Offset distances from plane to slice at
Returns
--------------
lines : (m,) sequence of (n, 2, 2) float
Lines in space for m planes
to_3D : (m, 4, 4) float
Transform to move each section back to 3D
face_index : (m,) sequence of (n,) int
Indexes of mesh.faces for each segment
"""
# check input plane
plane_normal = util.unitize(plane_normal)
plane_origin = np.asanyarray(plane_origin,
dtype=np.float64)
heights = np.asanyarray(heights, dtype=np.float64)
# dot product of every vertex with plane
vertex_dots = np.dot(plane_normal,
(mesh.vertices - plane_origin).T)
# reconstruct transforms for each 2D section
base_transform = geometry.plane_transform(origin=plane_origin,
normal=plane_normal)
base_transform = np.linalg.inv(base_transform)
# alter translation Z inside loop
translation = np.eye(4)
# store results
transforms = []
face_index = []
segments = []
# loop through user specified heights
for height in heights:
# offset the origin by the height
new_origin = plane_origin + (plane_normal * height)
# offset the dot products by height and index by faces
new_dots = (vertex_dots - height)[mesh.faces]
# run the intersection with the cached dot products
lines, index = mesh_plane(mesh=mesh,
plane_origin=new_origin,
plane_normal=plane_normal,
return_faces=True,
cached_dots=new_dots)
# get the transforms to 3D space and back
translation[2, 3] = height
to_3D = np.dot(base_transform, translation)
to_2D = np.linalg.inv(to_3D)
transforms.append(to_3D)
# transform points to 2D frame
lines_2D = transformations.transform_points(
lines.reshape((-1, 3)),
to_2D)
# if we didn't screw up the transform all
# of the Z values should be zero
assert np.allclose(lines_2D[:, 2], 0.0)
# reshape back in to lines and discard Z
lines_2D = lines_2D[:, :2].reshape((-1, 2, 2))
# store (n, 2, 2) float lines
segments.append(lines_2D)
# store (n,) int indexes of mesh.faces
        face_index.append(index)
# (n, 4, 4) transforms from 2D to 3D
transforms = np.array(transforms, dtype=np.float64)
return segments, transforms, face_index
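# Short illustrative sketch of ``mesh_multiplane`` (not part of the original
# module), reusing the same kind of minimal mesh stand-in as above: the
# vertex/plane dot products are computed once and shifted per height instead
# of being recomputed for every parallel plane.
def _example_mesh_multiplane():
    from collections import namedtuple
    ToyMesh = namedtuple('ToyMesh', ['vertices', 'faces'])
    mesh = ToyMesh(vertices=np.array([[0.0, 0.0, -1.0],
                                      [1.0, 0.0, 1.0],
                                      [0.0, 1.0, 1.0]]),
                   faces=np.array([[0, 1, 2]]))
    segments, to_3D, face_index = mesh_multiplane(
        mesh,
        plane_origin=[0.0, 0.0, 0.0],
        plane_normal=[0.0, 0.0, 1.0],
        heights=[-0.5, 0.0, 0.5])
    # one (n, 2, 2) array of planar segments per requested height
    return segments, to_3D, face_index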
def plane_lines(plane_origin,
plane_normal,
endpoints,
line_segments=True):
"""
Calculate plane-line intersections
Parameters
---------
plane_origin : (3,) float
Point on plane
plane_normal : (3,) float
Plane normal vector
endpoints : (2, n, 3) float
Points defining lines to be tested
line_segments : bool
If True, only returns intersections as valid if
vertices from endpoints are on different sides
of the plane.
Returns
---------
intersections : (m, 3) float
Cartesian intersection points
    valid : (n,) bool
Indicate whether a valid intersection exists
for each input line segment
"""
endpoints = np.asanyarray(endpoints)
plane_origin = np.asanyarray(plane_origin).reshape(3)
line_dir = util.unitize(endpoints[1] - endpoints[0])
plane_normal = util.unitize(np.asanyarray(plane_normal).reshape(3))
t = np.dot(plane_normal, (plane_origin - endpoints[0]).T)
b = np.dot(plane_normal, line_dir.T)
# If the plane normal and line direction are perpendicular, it means
# the vector is 'on plane', and there isn't a valid intersection.
# We discard on-plane vectors by checking that the dot product is nonzero
valid = np.abs(b) > tol.zero
if line_segments:
test = np.dot(plane_normal,
np.transpose(plane_origin - endpoints[1]))
different_sides = np.sign(t) != np.sign(test)
nonzero = np.logical_or(np.abs(t) > tol.zero,
np.abs(test) > tol.zero)
valid = np.logical_and(valid, different_sides)
valid = np.logical_and(valid, nonzero)
d = np.divide(t[valid], b[valid])
intersection = endpoints[0][valid]
intersection = intersection + np.reshape(d, (-1, 1)) * line_dir[valid]
return intersection, valid
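# Minimal illustration of ``plane_lines`` (not in the original source): a
# single segment running from below to above the XY plane yields one
# intersection point at the origin.
def _example_plane_lines():
    endpoints = np.array([[[0.0, 0.0, -1.0]],    # first endpoint of each line
                          [[0.0, 0.0, 1.0]]])    # second endpoint of each line
    points, valid = plane_lines(plane_origin=[0.0, 0.0, 0.0],
                                plane_normal=[0.0, 0.0, 1.0],
                                endpoints=endpoints)
    # valid -> array([ True]); points -> array([[0., 0., 0.]])
    return points, valid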
def planes_lines(plane_origins,
plane_normals,
line_origins,
line_directions,
return_distance=False,
return_denom=False):
"""
Given one line per plane find the intersection points.
Parameters
-----------
plane_origins : (n,3) float
Point on each plane
plane_normals : (n,3) float
Normal vector of each plane
line_origins : (n,3) float
Point at origin of each line
line_directions : (n,3) float
Direction vector of each line
return_distance : bool
Return distance from origin to point also
return_denom : bool
Return denominator, so you can check for small values
Returns
----------
on_plane : (n,3) float
Points on specified planes
valid : (n,) bool
Did plane intersect line or not
distance : (n,) float
[OPTIONAL] Distance from point
denom : (n,) float
[OPTIONAL] Denominator
"""
# check input types
plane_origins = np.asanyarray(plane_origins, dtype=np.float64)
plane_normals = np.asanyarray(plane_normals, dtype=np.float64)
line_origins = np.asanyarray(line_origins, dtype=np.float64)
line_directions = np.asanyarray(line_directions, dtype=np.float64)
# vector from line to plane
origin_vectors = plane_origins - line_origins
projection_ori = util.diagonal_dot(origin_vectors, plane_normals)
projection_dir = util.diagonal_dot(line_directions, plane_normals)
valid = np.abs(projection_dir) > 1e-5
distance = np.divide(projection_ori[valid],
projection_dir[valid])
on_plane = line_directions[valid] * distance.reshape((-1, 1))
on_plane += line_origins[valid]
result = [on_plane, valid]
if return_distance:
result.append(distance)
if return_denom:
result.append(projection_dir)
return result
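# Sketch of ``planes_lines`` pairing one line with one plane (illustrative
# values only): a vertical line dropped onto the XY plane lands at the origin
# after travelling a distance of 2.
def _example_planes_lines():
    on_plane, valid, distance = planes_lines(
        plane_origins=[[0.0, 0.0, 0.0]],
        plane_normals=[[0.0, 0.0, 1.0]],
        line_origins=[[0.0, 0.0, 2.0]],
        line_directions=[[0.0, 0.0, -1.0]],
        return_distance=True)
    # on_plane -> [[0., 0., 0.]], valid -> [True], distance -> [2.]
    return on_plane, valid, distance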
def slice_faces_plane(vertices,
faces,
plane_normal,
plane_origin,
cached_dots=None):
"""
Slice a mesh (given as a set of faces and vertices) with a plane, returning a
new mesh (again as a set of faces and vertices) that is the
portion of the original mesh to the positive normal side of the plane.
Parameters
---------
vertices : (n, 3) float
Vertices of source mesh to slice
faces : (n, 3) int
Faces of source mesh to slice
plane_normal : (3,) float
Normal vector of plane to intersect with mesh
plane_origin: (3,) float
Point on plane to intersect with mesh
cached_dots : (n, 3) float
If an external function has stored dot
products pass them here to avoid recomputing
Returns
----------
new_vertices : (n, 3) float
Vertices of sliced mesh
new_faces : (n, 3) int
Faces of sliced mesh
"""
if len(vertices) == 0:
return vertices, faces
if cached_dots is not None:
dots = cached_dots
else:
# dot product of each vertex with the plane normal indexed by face
# so for each face the dot product of each vertex is a row
# shape is the same as faces (n,3)
dots = np.einsum('i,ij->j', plane_normal,
(vertices - plane_origin).T)[faces]
# Find vertex orientations w.r.t. faces for all triangles:
# -1 -> vertex "inside" plane (positive normal direction)
# 0 -> vertex on plane
# 1 -> vertex "outside" plane (negative normal direction)
signs = np.zeros(faces.shape, dtype=np.int8)
signs[dots < -tol.merge] = 1
signs[dots > tol.merge] = -1
signs[np.logical_and(dots >= -tol.merge, dots <= tol.merge)] = 0
# Find all triangles that intersect this plane
# onedge <- indices of all triangles intersecting the plane
# inside <- indices of all triangles "inside" the plane (positive normal)
signs_sum = signs.sum(axis=1, dtype=np.int8)
signs_asum = np.abs(signs).sum(axis=1, dtype=np.int8)
# Cases:
# (0,0,0), (-1,0,0), (-1,-1,0), (-1,-1,-1) <- inside
# (1,0,0), (1,1,0), (1,1,1) <- outside
# (1,0,-1), (1,-1,-1), (1,1,-1) <- onedge
onedge = np.logical_and(signs_asum >= 2,
np.abs(signs_sum) <= 1)
inside = (signs_sum == -signs_asum)
# Automatically include all faces that are "inside"
new_faces = faces[inside]
# Separate faces on the edge into two cases: those which will become
# quads (two vertices inside plane) and those which will become triangles
# (one vertex inside plane)
triangles = vertices[faces]
cut_triangles = triangles[onedge]
cut_faces_quad = faces[np.logical_and(onedge, signs_sum < 0)]
cut_faces_tri = faces[np.logical_and(onedge, signs_sum >= 0)]
cut_signs_quad = signs[np.logical_and(onedge, signs_sum < 0)]
cut_signs_tri = signs[np.logical_and(onedge, signs_sum >= 0)]
# If no faces to cut, the surface is not in contact with this plane.
# Thus, return a mesh with only the inside faces
if len(cut_faces_quad) + len(cut_faces_tri) == 0:
if len(new_faces) == 0:
# if no new faces at all return empty arrays
empty = (np.zeros((0, 3), dtype=np.float64),
np.zeros((0, 3), dtype=np.int64))
return empty
try:
# count the number of occurrences of each value
counts = np.bincount(new_faces.flatten(), minlength=len(vertices))
unique_verts = counts > 0
unique_index = np.where(unique_verts)[0]
except TypeError:
# casting failed on 32 bit windows
log.error('casting failed!', exc_info=True)
# fall back to numpy unique
unique_index = np.unique(new_faces.flatten())
# generate a mask for cumsum
            unique_verts = np.zeros(len(vertices), dtype=bool)
unique_verts[unique_index] = True
unique_faces = (np.cumsum(unique_verts) - 1)[new_faces]
return vertices[unique_index], unique_faces
# Extract the intersections of each triangle's edges with the plane
o = cut_triangles # origins
d = np.roll(o, -1, axis=1) - o # directions
num = (plane_origin - o).dot(plane_normal) # compute num/denom
denom = np.dot(d, plane_normal)
denom[denom == 0.0] = 1e-12 # prevent division by zero
dist = np.divide(num, denom)
# intersection points for each segment
int_points = np.einsum('ij,ijk->ijk', dist, d) + o
# Initialize the array of new vertices with the current vertices
new_vertices = vertices
# Handle the case where a new quad is formed by the intersection
# First, extract the intersection points belonging to a new quad
quad_int_points = int_points[(signs_sum < 0)[onedge], :, :]
num_quads = len(quad_int_points)
if num_quads > 0:
# Extract the vertex on the outside of the plane, then get the vertices
# (in CCW order of the inside vertices)
quad_int_inds = np.where(cut_signs_quad == 1)[1]
quad_int_verts = cut_faces_quad[
np.stack((range(num_quads), range(num_quads)), axis=1),
np.stack(((quad_int_inds + 1) % 3, (quad_int_inds + 2) % 3), axis=1)]
# Fill out new quad faces with the intersection points as vertices
new_quad_faces = np.append(
quad_int_verts,
np.arange(len(new_vertices),
len(new_vertices) +
2 * num_quads).reshape(num_quads, 2), axis=1)
# Extract correct intersection points from int_points and order them in
# the same way as they were added to faces
new_quad_vertices = quad_int_points[
np.stack((range(num_quads), range(num_quads)), axis=1),
np.stack((((quad_int_inds + 2) % 3).T, quad_int_inds.T),
axis=1), :].reshape(2 * num_quads, 3)
# Add new vertices to existing vertices, triangulate quads, and add the
# resulting triangles to the new faces
new_vertices = np.append(new_vertices, new_quad_vertices, axis=0)
new_tri_faces_from_quads = geometry.triangulate_quads(new_quad_faces)
new_faces = np.append(new_faces, new_tri_faces_from_quads, axis=0)
# Handle the case where a new triangle is formed by the intersection
# First, extract the intersection points belonging to a new triangle
tri_int_points = int_points[(signs_sum >= 0)[onedge], :, :]
num_tris = len(tri_int_points)
if num_tris > 0:
# Extract the single vertex for each triangle inside the plane and get the
# inside vertices (CCW order)
tri_int_inds = np.where(cut_signs_tri == -1)[1]
tri_int_verts = cut_faces_tri[range(
num_tris), tri_int_inds].reshape(num_tris, 1)
# Fill out new triangles with the intersection points as vertices
new_tri_faces = np.append(
tri_int_verts,
np.arange(len(new_vertices),
len(new_vertices) +
2 * num_tris).reshape(num_tris, 2),
axis=1)
# Extract correct intersection points and order them in the same way as
# the vertices were added to the faces
new_tri_vertices = tri_int_points[
np.stack((range(num_tris), range(num_tris)), axis=1),
np.stack((tri_int_inds.T, ((tri_int_inds + 2) % 3).T),
axis=1),
:].reshape(2 * num_tris, 3)
# Append new vertices and new faces
new_vertices = np.append(new_vertices, new_tri_vertices, axis=0)
new_faces = np.append(new_faces, new_tri_faces, axis=0)
# find the unique indices in the new faces
    # using an integer-only unique function
unique, inverse = grouping.unique_bincount(new_faces.reshape(-1),
minlength=len(new_vertices),
return_inverse=True)
# use the unique indexes for our final vertex and faces
final_vert = new_vertices[unique]
final_face = inverse.reshape((-1, 3))
return final_vert, final_face
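# Small sketch of ``slice_faces_plane`` on a lone triangle (made-up values,
# not part of the original module): the triangle straddles the XY plane, so
# the portion on the positive-normal side comes back as new vertices plus
# re-triangulated faces.
def _example_slice_faces_plane():
    vertices = np.array([[0.0, 0.0, -1.0],
                         [1.0, 0.0, 1.0],
                         [0.0, 1.0, 1.0]])
    faces = np.array([[0, 1, 2]])
    new_vertices, new_faces = slice_faces_plane(
        vertices=vertices,
        faces=faces,
        plane_normal=np.array([0.0, 0.0, 1.0]),
        plane_origin=np.array([0.0, 0.0, 0.0]))
    # the cut introduces two intersection vertices and a triangulated quad
    return new_vertices, new_faces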
def slice_mesh_plane(mesh,
plane_normal,
plane_origin,
**kwargs):
"""
Slice a mesh with a plane, returning a new mesh that is the
portion of the original mesh to the positive normal side of the plane
Parameters
---------
mesh : Trimesh object
Source mesh to slice
plane_normal : (3,) float
Normal vector of plane to intersect with mesh
plane_origin: (3,) float
Point on plane to intersect with mesh
cap: bool
If True, cap the result with a triangulated polygon
cached_dots : (n, 3) float
If an external function has stored dot
products pass them here to avoid recomputing
Returns
----------
new_mesh : Trimesh object
Sliced mesh
"""
# check input for none
if mesh is None:
return None
# avoid circular import
from .base import Trimesh
# check input plane
plane_normal = np.asanyarray(plane_normal,
dtype=np.float64)
plane_origin = np.asanyarray(plane_origin,
dtype=np.float64)
# check to make sure origins and normals have acceptable shape
shape_ok = ((plane_origin.shape == (3,) or
util.is_shape(plane_origin, (-1, 3))) and
(plane_normal.shape == (3,) or
util.is_shape(plane_normal, (-1, 3))) and
plane_origin.shape == plane_normal.shape)
if not shape_ok:
raise ValueError('plane origins and normals must be (n, 3)!')
# start with original vertices and faces
vertices = mesh.vertices.copy()
faces = mesh.faces.copy()
# slice away specified planes
for origin, normal in zip(plane_origin.reshape((-1, 3)),
plane_normal.reshape((-1, 3))):
# save the new vertices and faces
vertices, faces = slice_faces_plane(vertices=vertices,
faces=faces,
plane_normal=normal,
plane_origin=origin,
**kwargs)
# create a mesh from the sliced result
new_mesh = Trimesh(vertices=vertices,
faces=faces,
process=False)
return new_mesh
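# Sketch (not from the original module): because ``slice_mesh_plane`` accepts
# stacked (n, 3) origins and normals, several cuts can be applied in one call.
# The ``mesh`` argument is assumed to be any Trimesh instance supplied by the
# caller.
def _example_slice_mesh_plane(mesh):
    # keep only the region above Z=0 and on the positive X side of X=0
    sliced = slice_mesh_plane(mesh,
                              plane_normal=[[0.0, 0.0, 1.0],
                                            [1.0, 0.0, 0.0]],
                              plane_origin=[[0.0, 0.0, 0.0],
                                            [0.0, 0.0, 0.0]])
    return sliced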
|
the-stack_106_13145
|
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2018, Arm Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import json
import re
from collections import OrderedDict
from shlex import quote
import copy
import itertools
import weakref
from statistics import mean
from operator import itemgetter
from devlib import TargetStableError
from lisa.wlgen.workload import Workload
from lisa.utils import Loggable, ArtifactPath, TASK_COMM_MAX_LEN, group_by_value, nullcontext, value_range
from lisa.pelt import PELT_SCALE
class CalibrationError(RuntimeError):
"""
Exception raised when the ``rt-app`` calibration is not consistent with the
CPU capacities in a way or another.
"""
pass
class RTA(Workload):
"""
An rt-app workload
:param json_file: Path to the rt-app json description
:type json_file: str
.. warning::
The class constructor only deals with pre-constructed json files.
For creating rt-app workloads through other means, see :meth:`by_profile`
and :meth:`by_str`.
For more information about rt-app itself, see
https://github.com/scheduler-tools/rt-app
"""
required_tools = Workload.required_tools + ['rt-app']
sched_policies = ['OTHER', 'FIFO', 'RR', 'DEADLINE']
ALLOWED_TASK_NAME_REGEX = r'^[a-zA-Z0-9_]+$'
def __init__(self, target, name=None, res_dir=None, json_file=None):
# Don't add code here, use the early/late init methods instead.
# This lets us factorize some code for the class methods that serve as
# alternate constructors.
self._early_init(target, name, res_dir, json_file)
self._late_init()
def _early_init(self, target, name, res_dir, json_file, log_stats=False, trace_events=None):
"""
Initialize everything that is not related to the contents of the json file
"""
super().__init__(target, name, res_dir)
self.log_stats = log_stats
self.trace_events = trace_events or []
if not json_file:
json_file = f'{self.name}.json'
self.local_json = ArtifactPath.join(self.res_dir, json_file)
self.remote_json = self.target.path.join(self.run_dir, json_file)
rta_cmd = self.target.which('rt-app')
if not rta_cmd:
raise RuntimeError("No rt-app executable found on the target")
self.command = f'{quote(rta_cmd)} {quote(self.remote_json)} 2>&1'
def _late_init(self, calibration=None, tasks_names=None):
"""
Complete initialization with a ready json file
:parameters: Attributes that have been pre-computed and ended up
in the json file. Passing them can prevent a needless file read.
"""
        if calibration is None or not tasks_names:
with open(self.local_json) as fh:
desc = json.load(fh)
if calibration is None:
calibration = desc["global"]["calibration"]
if not tasks_names:
tasks_names = list(desc["tasks"].keys())
self.calibration = calibration
self.tasks = sorted(tasks_names)
# Move configuration file to target
self.target.push(self.local_json, self.remote_json)
def run(self, cpus=None, cgroup=None, as_root=False, update_cpu_capacities=None):
logger = self.get_logger()
plat_info = self.target.plat_info
writeable_capacities = plat_info['cpu-capacities']['writeable']
if update_cpu_capacities:
if not writeable_capacities:
raise ValueError('CPU capacities are not writeable on this target, please use update_cpu_capacities=False or None')
# If left to None, we update if possible
elif update_cpu_capacities is None:
update_cpu_capacities = writeable_capacities
if not writeable_capacities:
logger.warning('CPU capacities will not be updated on this platform')
if update_cpu_capacities:
rtapp_capacities = plat_info['cpu-capacities']['rtapp']
logger.info(f'Will update CPU capacities in sysfs: {rtapp_capacities}')
write_kwargs = [
dict(
path=f'/sys/devices/system/cpu/cpu{cpu}/cpu_capacity',
value=capa,
verify=True,
)
for cpu, capa in sorted(rtapp_capacities.items())
]
capa_cm = self.target.batch_revertable_write_value(write_kwargs)
else:
# There might not be any rtapp calibration available, specifically
# when we are being called to run the calibration workload.
try:
rtapp_capacities = plat_info['cpu-capacities']['rtapp']
orig_capacities = plat_info['cpu-capacities']['orig']
except KeyError:
pass
else:
# Spit out some warning in case we are not going to update the
# capacities, so we know what to expect
RTA.warn_capacities_mismatch(orig_capacities, rtapp_capacities)
capa_cm = nullcontext()
with capa_cm:
super().run(cpus, cgroup, as_root)
if self.log_stats:
logger.debug(f'Pulling logfiles to: {self.res_dir}')
for task in self.tasks:
# RT-app appends some number to the logs, so we can't predict the
# exact filename
logfile = self.target.path.join(self.run_dir, f'*{task}*.log')
self.target.pull(logfile, self.res_dir, globbing=True)
def _process_calibration(self, calibration):
"""
Select CPU or pload value for task calibration
"""
# This is done at init time rather than at run time, because the
# calibration value lives in the file
if isinstance(calibration, int):
pass
elif isinstance(calibration, str):
calibration = calibration.upper()
elif calibration is None:
calib_map = self.target.plat_info['rtapp']['calib']
calibration = min(calib_map.values())
else:
            raise ValueError(f'Calibration value "{calibration}" cannot be handled')
return calibration
@classmethod
def by_profile(cls, target, profile, name=None, res_dir=None, default_policy=None,
max_duration_s=None, calibration=None,
log_stats=False, trace_events=None):
"""
Create an rt-app workload using :class:`RTATask` instances
:param profile: The workload description in a {task_name : :class:`RTATask`}
shape
:type profile: dict
:param default_policy: Default scheduler policy. See :attr:`sched_policies`
:type default_policy: str
:param max_duration_s: Maximum duration of the workload. Will be determined
by the longest running task if not specified.
:type max_duration_s: int
:param calibration: The calibration value to be used by rt-app. This can
be an integer value or a CPU string (e.g. "CPU0").
:type calibration: int or str
:param log_stats: Generate a log file with stats for each task
:type log_stats: bool
:param trace_events: A list of trace events to generate.
For a full list of trace events which can be generated by rt-app,
refer to the tool documentation:
https://github.com/scheduler-tools/rt-app/blob/master/doc/tutorial.txt
By default, no events are generated.
:type trace_events: list(str)
A simple profile workload would be::
task = Periodic(duty_cycle_pct=5)
rta = RTA.by_profile(target, {"foo" : task})
rta.run()
"""
logger = cls.get_logger()
self = cls.__new__(cls)
self._early_init(target, name, res_dir, None, log_stats=log_stats,
trace_events=trace_events)
        # Sanity check for task names: rt-app uses pthread_setname_np(3), which
        # limits the task name to 16 characters including the terminating '\0'
        # and the rt-app suffix.
max_size = TASK_COMM_MAX_LEN - len('-XX-XXXX')
too_long_tids = sorted(
tid for tid in profile.keys()
if len(tid) > max_size
)
if too_long_tids:
raise ValueError(
f'Task names too long, please configure your tasks with names shorter than {max_size} characters: {too_long_tids}')
invalid_tids = sorted(
tid for tid in profile.keys()
if not re.match(cls.ALLOWED_TASK_NAME_REGEX, tid)
)
if invalid_tids:
raise ValueError(
f'Task names not matching "{cls.ALLOWED_TASK_NAME_REGEX}": {invalid_tids}')
rta_profile = {
# Keep a stable order for tasks definition, to get stable IDs
# allocated by rt-app
'tasks': OrderedDict(),
'global': {}
}
calibration = self._process_calibration(calibration)
global_conf = {
'default_policy': 'SCHED_OTHER',
'duration': -1 if not max_duration_s else max_duration_s,
'calibration': calibration,
# TODO: this can only be enabled when rt-app is running as root.
# unfortunately, that's currently decided when calling
# run(as_root=True), at which point we already generated and pushed
# the JSON
'lock_pages': False,
'log_size': 'file' if log_stats else 'disable',
'ftrace': ','.join(self.trace_events),
}
if max_duration_s:
logger.warning(f'Limiting workload duration to {max_duration_s} [s]')
if default_policy:
if default_policy in self.sched_policies:
global_conf['default_policy'] = f'SCHED_{default_policy}'
else:
raise ValueError(f'scheduling class {default_policy} not supported')
logger.info(f"Calibration value: {global_conf['calibration']}")
logger.info(f"Default policy: {global_conf['default_policy']}")
rta_profile['global'] = global_conf
# Setup tasks parameters
for tid, task in sorted(profile.items(), key=itemgetter(0)):
task_conf = {}
if not task.sched_policy:
task_conf['policy'] = global_conf['default_policy']
sched_descr = 'sched: using default policy'
else:
task_conf['policy'] = f'SCHED_{task.sched_policy}'
if task.priority is not None:
task_conf['prio'] = task.priority
sched_descr = f'sched: {task.sched_policy}'
logger.info('------------------------')
logger.info(f'task [{tid}], {sched_descr}')
task_conf['delay'] = int(task.delay_s * 1e6)
logger.info(f' | start delay: {task.delay_s:.6f} [s]')
task_conf['loop'] = task.loops
logger.info(f' | loops count: {task.loops}')
task_conf['phases'] = OrderedDict()
rta_profile['tasks'][tid] = task_conf
for pid, phase in enumerate(task.phases, start=1):
phase_name = f'phase_{pid:0>6}'
logger.info(f' + {phase_name}')
rta_profile['tasks'][tid]['phases'][phase_name] = phase.get_rtapp_repr(tid, plat_info=target.plat_info)
# Generate JSON configuration on local file
with open(self.local_json, 'w') as outfile:
json.dump(rta_profile, outfile, indent=4, separators=(',', ': '))
outfile.write('\n')
self._late_init(calibration=calibration,
tasks_names=list(profile.keys()))
return self
@classmethod
def process_template(cls, template, duration=None, pload=None, log_dir=None,
work_dir=None):
"""
Turn a raw string rt-app description into a JSON dict.
Also, process some tokens and replace them.
:param template: The raw string to process
:type template: str
:param duration: The value to replace ``__DURATION__`` with
:type duration: int
:param pload: The value to replace ``__PVALUE__`` with
:type pload: int or str
:param log_dir: The value to replace ``__LOGDIR__`` with
:type log_dir: str
:param work_dir: The value to replace ``__WORKDIR__`` with
:type work_dir: str
:returns: a JSON dict
"""
replacements = {
'__DURATION__': duration,
'__PVALUE__': pload,
'__LOGDIR__': log_dir,
'__WORKDIR__': work_dir,
}
json_str = template
for placeholder, value in replacements.items():
            if placeholder in template and value is None:
raise ValueError(f'Missing value for {placeholder} placeholder')
else:
json_str = json_str.replace(placeholder, json.dumps(value))
return json.loads(json_str)
@classmethod
def by_str(cls, target, str_conf, name=None, res_dir=None, max_duration_s=None,
calibration=None):
"""
Create an rt-app workload using a pure string description
:param str_conf: The raw string description. This must be a valid json
description, with the exception of some tokens (see
:meth:`process_template`) that will be replaced automagically.
:type str_conf: str
:param max_duration_s: Maximum duration of the workload.
:type max_duration_s: int
:param calibration: The calibration value to be used by rt-app. This can
be an integer value or a CPU string (e.g. "CPU0").
:type calibration: int or str
"""
self = cls.__new__(cls)
self._early_init(target, name, res_dir, None)
calibration = self._process_calibration(calibration)
json_conf = self.process_template(
str_conf, max_duration_s, calibration, self.run_dir, self.run_dir)
with open(self.local_json, 'w') as fh:
json.dump(json_conf, fh)
tasks_names = [tid for tid in json_conf['tasks']]
self._late_init(calibration=calibration, tasks_names=tasks_names)
return self
@classmethod
def _calibrate(cls, target, res_dir):
        res_dir = res_dir if res_dir else target.get_res_dir(
"rta_calib", symlink=False
)
pload_regexp = re.compile(r'pLoad = ([0-9]+)ns')
pload = {}
logger = cls.get_logger()
# Create calibration task
if target.is_rooted:
max_rtprio = int(target.execute('ulimit -Hr').splitlines()[0])
logger.debug(f'Max RT prio: {max_rtprio}')
priority = max_rtprio + 1 if max_rtprio <= 10 else 10
sched_policy = 'FIFO'
else:
logger.warning('Will use default scheduler class instead of RT since the target is not rooted')
priority = None
sched_policy = None
for cpu in target.list_online_cpus():
logger.info(f'CPU{cpu} calibration...')
# RT-app will run a calibration for us, so we just need to
# run a dummy task and read the output
calib_task = Periodic(
duty_cycle_pct=100,
duration_s=0.001,
period_ms=1,
priority=priority,
sched_policy=sched_policy,
)
rta = cls.by_profile(target, name=f"rta_calib_cpu{cpu}",
profile={'task1': calib_task},
calibration=f"CPU{cpu}",
res_dir=res_dir)
with rta, target.freeze_userspace():
# Disable CPU capacities update, since that leads to infinite
# recursion
rta.run(as_root=target.is_rooted, update_cpu_capacities=False)
for line in rta.output.split('\n'):
pload_match = re.search(pload_regexp, line)
if pload_match is None:
continue
pload[cpu] = int(pload_match.group(1))
logger.debug(f'>>> CPU{cpu}: {pload[cpu]}')
# Avoid circular import issue
from lisa.platforms.platinfo import PlatformInfo
snippet_plat_info = PlatformInfo({
'rtapp': {
'calib': pload,
},
})
logger.info(f'Platform info rt-app calibration configuration:\n{snippet_plat_info.to_yaml_map_str()}')
plat_info = target.plat_info
# Sanity check calibration values for asymmetric systems if we have
# access to capacities
try:
orig_capacities = plat_info['cpu-capacities']['orig']
except KeyError:
return pload
capa_ploads = {
capacity: {cpu: pload[cpu] for cpu in cpus}
for capacity, cpus in group_by_value(orig_capacities).items()
}
# Find the min pload per capacity level, i.e. the fastest detected CPU.
# It is more likely to represent the right pload, as it has suffered
# from less IRQ slowdown or similar disturbances that might be random.
capa_pload = {
capacity: min(ploads.values())
for capacity, ploads in capa_ploads.items()
}
# Sort by capacity
capa_pload_list = sorted(capa_pload.items())
# unzip the list of tuples
_, pload_list = zip(*capa_pload_list)
# If sorting according to capa was not equivalent to reverse sorting
# according to pload (small pload=fast cpu)
if list(pload_list) != sorted(pload_list, reverse=True):
            raise CalibrationError('Calibration values report big cores as less capable than LITTLE cores')
# Check that the CPU capacities seen by rt-app are similar to the one
# the kernel uses
orig_capacities = plat_info['cpu-capacities']['orig']
true_capacities = cls.get_cpu_capacities_from_calibrations(orig_capacities, pload)
cls.warn_capacities_mismatch(orig_capacities, true_capacities)
return pload
@classmethod
def warn_capacities_mismatch(cls, orig_capacities, new_capacities):
"""
Compare ``orig_capacities`` and ``new_capacities`` and log warnings if
they are not consistent.
:param orig_capacities: Original CPU capacities, as a map of CPU to capacity.
:type orig_capacities: dict(int, int)
:param new_capacities: New CPU capacities, as a map of CPU to capacity.
:type new_capacities: dict(int, int)
"""
logger = cls.get_logger()
capacities = {
cpu: (orig_capacities[cpu], new_capacities[cpu])
for cpu in orig_capacities.keys() & new_capacities.keys()
}
logger.info(f'CPU capacities according to rt-app workload: {new_capacities}')
capa_factors_pct = {
cpu: new / orig * 100
for cpu, (orig, new) in capacities.items()
}
dispersion_pct = max(abs(100 - factor) for factor in capa_factors_pct.values())
if dispersion_pct > 2:
logger.warning(f'The calibration values are not inversely proportional to the CPU capacities, the duty cycles will be up to {dispersion_pct:.2f}% off on some CPUs: {capa_factors_pct}')
if dispersion_pct > 20:
            logger.warning(f'The calibration values are not inversely proportional to the CPU capacities. Either rt-app calibration failed, or the rt-app busy loop has a very different instruction mix compared to the workload used to establish the CPU capacities: {capa_factors_pct}')
# Map of CPUs X to list of CPUs Ys that are faster than it although CPUs
# of Ys have a smaller orig capacity than X
if len(capacities) > 1:
faster_than_map = {
cpu1: sorted(
cpu2
for cpu2, (orig2, new2) in capacities.items()
if new2 > new1 and orig2 < orig1
)
for cpu1, (orig1, new1) in capacities.items()
}
else:
faster_than_map = {}
# Remove empty lists
faster_than_map = {
cpu: faster_cpus
for cpu, faster_cpus in faster_than_map.items()
if faster_cpus
}
if faster_than_map:
raise CalibrationError(f'Some CPUs of higher capacities are slower than other CPUs of smaller capacities: {faster_than_map}')
@classmethod
def get_cpu_capacities_from_calibrations(cls, orig_capacities, calibrations):
"""
Compute the CPU capacities out of the rt-app calibration values.
:returns: A mapping of CPU to capacity.
:param orig_capacities: Original capacities as a mapping of CPU ID to
capacity.
:type orig_capacities: dict(int, int)
:param calibrations: Mapping of CPU to pload value.
:type calibrations: dict
"""
# calibration values are inversely proportional to the CPU capacities
inverse_calib = {cpu: 1 / calib for cpu, calib in calibrations.items()}
def compute_capa(cpu):
# True CPU capacity for the rt-app workload, rather than for the
# whatever workload was used to compute the CPU capacities exposed by
# the kernel
return inverse_calib[cpu] / max(inverse_calib.values()) * PELT_SCALE
rtapp_capacities = {cpu: compute_capa(cpu) for cpu in calibrations.keys()}
# Average in a capacity class, since the kernel will only use one
# value for the whole class anyway
new_capacities = {}
# Group the CPUs by original capacity
for capa, capa_class in group_by_value(orig_capacities).items():
avg_capa = mean(
capa
for cpu, capa in rtapp_capacities.items()
if cpu in capa_class
)
new_capacities.update({cpu: avg_capa for cpu in capa_class})
# Make sure that the max cap is 1024 and that we use integer values
new_max_cap = max(new_capacities.values())
new_capacities = {
# Make sure the max cap will be 1024 and not 1023 due to rounding
# errors
cpu: math.ceil(capa / new_max_cap * 1024)
for cpu, capa in new_capacities.items()
}
return new_capacities
@classmethod
def get_cpu_calibrations(cls, target, res_dir=None):
"""
        Get the rt-app calibration value for all CPUs.
:param target: Target to run calibration on.
:type target: lisa.target.Target
:returns: Dict mapping CPU numbers to rt-app calibration values.
"""
if not target.is_module_available('cpufreq'):
cls.get_logger().warning(
'cpufreq module not loaded, skipping setting frequency to max')
cm = nullcontext()
else:
cm = target.cpufreq.use_governor('performance')
with cm, target.disable_idle_states():
return cls._calibrate(target, res_dir)
@classmethod
def _compute_task_map(cls, trace, names):
prefix_regexps = {
prefix: re.compile(rf"^{re.escape(prefix)}(-[0-9]+)*$")
for prefix in names
}
task_map = {
prefix: sorted(
task_id
for task_id in trace.task_ids
if re.match(regexp, task_id.comm)
)
for prefix, regexp in prefix_regexps.items()
}
missing = sorted(prefix for prefix, task_ids in task_map.items() if not task_ids)
if missing:
raise RuntimeError(f"Missing tasks matching the following rt-app profile names: {', '.join(missing)}")
return task_map
# Mapping of Trace objects to their task map.
# We don't want to keep traces alive just for this cache, so we use weak
# references for the keys.
_traces_task_map = weakref.WeakKeyDictionary()
@classmethod
def resolve_trace_task_names(cls, trace, names):
"""
Translate an RTA profile task name to a list of
:class:`lisa.trace.TaskID` as found in a :class:`lisa.trace.Trace`.
        :returns: A dictionary of ``rt-app`` profile names to list of
            :class:`lisa.trace.TaskID`. The list will contain more than one item
            if the task forked.
:param trace: Trace to look at.
:type trace: lisa.trace.Trace
:param names: ``rt-app`` task names as specified in profile keys
:type names: list(str)
"""
task_map = cls._traces_task_map.setdefault(trace, {})
# Update with the names that have not been discovered yet
not_computed_yet = set(names) - task_map.keys()
if not_computed_yet:
task_map.update(cls._compute_task_map(trace, not_computed_yet))
# Only return what we were asked for, so the client code does not
# accidentally starts depending on whatever was requested in earlier
# calls
return {
name: task_ids
for name, task_ids in task_map.items()
if name in names
}
def get_trace_task_names(self, trace):
"""
        Get a dictionary of :class:`lisa.trace.TaskID` used in the given trace
for this task.
"""
return self.resolve_trace_task_names(trace, self.tasks)
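# Illustrative sketch (not part of the original module) of the token
# replacement performed by :meth:`RTA.process_template`; the JSON snippet and
# the values below are made up for the example.
def _example_process_template():
    template = '''
    {
        "global": {
            "duration": __DURATION__,
            "calibration": __PVALUE__,
            "logdir": __LOGDIR__,
            "workdir": __WORKDIR__
        },
        "tasks": {}
    }
    '''
    # __DURATION__ becomes 10, __PVALUE__ becomes "CPU0", and the directories
    # become JSON-encoded strings; the result is parsed back into a dict
    return RTA.process_template(template, duration=10, pload='CPU0',
                                log_dir='/tmp/lisa', work_dir='/tmp/lisa')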
class Phase(Loggable):
"""
Descriptor for an rt-app load phase
:param duration_s: the phase duration in [s].
:type duration_s: float
:param period_ms: the phase period in [ms].
:type period_ms: float
:param duty_cycle_pct: the generated load in percents.
:type duty_cycle_pct: float
:param cpus: the CPUs on which task execution is restricted during this phase.
If unspecified, that phase will be allowed to run on any CPU,
regardless of the affinity of the previous phases.
:type cpus: list(int) or None
:param barrier_after: if provided, the name of the barrier to sync against
when reaching the end of this phase. Currently only
supported when duty_cycle_pct=100
:type barrier_after: str
:param uclamp_min: the task uclamp.min value to set for the task for the
duration of the phase.
:type uclamp_min: int
:param uclamp_max: the task uclamp.max value to set for the task for the
duration of the phase.
:type uclamp_max: int
:param numa_nodes_membind: the list of NUMA Nodes.
Task will only allocate memory from these nodes during this phase.
If unspecified, that phase will be allowed to allocate memory from any
NUMA node, regardless of the previous phase settings.
:type numa_nodes_membind: list(int) or None
"""
def __init__(self, duration_s, period_ms, duty_cycle_pct, cpus=None, barrier_after=None,
uclamp_min=None, uclamp_max=None, numa_nodes_membind=None):
if barrier_after and duty_cycle_pct != 100:
# This could be implemented but currently don't foresee any use.
raise ValueError('Barriers only supported when duty_cycle_pct=100')
self.duration_s = duration_s
self.period_ms = period_ms
self.duty_cycle_pct = duty_cycle_pct
self.cpus = cpus
self.barrier_after = barrier_after
self.uclamp_min = uclamp_min
self.uclamp_max = uclamp_max
self.numa_nodes_membind = numa_nodes_membind
def get_rtapp_repr(self, task_name, plat_info):
"""
        Get a dictionary representation of the phase as expected by rt-app
:param task_name: Name of the phase's task (needed for timers)
:type task_name: str
:param plat_info: Platform info of the target that is going to be used
to run the phase.
:type plat_info: lisa.platforms.platinfo.PlatformInfo
:returns: OrderedDict
"""
logger = self.get_logger()
phase = OrderedDict()
# Convert time parameters to integer [us] units
duration = int(self.duration_s * 1e6)
# A duty-cycle of 0[%] translates to a 'sleep' phase
if self.duty_cycle_pct == 0:
logger.info(' | sleep {:.6f} [s]'.format(duration / 1e6))
phase['loop'] = 1
phase['sleep'] = duration
# A duty-cycle of 100[%] translates to a 'run-only' phase
elif self.duty_cycle_pct == 100:
logger.info(' | batch {:.6f} [s]'.format(duration / 1e6))
phase['loop'] = 1
phase['run'] = duration
if self.barrier_after:
phase['barrier'] = self.barrier_after
        # A certain number of loops is required to generate the
        # proper load
else:
period = int(self.period_ms * 1e3)
cloops = -1
if duration >= 0:
cloops = duration // period
sleep_time = period * (100 - self.duty_cycle_pct) // 100
# rtapp fails to handle floating values correctly
# https://github.com/scheduler-tools/rt-app/issues/82
running_time = int(period - sleep_time)
logger.info(' | duration {:.6f} [s] ({} loops)'.format(
duration / 1e6, cloops))
logger.info(f' | period {int(period):>3} [us], duty_cycle {self.duty_cycle_pct:>3,.2f} %')
logger.info(f' | run_time {int(running_time):>6} [us], sleep_time {int(sleep_time):>6} [us]')
phase['loop'] = cloops
phase['run'] = running_time
phase['timer'] = {'ref': task_name, 'period': period}
# Set the affinity to all CPUs in the system, i.e. do not set any affinity
if self.cpus is None:
cpus = list(range(plat_info['cpus-count']))
else:
cpus = self.cpus
phase['cpus'] = cpus
if self.uclamp_min is not None:
phase['util_min'] = self.uclamp_min
logger.info(f' | util_min {self.uclamp_min:>7}')
if self.uclamp_max is not None:
phase['util_max'] = self.uclamp_max
logger.info(f' | util_max {self.uclamp_max:>7}')
# Allow memory allocation from all NUMA nodes in the system
if self.numa_nodes_membind is None:
nodes_membind = list(range(plat_info['numa-nodes-count']))
else:
nodes_membind = self.numa_nodes_membind
phase['nodes_membind'] = nodes_membind
return phase
class RTATask:
"""
Base class for conveniently constructing params to :meth:`RTA.by_profile`
:param delay_s: the delay in seconds before starting.
:type delay_s: float
:param loops: Number of times to repeat the described task (including
initial delay). -1 indicates infinite looping
:type loops: int
:param sched_policy: the scheduler policy for this task. Defaults to
``SCHED_OTHER``, see :manpage:`sched` for information on scheduler policies.
:type sched_policy: str or None
:param priority: the scheduler priority for this task. See :manpage:`sched`
for information on scheduler priorities.
:type priority: int or None
This class represents an rt-app task which may contain multiple :class:`Phase`.
It implements ``__add__`` so that using ``+`` on two tasks concatenates their
phases. For example ``Ramp() + Periodic()`` would yield an ``RTATask`` that
executes the default phases for :class:`Ramp` followed by the default phases for
:class:`Periodic`.
"""
def __init__(self, delay_s=0, loops=1, sched_policy=None, priority=None):
self.delay_s = delay_s
self.loops = loops
if isinstance(sched_policy, str):
sched_policy = sched_policy.upper()
if sched_policy not in RTA.sched_policies:
raise ValueError(f'scheduling class {sched_policy} not supported')
self.sched_policy = sched_policy
self.priority = priority
self.phases = []
def __add__(self, task):
# Do not modify the original object which might still be used for other
# purposes
new = copy.deepcopy(self)
# Piggy back on the __iadd__ implementation
new += task
return new
def __iadd__(self, task):
if task.delay_s:
# This won't work, because rt-app's "delay" field is per-task and
# not per-phase. We might be able to implement it by adding a
# "sleep" event here, but let's not bother unless such a need
# arises.
raise ValueError("Can't compose rt-app tasks "
"when the second has nonzero 'delay_s'")
self.phases.extend(task.phases)
return self
class Ramp(RTATask):
"""
Configure a ramp load.
This class defines a task which load is a ramp with a configured number
of steps according to the input parameters.
:param start_pct: the initial load percentage.
:type start_pct: float
:param end_pct: the final load percentage.
:type end_pct: float
:param delta_pct: the load increase/decrease at each step, in percentage
points.
:type delta_pct: float
:param time_s: the duration in seconds of each load step.
:type time_s: float
:param period_ms: the period used to define the load in [ms].
:type period_ms: float
.. seealso:: See :class:`RTATask` for the documentation of the following
parameters:
* **delay_s**
* **loops**
* **sched_policy**
* **priority**
.. seealso:: See :class:`Phase` for the documentation of the following
parameters:
* **cpus**
* **uclamp_min**
* **uclamp_max**
* **numa_nodes_membind**
"""
def __init__(self, start_pct=0, end_pct=100, delta_pct=10, time_s=1,
period_ms=100, delay_s=0, loops=1, sched_policy=None,
priority=None, cpus=None, uclamp_min=None, uclamp_max=None,
numa_nodes_membind=None):
super().__init__(delay_s, loops, sched_policy, priority)
if not (0 <= start_pct <= 100 and 0 <= end_pct <= 100):
raise ValueError('start_pct and end_pct must be in [0..100] range')
# Make sure the delta goes in the right direction
sign = +1 if start_pct <= end_pct else -1
delta_pct = sign * abs(delta_pct)
steps = list(value_range(start_pct, end_pct, delta_pct, inclusive=True))
# clip the last step
steps[-1] = end_pct
phases = []
for load in steps:
if load == 0:
phase = Phase(time_s, 0, 0, cpus, uclamp_min=uclamp_min,
uclamp_max=uclamp_max, numa_nodes_membind=numa_nodes_membind)
else:
phase = Phase(time_s, period_ms, load, cpus,
uclamp_min=uclamp_min, uclamp_max=uclamp_max,
numa_nodes_membind=numa_nodes_membind)
phases.append(phase)
self.phases = phases
class Step(Ramp):
"""
Configure a step load.
This class defines a task which load is a step with a configured initial and
final load. Using the ``loops`` param, this can be used to create a workload
that alternates between two load values.
:param start_pct: the initial load percentage.
:type start_pct: float
:param end_pct: the final load percentage.
:type end_pct: float
:param time_s: the duration in seconds of each load step.
:type time_s: float
:param period_ms: the period used to define the load in [ms].
:type period_ms: float
.. seealso:: See :class:`RTATask` for the documentation of the following
parameters:
* **delay_s**
* **loops**
* **sched_policy**
* **priority**
.. seealso:: See :class:`Phase` for the documentation of the following
parameters:
* **cpus**
* **uclamp_min**
* **uclamp_max**
* **numa_nodes_membind**
"""
def __init__(self, start_pct=0, end_pct=100, time_s=1, period_ms=100,
delay_s=0, loops=1, sched_policy=None, priority=None, cpus=None,
uclamp_min=None, uclamp_max=None, numa_nodes_membind=None):
delta_pct = abs(end_pct - start_pct)
super().__init__(start_pct, end_pct, delta_pct, time_s,
period_ms, delay_s, loops, sched_policy,
priority, cpus, uclamp_min, uclamp_max, numa_nodes_membind)
class Pulse(RTATask):
"""
Configure a pulse load.
This class defines a task which load is a pulse with a configured
initial and final load.
The main difference with the 'step' class is that a pulse workload is
    by definition a 'step down', i.e. the workload switches from an initial
    load to a final one which is always lower than the initial one.
Moreover, a pulse load does not generate a sleep phase in case of 0[%]
load, i.e. the task ends as soon as the non null initial load has
completed.
:param start_pct: the initial load percentage.
:type start_pct: float
:param end_pct: the final load percentage.
:type end_pct: float
:param time_s: the duration in seconds of each load step.
:type time_s: float
:param period_ms: the period used to define the load in [ms].
:type period_ms: float
.. seealso:: See :class:`RTATask` for the documentation of the following
parameters:
* **delay_s**
* **loops**
* **sched_policy**
* **priority**
.. seealso:: See :class:`Phase` for the documentation of the following
parameters:
* **cpus**
* **uclamp_min**
* **uclamp_max**
* **numa_nodes_membind**
"""
def __init__(self, start_pct=100, end_pct=0, time_s=1, period_ms=100,
delay_s=0, loops=1, sched_policy=None, priority=None, cpus=None,
uclamp_min=None, uclamp_max=None, numa_nodes_membind=None):
super().__init__(delay_s, loops, sched_policy, priority)
if end_pct > start_pct:
raise ValueError('end_pct must be lower than start_pct')
if not (0 <= start_pct <= 100 and 0 <= end_pct <= 100):
raise ValueError('end_pct and start_pct must be in [0..100] range')
loads = [start_pct]
if end_pct:
loads += [end_pct]
self.phases = [
Phase(time_s, period_ms, load, cpus, uclamp_min=uclamp_min,
uclamp_max=uclamp_max, numa_nodes_membind=numa_nodes_membind)
for load in loads
]
class Periodic(Pulse):
"""
Configure a periodic load. This is the simplest type of RTA task.
This class defines a task which load is periodic with a configured
period and duty-cycle.
:param duty_cycle_pct: the generated load in percents.
:type duty_cycle_pct: float
:param duration_s: the phase duration in [s].
:type duration_s: float
:param period_ms: the period used to define the load in [ms].
:type period_ms: float
.. seealso:: See :class:`RTATask` for the documentation of the following
parameters:
* **delay_s**
* **loops**
* **sched_policy**
* **priority**
.. seealso:: See :class:`Phase` for the documentation of the following
parameters:
* **cpus**
* **uclamp_min**
* **uclamp_max**
* **numa_nodes_membind**
"""
def __init__(self, duty_cycle_pct=50, duration_s=1, period_ms=100,
delay_s=0, sched_policy=None, priority=None, cpus=None,
uclamp_min=None, uclamp_max=None, numa_nodes_membind=None):
super().__init__(duty_cycle_pct, 0, duration_s,
period_ms, delay_s, 1, sched_policy,
priority, cpus,
uclamp_min=uclamp_min,
uclamp_max=uclamp_max,
numa_nodes_membind=numa_nodes_membind)
class RunAndSync(RTATask):
"""
Configure a task that runs 100% then waits on a barrier
:param barrier: name of barrier to wait for. Sleeps until any other tasks
that refer to this barrier have reached the barrier too.
:type barrier: str
:param time_s: time to run for in [s]
:type time_s: float
.. seealso:: See :class:`RTATask` for the documentation of the following
parameters:
* **delay_s**
* **loops**
* **sched_policy**
* **priority**
.. seealso:: See :class:`Phase` for the documentation of the following
parameters:
* **cpus**
* **uclamp_min**
* **uclamp_max**
* **numa_nodes_membind**
"""
def __init__(self, barrier, time_s=1, delay_s=0, loops=1, sched_policy=None,
priority=None, cpus=None, uclamp_min=None, uclamp_max=None, numa_nodes_membind=None):
super().__init__(delay_s, loops, sched_policy, priority)
# This should translate into a phase containing a 'run' event and a
# 'barrier' event
self.phases = [Phase(time_s, None, 100, cpus, barrier_after=barrier,
uclamp_min=uclamp_min, uclamp_max=uclamp_max, numa_nodes_membind=numa_nodes_membind)]
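# Minimal composition sketch (not part of the original module) based on the
# ``RTATask.__add__`` behaviour documented above; the phase parameters are
# arbitrary illustrative values.
def _example_compose_tasks():
    ramp = Ramp(start_pct=10, end_pct=50, delta_pct=20, time_s=1)
    steady = Periodic(duty_cycle_pct=30, duration_s=2)
    # '+' concatenates the phases without modifying either original task
    combined = ramp + steady
    return combined.phases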
# vim :set tabstop=4 shiftwidth=4 textwidth=80 expandtab
|
the-stack_106_13146
|
#!/usr/bin/env python
#
# Copyright (c) 2015 Pavel Lazar pavel.lazar (at) gmail.com
#
# The Software is provided WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED.
#####################################################################
import re
import socket
import collections
import time
from control_exceptions import (UnknownHandlerOperation, ControlError, ControlSyntaxError, HandlerError,
NoRouterInstalledError, NoSuchElementError, NoSuchHandlerError, PermissionDeniedError,
UnimplementedCommandError, DataTooBigError)
class ResponseCodes:
OK = 200
OK_BUT_WITH_WARNINGS = 220
SYNTAX_ERROR = 500
UNIMPLEMENTED_COMMAND = 501
NO_SUCH_ELEMENT = 510
NO_SUCH_HANDLER = 511
HANDLER_ERROR = 520
DATA_TOO_BIG = 521
PERMISSION_DENIED = 530
NO_ROUTER_INSTALLED = 540
class Commands:
READ = 'READ'
READ_DATA = 'READDATA'
READ_UNTIL = 'READUNTIL'
WRITE = 'WRITE'
WRITE_DATA = 'WRITEDATA'
WRITE_UNTIL = 'WRITEUNTIL'
CHECK_READ = 'CHECKREAD'
CHECK_WRITE = 'CHECKWRITE'
LLRPC = 'LLRPC'
QUIT = 'QUIT'
_EXCEPTIONS_CODE_MAPPING = {
ResponseCodes.SYNTAX_ERROR: ControlSyntaxError,
ResponseCodes.UNIMPLEMENTED_COMMAND: UnimplementedCommandError,
ResponseCodes.NO_SUCH_ELEMENT: NoSuchElementError,
ResponseCodes.NO_SUCH_HANDLER: NoSuchHandlerError,
ResponseCodes.HANDLER_ERROR: HandlerError,
ResponseCodes.DATA_TOO_BIG: DataTooBigError,
ResponseCodes.PERMISSION_DENIED: PermissionDeniedError,
ResponseCodes.NO_ROUTER_INSTALLED: NoRouterInstalledError
}
CONNECT_RETRIES = 50
CHATTER_SOCKET_REGEXP = re.compile(r'ChatterSocket\(.*\)')
CONTROL_SOCKET_REGEXP = re.compile(r'ControlSocket\(.*\)')
class ClickControlClient(object):
def __init__(self):
self._socket = None
        self.control_socket_element_name = None
self.protocol_version = None
self._buffer = ''
self.family = None
self.address = None
self._socket = None
self.connected = False
def connect(self, address, family=socket.AF_INET):
self.family = family
self.address = address
self._socket = socket.socket(family=self.family)
self._socket.connect(self.address)
self.connected = True
self._read_and_parse_banner()
def _read_and_parse_banner(self):
banner = self._readline()
        self.control_socket_element_name, self.protocol_version = banner.split('/')
def close(self):
if self.connected:
# self._write_line("QUIT")
self._socket.close()
self.connected = False
self._socket = None
def engine_version(self):
return self._read_global('version')
def loaded_packages(self):
packages = self._read_global('packages').strip()
if packages:
return packages.split('\n')
else:
return []
def load_package(self, package):
old_config = self.running_config()
new_config = 'require(package "{package}");\n'.format(package=package) + old_config
self.hotswap(new_config)
def supported_elements(self):
return self._read_global('classes').strip().split('\n')
def running_config(self):
return self._read_global('config')
def hotswap(self, new_config):
new_config = self._migrate_control_elements(new_config)
self._write_global('hotconfig', data=new_config)
        for _ in range(CONNECT_RETRIES):
try:
self.close()
self.connect(self.address, self.family)
# try to pull the config again to make sure we are reconnected
self.running_config()
break
except socket.error:
time.sleep(0.1)
def elements_names(self):
raw = self._read_global('list')
# The first line is the number of elements
elements = raw.strip().split('\n')[1:]
return elements
def element_handlers(self, element_name):
handlers = self._element_handlers_with_attributes(element_name)
# each handler has the form (<handler_name>, <rw attributes>")
# we need only the name
return [name for name, attribute in handlers]
def _element_handlers_with_attributes(self, element_name):
handlers = self.read_handler(element_name, 'handlers').strip().split('\n')
# each handler has the form "<handler_name> <rw attributes>"
return [tuple(handler.strip().split('\t')) for handler in handlers]
def element_class(self, element_name):
return self.read_handler(element_name, 'class')
def element_config(self, element_name):
return self.read_handler(element_name, 'config')
def element_ports(self, element_name):
return self.read_handler(element_name, 'ports')
def element_input_counts(self, element_name):
return self.read_handler(element_name, 'icounts').strip().split('\n')
def element_output_counts(self, element_name):
return self.read_handler(element_name, 'ocounts').strip().split('\n')
def is_readable_handler(self, element_name, handler_name):
cmd = self._build_cmd(Commands.CHECK_READ, element_name, handler_name, '')
self._write_line(cmd)
response_code, response_msg = self._read_response()
return response_code == ResponseCodes.OK
def is_writeable_handler(self, element_name, handler_name):
cmd = self._build_cmd(Commands.CHECK_WRITE, element_name, handler_name, '')
self._write_line(cmd)
response_code, response_msg = self._read_response()
return response_code == ResponseCodes.OK
def write_handler(self, element_name, handler_name, params='', data=''):
if data:
cmd = self._build_cmd(Commands.WRITE_DATA, element_name, handler_name, str(len(data)))
else:
cmd = self._build_cmd(Commands.WRITE, element_name, handler_name, params)
self._write_line(cmd)
if data:
self._write_raw(data)
response_code, response_code_msg = self._read_response()
if response_code not in (ResponseCodes.OK, ResponseCodes.OK_BUT_WITH_WARNINGS):
self._raise_exception(element_name, handler_name, response_code, response_code_msg)
return response_code
def read_handler(self, element_name, handler_name, params=''):
cmd = self._build_cmd(Commands.READ, element_name, handler_name, params)
self._write_line(cmd)
response_code, response_code_msg = self._read_response()
if response_code not in (ResponseCodes.OK, ResponseCodes.OK_BUT_WITH_WARNINGS):
self._raise_exception(element_name, handler_name, response_code, response_code_msg)
data_size = self._read_data_size()
data = self._read_raw(data_size)
return data
def operations_sequence(self, operations, preserve_order=False):
        # Currently there is no 'smart' way in click of doing a bunch of read or write calls,
        # so we just do them one after the other using the basic READ/WRITE commands
results = collections.OrderedDict()
for operation in operations:
operation_type = operation['type']
element_name = operation['element_name']
handler_name = operation['handler_name']
params = operation.get('params', '')
key = self._build_full_handler_name(element_name, handler_name)
if operation_type == 'READ':
operation_function = self.read_handler
elif operation_type == 'WRITE':
operation_function = self.write_handler
else:
operation_function = lambda en, hn, pa: UnknownHandlerOperation(
"Unknown operation: %s" % operation_type)
try:
results[key] = operation_function(element_name, handler_name, params)
except ControlError as e:
results[key] = e
return results
def _read_global(self, handler_name, params=''):
return self.read_handler(None, handler_name, params)
def _write_global(self, handler_name, params='', data=''):
self.write_handler(None, handler_name, params, data)
def _config_requirements(self):
reqs = self._read_global('requirements').strip()
if reqs:
return reqs.split('\n')
else:
return []
def _raise_exception(self, element_name, handler_name, response_code, response_code_msg):
        exception = _EXCEPTIONS_CODE_MAPPING[response_code]
exception_msg = self._build_read_exception_message(element_name, handler_name, response_code_msg)
raise exception(exception_msg)
def _build_read_exception_message(self, element_name, handler_name, response_code_msg):
handler = self._build_full_handler_name(element_name, handler_name)
return 'Error reading {handler}: {msg}'.format(handler=handler, msg=response_code_msg)
def _build_cmd(self, command, element_name, handler_name, params):
handler = self._build_full_handler_name(element_name, handler_name)
cmd = '{cmd} {handler}'.format(cmd=command, handler=handler)
if params:
cmd += ' {params}'.format(params=params)
return cmd
def _build_full_handler_name(self, element_name, handler_name):
if element_name:
handler = "{element}.{handler}".format(element=element_name, handler=handler_name)
else:
handler = handler_name
return handler
def _read_response(self):
last_line = self._readline()
response = last_line[3:]
while last_line[3] == '-':
last_line = self._readline()
response += last_line[3:]
response_code = int(last_line[:3])
return response_code, response
def _read_data_size(self):
data_size_line = self._readline()
return int(data_size_line.split(' ')[1])
def _read_raw(self, length):
while len(self._buffer) < length:
self._buffer += self._socket.recv(length - len(self._buffer))
data, self._buffer = self._buffer[:length], self._buffer[length:]
return data
def _readline(self, delim='\r\n'):
return self._read_until(delim)
def _read_until(self, end='\r\n'):
end_index = self._buffer.find(end)
while end_index == -1:
data = self._socket.recv(2048)
self._buffer += data
end_index = self._buffer.find(end)
line, self._buffer = self._buffer[:end_index], self._buffer[end_index + len(end):]
return line
def _write_line(self, data, delim='\r\n'):
self._write_raw("{data}{delim}".format(data=data, delim=delim))
def _write_raw(self, data):
total_length = len(data)
offset = 0
while offset < total_length:
offset += self._socket.send(data[offset:])
def _migrate_control_elements(self, new_config):
old_config = self.running_config()
old_control_socket = CONTROL_SOCKET_REGEXP.findall(old_config)
old_chatter_socket = CHATTER_SOCKET_REGEXP.findall(old_config)
new_control_socket = CONTROL_SOCKET_REGEXP.findall(new_config)
new_chatter_socket = CHATTER_SOCKET_REGEXP.findall(new_config)
if not new_chatter_socket and old_chatter_socket:
# we add the old ChatterSocket only if it is not present in the new config but was in the old
chatter_socket = old_chatter_socket[0] + ';\n'
new_config = chatter_socket + new_config
if not new_control_socket and old_control_socket:
# we add the old ControlSocket only if it is not present in the new config but was in the old
control_socket = old_control_socket[0] + ';\n'
new_config = control_socket + new_config
return new_config
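# Hypothetical usage sketch (not part of the original module) for batching
# handler accesses through ``operations_sequence``; the element and handler
# names are made up and ``client`` is assumed to be an already connected
# ClickControlClient.
def _example_operations_sequence(client):
    operations = [
        {'type': 'READ', 'element_name': 'counter1', 'handler_name': 'count'},
        {'type': 'WRITE', 'element_name': 'counter1', 'handler_name': 'reset'},
    ]
    # returns an OrderedDict keyed by "<element>.<handler>"; operations that
    # fail map to the raised ControlError instance instead of a value
    return client.operations_sequence(operations)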
if __name__ == "__main__":
cs = ClickControlClient()
cs.connect(("127.0.0.1", 9000))
print("Click version: {version}".format(version=cs.engine_version()))
print(cs.read_handler(None, 'handlers'))
print("Packages:{packages}".format(packages=cs.loaded_packages()))
print("Supported elements: {elements}".format(elements=cs.supported_elements()))
print('Router config:\n{config}'.format(config=cs.running_config()))
for element in cs.elements_names():
s = "%s\n" % element
for handler_name, handler_attr in cs.element_handlers(element):
if 'r' in handler_attr and handler_name != 'handlers':
handler_value = cs.read_handler(element, handler_name)
s += "\t%s: %s\n" % (handler_name, repr(handler_value))
print(s)
|
the-stack_106_13147
|
import torch.nn as nn
import torch
import numpy as np
import math
from models.Self_Attn import buildSelf_Attn
class Fir_enc(nn.Module):
def __init__(self, input_size,hidden_size,n_layers,dropout,bidirectional,duration,config):
super(Fir_enc, self).__init__()
self.v2h=nn.Linear(input_size,hidden_size)
self.duration =duration
self.lstm = nn.LSTM(
input_size=hidden_size,
hidden_size=hidden_size,
num_layers=n_layers,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional
)
self.attn = buildSelf_Attn(config)
self._init_hidden()
def _init_hidden(self):
nn.init.xavier_normal_(self.v2h.weight)
def forward(self, inputs,input_lengths):
assert inputs.dim() == 3
inputs=self.v2h(inputs)
vid_duration = inputs.shape[1]
ret = []
res_o=[]
for i in range(0, (vid_duration - self.duration), int(self.duration / 2)):
tmp_li = []
for id in range(self.duration):
tmp_li.append(i + id)
ret.append(torch.index_select(inputs, 1, torch.LongTensor(tmp_li).cuda()))
for i in range(len(ret)):
hidden,_ = self.lstm(ret[i])
context = self.attn(hidden)
res_o.append(context)
# res_o.append(self.lstm(ret[i])[1][0][2:,:,:])
inputs = torch.stack(res_o,1)
'''
        Variant without self-attention (kept here for reference):
# inputs = torch.stack(res_o, 2)
# inputs = torch.mean(inputs,0)
'''
# print(input_lengths.sub(self.duration))
# print(torch.ceil(torch.div(input_lengths.sub(self.duration).float(),4.0)).long())
# print(torch.ceil(input_lengths.sub(self.duration)/torch.tensor([2]).cuda()))
#length = math.ceil(float(video_duration-duration)/duration/2))
input_lengths = torch.ceil(torch.div(input_lengths.sub(self.duration).float(),float(self.duration/2))).long()
return inputs, input_lengths
def buildFir_enc(config):
if config.type == 'lstm':
return Fir_enc(
input_size=config.input_size,
hidden_size=config.hidden_size,
n_layers=config.n_layers,
dropout=config.dropout_p,
bidirectional=config.bidirectional,
duration=config.duration,
config = config
)
else:
raise NotImplementedError
|
the-stack_106_13148
|
import string
class MyTemplate(string.Template):
delimiter = '%'
idpattern = '[a-z]+_[a-z]+'
template_text = '''
Delimiter : %%
Replaced : %with_underscore
Ignored : %notunderscored
'''
d = {
'with_underscore': 'replaced',
'notunderscored': 'not replaced',
}
t = MyTemplate(template_text)
print('Modified ID pattern:')
print(t.safe_substitute(d))
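# Expected output ('%%' collapses to '%', and '%notunderscored' is left alone
# because the custom idpattern requires an underscore in the identifier):
#   Modified ID pattern:
#
#   Delimiter : %
#   Replaced : replaced
#   Ignored : %notunderscored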
|
the-stack_106_13149
|
"""The tests for the GeoNet NZ Quakes Feed integration."""
import datetime
from asynctest import patch, CoroutineMock
from homeassistant.components import geonetnz_quakes
from homeassistant.components.geo_location import ATTR_SOURCE
from homeassistant.components.geonetnz_quakes import DEFAULT_SCAN_INTERVAL
from homeassistant.components.geonetnz_quakes.geo_location import (
ATTR_EXTERNAL_ID,
ATTR_MAGNITUDE,
ATTR_LOCALITY,
ATTR_MMI,
ATTR_DEPTH,
ATTR_QUALITY,
)
from homeassistant.const import (
EVENT_HOMEASSISTANT_START,
CONF_RADIUS,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_FRIENDLY_NAME,
ATTR_UNIT_OF_MEASUREMENT,
ATTR_ATTRIBUTION,
ATTR_TIME,
ATTR_ICON,
)
from homeassistant.setup import async_setup_component
from homeassistant.util.unit_system import IMPERIAL_SYSTEM
from tests.common import async_fire_time_changed
import homeassistant.util.dt as dt_util
from tests.components.geonetnz_quakes import _generate_mock_feed_entry
CONFIG = {geonetnz_quakes.DOMAIN: {CONF_RADIUS: 200}}
async def test_setup(hass):
"""Test the general setup of the integration."""
# Set up some mock feed entries for this test.
mock_entry_1 = _generate_mock_feed_entry(
"1234",
"Title 1",
15.5,
(38.0, -3.0),
locality="Locality 1",
attribution="Attribution 1",
time=datetime.datetime(2018, 9, 22, 8, 0, tzinfo=datetime.timezone.utc),
magnitude=5.7,
mmi=5,
depth=10.5,
quality="best",
)
mock_entry_2 = _generate_mock_feed_entry(
"2345", "Title 2", 20.5, (38.1, -3.1), magnitude=4.6
)
mock_entry_3 = _generate_mock_feed_entry(
"3456", "Title 3", 25.5, (38.2, -3.2), locality="Locality 3"
)
mock_entry_4 = _generate_mock_feed_entry("4567", "Title 4", 12.5, (38.3, -3.3))
# Patching 'utcnow' to gain more control over the timed update.
utcnow = dt_util.utcnow()
with patch("homeassistant.util.dt.utcnow", return_value=utcnow), patch(
"aio_geojson_client.feed.GeoJsonFeed.update", new_callable=CoroutineMock
) as mock_feed_update:
mock_feed_update.return_value = "OK", [mock_entry_1, mock_entry_2, mock_entry_3]
assert await async_setup_component(hass, geonetnz_quakes.DOMAIN, CONFIG)
# Artificially trigger update and collect events.
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
all_states = hass.states.async_all()
# 3 geolocation and 1 sensor entities
assert len(all_states) == 4
state = hass.states.get("geo_location.title_1")
assert state is not None
assert state.name == "Title 1"
assert state.attributes == {
ATTR_EXTERNAL_ID: "1234",
ATTR_LATITUDE: 38.0,
ATTR_LONGITUDE: -3.0,
ATTR_FRIENDLY_NAME: "Title 1",
ATTR_LOCALITY: "Locality 1",
ATTR_ATTRIBUTION: "Attribution 1",
ATTR_TIME: datetime.datetime(
2018, 9, 22, 8, 0, tzinfo=datetime.timezone.utc
),
ATTR_MAGNITUDE: 5.7,
ATTR_DEPTH: 10.5,
ATTR_MMI: 5,
ATTR_QUALITY: "best",
ATTR_UNIT_OF_MEASUREMENT: "km",
ATTR_SOURCE: "geonetnz_quakes",
ATTR_ICON: "mdi:pulse",
}
assert float(state.state) == 15.5
state = hass.states.get("geo_location.title_2")
assert state is not None
assert state.name == "Title 2"
assert state.attributes == {
ATTR_EXTERNAL_ID: "2345",
ATTR_LATITUDE: 38.1,
ATTR_LONGITUDE: -3.1,
ATTR_FRIENDLY_NAME: "Title 2",
ATTR_MAGNITUDE: 4.6,
ATTR_UNIT_OF_MEASUREMENT: "km",
ATTR_SOURCE: "geonetnz_quakes",
ATTR_ICON: "mdi:pulse",
}
assert float(state.state) == 20.5
state = hass.states.get("geo_location.title_3")
assert state is not None
assert state.name == "Title 3"
assert state.attributes == {
ATTR_EXTERNAL_ID: "3456",
ATTR_LATITUDE: 38.2,
ATTR_LONGITUDE: -3.2,
ATTR_FRIENDLY_NAME: "Title 3",
ATTR_LOCALITY: "Locality 3",
ATTR_UNIT_OF_MEASUREMENT: "km",
ATTR_SOURCE: "geonetnz_quakes",
ATTR_ICON: "mdi:pulse",
}
assert float(state.state) == 25.5
# Simulate an update - two existing, one new entry, one outdated entry
mock_feed_update.return_value = "OK", [mock_entry_1, mock_entry_4, mock_entry_3]
async_fire_time_changed(hass, utcnow + DEFAULT_SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 4
# Simulate an update - empty data, but successful update,
# so no changes to entities.
mock_feed_update.return_value = "OK_NO_DATA", None
async_fire_time_changed(hass, utcnow + 2 * DEFAULT_SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 4
        # Simulate an update - error response, removes all geolocation entities
mock_feed_update.return_value = "ERROR", None
async_fire_time_changed(hass, utcnow + 3 * DEFAULT_SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 1
async def test_setup_imperial(hass):
"""Test the setup of the integration using imperial unit system."""
hass.config.units = IMPERIAL_SYSTEM
# Set up some mock feed entries for this test.
mock_entry_1 = _generate_mock_feed_entry("1234", "Title 1", 15.5, (38.0, -3.0))
# Patching 'utcnow' to gain more control over the timed update.
utcnow = dt_util.utcnow()
with patch("homeassistant.util.dt.utcnow", return_value=utcnow), patch(
"aio_geojson_client.feed.GeoJsonFeed.update", new_callable=CoroutineMock
) as mock_feed_update, patch(
"aio_geojson_client.feed.GeoJsonFeed.__init__",
new_callable=CoroutineMock,
create=True,
) as mock_feed_init, patch(
"aio_geojson_client.feed.GeoJsonFeed.last_timestamp",
new_callable=CoroutineMock,
create=True,
):
mock_feed_update.return_value = "OK", [mock_entry_1]
assert await async_setup_component(hass, geonetnz_quakes.DOMAIN, CONFIG)
# Artificially trigger update and collect events.
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 2
# Test conversion of 200 miles to kilometers.
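        # (200 mi * 1.609344 km/mi = 321.8688 km)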
assert mock_feed_init.call_args[1].get("filter_radius") == 321.8688
state = hass.states.get("geo_location.title_1")
assert state is not None
assert state.name == "Title 1"
assert state.attributes == {
ATTR_EXTERNAL_ID: "1234",
ATTR_LATITUDE: 38.0,
ATTR_LONGITUDE: -3.0,
ATTR_FRIENDLY_NAME: "Title 1",
ATTR_UNIT_OF_MEASUREMENT: "mi",
ATTR_SOURCE: "geonetnz_quakes",
ATTR_ICON: "mdi:pulse",
}
assert float(state.state) == 9.6
|
the-stack_106_13151
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Compare two or more nbxds to each other.
To use, create a class that implements get_tests(), and pass it in
as the test generator to TestManager. get_tests() should be a python
generator that returns TestInstance objects. See below for definition.
TestNode behaves as follows:
Configure with a BlockStore and TxStore
on_inv: log the message but don't request
on_headers: log the chain tip
on_pong: update ping response map (for synchronization)
on_getheaders: provide headers via BlockStore
on_getdata: provide blocks via BlockStore
"""
from .mininode import *
from .blockstore import BlockStore, TxStore
from .util import p2p_port, wait_until
import logging
logger=logging.getLogger("TestFramework.comptool")
global mininode_lock
class RejectResult():
"""Outcome that expects rejection of a transaction or block."""
def __init__(self, code, reason=b''):
self.code = code
self.reason = reason
def match(self, other):
if self.code != other.code:
return False
return other.reason.startswith(self.reason)
def __repr__(self):
return '%i:%s' % (self.code,self.reason or '*')
class TestNode(P2PInterface):
def __init__(self, block_store, tx_store):
super().__init__()
self.bestblockhash = None
self.block_store = block_store
self.block_request_map = {}
self.tx_store = tx_store
self.tx_request_map = {}
self.block_reject_map = {}
self.tx_reject_map = {}
# When the pingmap is non-empty we're waiting for
# a response
self.pingMap = {}
self.lastInv = []
self.closed = False
def on_close(self):
self.closed = True
def on_headers(self, message):
if len(message.headers) > 0:
best_header = message.headers[-1]
best_header.calc_sha256()
self.bestblockhash = best_header.sha256
def on_getheaders(self, message):
response = self.block_store.headers_for(message.locator, message.hashstop)
if response is not None:
self.send_message(response)
def on_getdata(self, message):
[self.send_message(r) for r in self.block_store.get_blocks(message.inv)]
[self.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
for i in message.inv:
if i.type == 1 or i.type == 1 | (1 << 30): # MSG_TX or MSG_WITNESS_TX
self.tx_request_map[i.hash] = True
elif i.type == 2 or i.type == 2 | (1 << 30): # MSG_BLOCK or MSG_WITNESS_BLOCK
self.block_request_map[i.hash] = True
def on_inv(self, message):
self.lastInv = [x.hash for x in message.inv]
def on_pong(self, message):
try:
del self.pingMap[message.nonce]
except KeyError:
raise AssertionError("Got pong for unknown ping [%s]" % repr(message))
def on_reject(self, message):
if message.message == b'tx':
self.tx_reject_map[message.data] = RejectResult(message.code, message.reason)
if message.message == b'block':
self.block_reject_map[message.data] = RejectResult(message.code, message.reason)
def send_inv(self, obj):
mtype = 2 if isinstance(obj, CBlock) else 1
self.send_message(msg_inv([CInv(mtype, obj.sha256)]))
def send_getheaders(self):
# We ask for headers from their last tip.
m = msg_getheaders()
m.locator = self.block_store.get_locator(self.bestblockhash)
self.send_message(m)
def send_header(self, header):
m = msg_headers()
m.headers.append(header)
self.send_message(m)
# This assumes BIP31
def send_ping(self, nonce):
self.pingMap[nonce] = True
self.send_message(msg_ping(nonce))
def received_ping_response(self, nonce):
return nonce not in self.pingMap
def send_mempool(self):
self.lastInv = []
self.send_message(msg_mempool())
# TestInstance:
#
# Instances of these are generated by the test generator, and fed into the
# comptool.
#
# "blocks_and_transactions" should be an array of
# [obj, True/False/None, hash/None]:
# - obj is either a CBlock, CBlockHeader, or a CTransaction, and
# - the second value indicates whether the object should be accepted
# into the blockchain or mempool (for tests where we expect a certain
# answer), or "None" if we don't expect a certain answer and are just
# comparing the behavior of the nodes being tested.
# - the third value is the hash to test the tip against (if None or omitted,
# use the hash of the block)
# - NOTE: if a block header, no test is performed; instead the header is
# just added to the block_store. This is to facilitate block delivery
# when communicating with headers-first clients (when withholding an
# intermediate block).
# sync_every_block: if True, then each block will be inv'ed, synced, and
# nodes will be tested based on the outcome for the block. If False,
# then inv's accumulate until all blocks are processed (or max inv size
# is reached) and then sent out in one inv message. Then the final block
# will be synced across all connections, and the outcome of the final
# block will be tested.
# sync_every_tx: analogous to behavior for sync_every_block, except if outcome
# on the final tx is None, then contents of entire mempool are compared
# across all connections. (If outcome of final tx is specified as true
# or false, then only the last tx is tested against outcome.)
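# A minimal, hypothetical sketch of the pattern described above (the block and
# transaction builders are assumed helpers, not part of this module):
#
#   class ExampleTestGen:
#       def get_tests(self):
#           block = build_valid_block()        # assumed helper
#           yield TestInstance([[block, True]])
#           bad_tx = build_invalid_tx()        # assumed helper
#           yield TestInstance([[bad_tx, RejectResult(16, b'bad-txns')]],
#                              sync_every_block=False, sync_every_tx=True)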
class TestInstance():
def __init__(self, objects=None, sync_every_block=True, sync_every_tx=False):
self.blocks_and_transactions = objects if objects else []
self.sync_every_block = sync_every_block
self.sync_every_tx = sync_every_tx
class TestManager():
def __init__(self, testgen, datadir):
self.test_generator = testgen
self.p2p_connections= []
self.block_store = BlockStore(datadir)
self.tx_store = TxStore(datadir)
self.ping_counter = 1
def add_all_connections(self, nodes):
for i in range(len(nodes)):
# Create a p2p connection to each node
node = TestNode(self.block_store, self.tx_store)
node.peer_connect('127.0.0.1', p2p_port(i))
self.p2p_connections.append(node)
def clear_all_connections(self):
self.p2p_connections = []
def wait_for_disconnections(self):
def disconnected():
return all(node.closed for node in self.p2p_connections)
wait_until(disconnected, timeout=10, lock=mininode_lock)
def wait_for_verack(self):
return all(node.wait_for_verack() for node in self.p2p_connections)
def wait_for_pings(self, counter):
def received_pongs():
return all(node.received_ping_response(counter) for node in self.p2p_connections)
wait_until(received_pongs, lock=mininode_lock)
# sync_blocks: Wait for all connections to request the blockhash given
# then send get_headers to find out the tip of each node, and synchronize
# the response by using a ping (and waiting for pong with same nonce).
def sync_blocks(self, blockhash, num_blocks):
def blocks_requested():
return all(
blockhash in node.block_request_map and node.block_request_map[blockhash]
for node in self.p2p_connections
)
# --> error if not requested
wait_until(blocks_requested, attempts=20*num_blocks, lock=mininode_lock)
# Send getheaders message
[ c.send_getheaders() for c in self.p2p_connections ]
# Send ping and wait for response -- synchronization hack
[ c.send_ping(self.ping_counter) for c in self.p2p_connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Analogous to sync_block (see above)
def sync_transaction(self, txhash, num_events):
# Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
def transaction_requested():
return all(
txhash in node.tx_request_map and node.tx_request_map[txhash]
for node in self.p2p_connections
)
# --> error if not requested
wait_until(transaction_requested, attempts=20*num_events, lock=mininode_lock)
# Get the mempool
[ c.send_mempool() for c in self.p2p_connections ]
# Send ping and wait for response -- synchronization hack
[ c.send_ping(self.ping_counter) for c in self.p2p_connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Sort inv responses from each node
with mininode_lock:
[ c.lastInv.sort() for c in self.p2p_connections ]
# Verify that the tip of each connection all agree with each other, and
# with the expected outcome (if given)
def check_results(self, blockhash, outcome):
with mininode_lock:
for c in self.p2p_connections:
if outcome is None:
if c.bestblockhash != self.p2p_connections[0].bestblockhash:
return False
elif isinstance(outcome, RejectResult): # Check that block was rejected w/ code
if c.bestblockhash == blockhash:
return False
if blockhash not in c.block_reject_map:
logger.error('Block not in reject map: %064x' % (blockhash))
return False
if not outcome.match(c.block_reject_map[blockhash]):
logger.error('Block rejected with %s instead of expected %s: %064x' % (c.block_reject_map[blockhash], outcome, blockhash))
return False
elif ((c.bestblockhash == blockhash) != outcome):
return False
return True
# Either check that the mempools all agree with each other, or that
# txhash's presence in the mempool matches the outcome specified.
# This is somewhat of a strange comparison, in that we're either comparing
# a particular tx to an outcome, or the entire mempools altogether;
# perhaps it would be useful to add the ability to check explicitly that
# a particular tx's existence in the mempool is the same across all nodes.
def check_mempool(self, txhash, outcome):
with mininode_lock:
for c in self.p2p_connections:
if outcome is None:
# Make sure the mempools agree with each other
if c.lastInv != self.p2p_connections[0].lastInv:
return False
elif isinstance(outcome, RejectResult): # Check that tx was rejected w/ code
if txhash in c.lastInv:
return False
if txhash not in c.tx_reject_map:
logger.error('Tx not in reject map: %064x' % (txhash))
return False
if not outcome.match(c.tx_reject_map[txhash]):
logger.error('Tx rejected with %s instead of expected %s: %064x' % (c.tx_reject_map[txhash], outcome, txhash))
return False
elif ((txhash in c.lastInv) != outcome):
return False
return True
def run(self):
# Wait until verack is received
self.wait_for_verack()
test_number = 0
tests = self.test_generator.get_tests()
for test_instance in tests:
test_number += 1
logger.info("Running test %d: %s line %s" % (test_number, tests.gi_code.co_filename, tests.gi_frame.f_lineno))
# We use these variables to keep track of the last block
# and last transaction in the tests, which are used
# if we're not syncing on every block or every tx.
[ block, block_outcome, tip ] = [ None, None, None ]
[ tx, tx_outcome ] = [ None, None ]
invqueue = []
for test_obj in test_instance.blocks_and_transactions:
b_or_t = test_obj[0]
outcome = test_obj[1]
# Determine if we're dealing with a block or tx
if isinstance(b_or_t, CBlock): # Block test runner
block = b_or_t
block_outcome = outcome
tip = block.sha256
# each test_obj can have an optional third argument
# to specify the tip we should compare with
# (default is to use the block being tested)
if len(test_obj) >= 3:
tip = test_obj[2]
# Add to shared block_store, set as current block
# If there was an open getdata request for the block
# previously, and we didn't have an entry in the
# block_store, then immediately deliver, because the
# node wouldn't send another getdata request while
# the earlier one is outstanding.
first_block_with_hash = True
if self.block_store.get(block.sha256) is not None:
first_block_with_hash = False
with mininode_lock:
self.block_store.add_block(block)
for c in self.p2p_connections:
if first_block_with_hash and block.sha256 in c.block_request_map and c.block_request_map[block.sha256] == True:
# There was a previous request for this block hash
# Most likely, we delivered a header for this block
# but never had the block to respond to the getdata
c.send_message(msg_block(block))
else:
c.block_request_map[block.sha256] = False
# Either send inv's to each node and sync, or add
# to invqueue for later inv'ing.
if (test_instance.sync_every_block):
# if we expect success, send inv and sync every block
# if we expect failure, just push the block and see what happens.
if outcome == True:
[ c.send_inv(block) for c in self.p2p_connections ]
self.sync_blocks(block.sha256, 1)
else:
[ c.send_message(msg_block(block)) for c in self.p2p_connections ]
[ c.send_ping(self.ping_counter) for c in self.p2p_connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
if (not self.check_results(tip, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(2, block.sha256))
elif isinstance(b_or_t, CBlockHeader):
block_header = b_or_t
self.block_store.add_header(block_header)
[ c.send_header(block_header) for c in self.p2p_connections ]
else: # Tx test runner
assert(isinstance(b_or_t, CTransaction))
tx = b_or_t
tx_outcome = outcome
# Add to shared tx store and clear map entry
with mininode_lock:
self.tx_store.add_transaction(tx)
for c in self.p2p_connections:
c.tx_request_map[tx.sha256] = False
# Again, either inv to all nodes or save for later
if (test_instance.sync_every_tx):
[ c.send_inv(tx) for c in self.p2p_connections ]
self.sync_transaction(tx.sha256, 1)
if (not self.check_mempool(tx.sha256, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(1, tx.sha256))
# Ensure we're not overflowing the inv queue
if len(invqueue) == MAX_INV_SZ:
[ c.send_message(msg_inv(invqueue)) for c in self.p2p_connections ]
invqueue = []
# Do final sync if we weren't syncing on every block or every tx.
if (not test_instance.sync_every_block and block is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.p2p_connections ]
invqueue = []
self.sync_blocks(block.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_results(tip, block_outcome)):
raise AssertionError("Block test failed at test %d" % test_number)
if (not test_instance.sync_every_tx and tx is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.p2p_connections ]
invqueue = []
self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_mempool(tx.sha256, tx_outcome)):
raise AssertionError("Mempool test failed at test %d" % test_number)
[ c.disconnect_node() for c in self.p2p_connections ]
self.wait_for_disconnections()
self.block_store.close()
self.tx_store.close()
|
the-stack_106_13155
|
# encoding: utf-8
from nose.tools import assert_in
from ckan.tests.legacy.functional.api.base import *
class ApiTestCase(ApiTestCase, ControllerTestCase):
def test_get_api(self):
offset = self.offset('')
res = self.app.get(offset, status=[200])
self.assert_version_data(res)
def assert_version_data(self, res):
data = self.data_from_res(res)
assert 'version' in data, data
expected_version = self.get_expected_api_version()
self.assert_equal(data['version'], expected_version)
class TestApi3(Api3TestCase, ApiTestCase):
def test_readonly_is_get_able_with_normal_url_params(self):
'''Test that a read-only action is GET-able
Picks an action within `get.py` and checks that it works if it's
invoked with a http GET request. The action's data_dict is
populated from the url parameters.
'''
offset = self.offset('/action/package_search')
params = {'q': 'russian'}
res = self.app.get(offset, params=params, status=[200])
def test_sideeffect_action_is_not_get_able(self):
'''Test that a non-readonly action is not GET-able.
Picks an action outside of `get.py`, and checks that it 400s if an
attempt to invoke with a http GET request is made.
'''
offset = self.offset('/action/package_create')
data_dict = {
'type': 'dataset',
'name': 'a-name'
}
res = self.app.get(offset,
params=data_dict,
status=[400],
expect_errors=True)
assert_in('Bad request - JSON Error: Invalid request.'\
' Please use POST method for your request',
res.body)
|
the-stack_106_13157
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.animation import FuncAnimation
import matplotlib as mpl
from matplotlib.gridspec import GridSpec
from constants import *
from initial import *
import physics as p
from physics import Etot, predicted_orbit
import multiprocessing as mp
from tqdm import tqdm
def Draw_ani(t, x, y, vx, vy):
# Visualization Setup
fig = plt.figure(figsize = (8, 4.5), facecolor=COLOR)
gs = GridSpec(2, 4, figure=fig)
# Picture
ax = fig.add_subplot(gs[:, :2])
ax.set_facecolor(COLOR)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.spines['bottom'].set_color(COLOR)
ax.spines['top'].set_color(COLOR)
ax.spines['right'].set_color(COLOR)
ax.spines['left'].set_color(COLOR)
# Solar system bodies
sun = plt.Circle((0, 0), Rsun, color='y')
mercury = plt.Circle((0, 0), 0.387*AU, edgecolor='cyan', fill=False)
venus = plt.Circle((0, 0), 0.723*AU, edgecolor='y', fill=False)
earth = plt.Circle((0, 0), 1.*AU, edgecolor='skyblue', fill=False)
mars = plt.Circle((0, 0), 1.524*AU, edgecolor='r', fill=False)
ax.add_patch(sun)
ax.add_patch(mercury)
ax.add_patch(venus)
ax.add_patch(earth)
ax.add_patch(mars)
ax.set_aspect('equal', 'box')
line, = ax.plot(x[0], y[0], color='silver', linestyle='-', linewidth=1)
predx, predy = predicted_orbit(x[0], y[0], vx[0], vy[0])
pred_traj, = ax.plot(predx, predy, color='silver', linestyle=':', linewidth=1)
dot, = ax.plot([], [], color='silver', marker='o', markersize=1, markeredgecolor='w', linestyle='')
#Vel = ax.text(0.05, 0.9, 'Velocity: {:.2e} m/s'.format(np.sqrt(vx[0]**2 + vy[0]**2)), horizontalalignment='left',
# verticalalignment='top', transform=ax.transAxes, color='w')
#E_tot = ax.text(0.05, 0.85, 'Specific Total Energy: {:.2e} J/kg'.format(p.Etot(x[0], y[0], vx[0], vy[0])), horizontalalignment='left',
# verticalalignment='top', transform=ax.transAxes, color='w')
#Time = ax.text(0.05, 0.95, 'Time: {:.2f} yr'.format(t[0]/86400/365), horizontalalignment='left',
# verticalalignment='top', transform=ax.transAxes, color='w')
ax.set_xlim([-Box_size,Box_size])
ax.set_ylim([-Box_size,Box_size])
#%%
# Velocity Plot
ax1 = fig.add_subplot(gs[0, 2:])
ax1.set_facecolor(COLOR)
velline, = ax1.plot(t[0]/yr2s, np.sqrt(vx[0]**2+vy[0]**2), color='silver')
ax1.spines['bottom'].set_color(LineColor)
ax1.spines['top'].set_color(LineColor)
ax1.spines['right'].set_color(LineColor)
ax1.spines['left'].set_color(LineColor)
ax1.set_xlim([0,tmax/yr2s])
ax1.set_ylim([0,np.max(np.sqrt(vx**2+vy**2))*1.2])
ax1.tick_params(labelcolor=LineColor, labelsize='medium', width=3, colors=LineColor)
ax1.ticklabel_format(axis='y', style='sci', useMathText=True, scilimits=(4,5))
ax1.set_xlabel('Time (yr)')
ax1.set_ylabel('Velocity (m/s)')
ax1.xaxis.label.set_color(LineColor)
ax1.yaxis.label.set_color(LineColor)
#%%
# Energy Plot
ax2 = fig.add_subplot(gs[1, 2:])
ax2.set_facecolor(COLOR)
Etotline, = ax2.plot(t[0]/yr2s, Etot(x[0], y[0], vx[0], vy[0]), color='silver')
ax2.spines['bottom'].set_color(LineColor)
ax2.spines['top'].set_color(LineColor)
ax2.spines['right'].set_color(LineColor)
ax2.spines['left'].set_color(LineColor)
ax2.set_xlim([0, tmax/yr2s])
ax2.set_ylim([np.min(Etot(x, y, vx, vy))*1.2, np.max(Etot(x, y, vx, vy))*1.2])
ax2.tick_params(labelcolor=LineColor, labelsize='medium', width=3, colors=LineColor)
ax2.ticklabel_format(style='sci', useMathText=True)
ax2.set_xlabel('Time (yr)')
ax2.set_ylabel('Specific total energy (J/kg)')
ax2.xaxis.label.set_color(LineColor)
ax2.yaxis.label.set_color(LineColor)
plt.tight_layout()
#%%
ms2AUyr = 86400*365/1.5e11
def update(i):
dot.set_data(x[i], y[i])
line.set_data(x[:i], y[:i])
velline.set_data(t[:i]/yr2s, np.sqrt(vx[:i]**2+vy[:i]**2))
Etotline.set_data(t[:i]/yr2s, Etot(x[:i], y[:i], vx[:i], vy[:i]))
predx, predy = predicted_orbit(x[i], y[i], vx[i], vy[i])
pred_traj.set_data(predx, predy)
r = np.sqrt(x[i]**2 + y[i]**2)
if Tracing:
ax.set_xlim([-1.5*r,1.5*r])
ax.set_ylim([-1.5*r,1.5*r])
O1 = ax.add_patch(sun)
O2 = ax.add_patch(mercury)
O3 = ax.add_patch(venus)
O4 = ax.add_patch(earth)
O5 = ax.add_patch(mars)
#Vel.set_text('Velocity: {:.2e} m/s'.format(np.sqrt(vx[i]**2 + vy[i]**2)))
#Vel.set_text('Velocity: {:.2e} AU/yr'.format(np.sqrt(vx[i]**2 + vy[i]**2)*ms2AUyr))
#E_tot.set_text('Total Energy: {:.2e} J/kg'.format(Etot(x[i], y[i], vx[i], vy[i])))
#Time.set_text('Time: {:.2f} yr'.format(t[i]/86400/365))
return [dot, line, velline, Etotline, pred_traj, O1, O2, O3, O4, O5]
ani = FuncAnimation(fig=fig,
func=update,
frames=frames,
interval=10000/frames,
blit=True,
repeat=False)
if SAVE_VIDEO:
Total_frames = VIDEO_FPS*VIDEO_LEN
Frames = np.linspace(0, frames, Total_frames).astype(int)
def save(start, end):
for i in tqdm(range(start, end)):
update(Frames[i])
plt.savefig("./Temp/Frame_{:04d}.png".format(i), dpi=300, facecolor=COLOR)
Pro_List = []
FPT = int(len(Frames)/(n_process-1))
resid = len(Frames)%FPT
for i in range(n_process-1): # Append the processes into the list
start = int(i*FPT)
end = int((i+1)*FPT)
print(start, end)
Pro_List.append(mp.Process(target=save, args=(start, end)))
Pro_List.append(mp.Process(target=save, args=((n_process-1)*FPT, len(Frames))))
for p in Pro_List: # Start running
p.start()
for p in Pro_List: # Wait for all the processes to finish before moving on
p.join()
return ani
|
the-stack_106_13158
|
import unittest
from unittest import TestCase
from beval.criteria import Ctx, to_criteria, Not, And, Eq, criteria_class, cTrue, cFalse
from test_helper import acura_small
class TestNot(TestCase):
def test_not_simple(self):
not_ = Not(cTrue)
(ans, err) = not_(Ctx({}))
self.assertFalse(ans)
self.assertIsNone(err)
not_ = Not(cFalse)
(ans, err) = not_(Ctx({}))
self.assertTrue(ans)
self.assertIsNone(err)
def test_not_and(self):
and_ = And(cTrue, cFalse)
not_ = Not(and_)
(ans, err) = not_(Ctx({}))
self.assertTrue(ans)
self.assertIsNone(err)
not__ = Not(not_)
(ans, err) = not__(Ctx({}))
self.assertFalse(ans)
self.assertIsNone(err)
def test_not_eq(self):
with acura_small as acura:
ctx = Ctx(acura)
eq_ = Eq("make", "Acura")
not_ = Not(eq_)
(ans, err) = eq_(ctx)
self.assertTrue(ans)
self.assertIsNone(err)
(ans, err) = not_(ctx)
self.assertFalse(ans)
self.assertIsNone(err)
def test_ser_not_eq(self):
expected = "not (make == 'Acura')"
not_ = Not(Eq("make", "Acura"))
text = str(not_)
self.assertEqual(text, expected)
not2_ = to_criteria(text)
self.assertIsInstance(not2_, Not)
text2 = str(not2_)
self.assertEqual(text, text2)
text3 = "not make == 'Acura'"
not3_ = to_criteria(text3)
self.assertIsInstance(not3_, Not)
text4 = str(not3_)
        self.assertNotEqual(text3, text4)
self.assertEqual(text4, expected)
def test_ser_not_bool(self):
expected = "not (active)"
not_ = to_criteria(expected)
text = str(not_)
self.assertEqual(expected, text)
text2 = "not active"
not2_ = to_criteria(text2)
text3 = str(not2_)
self.assertNotEqual(text2, text3)
self.assertEqual(text3, expected)
expected = "not (True)"
not_ = to_criteria(expected)
text = str(not_)
self.assertEqual(expected, text)
text2 = "not True"
not2_ = to_criteria(text2)
text3 = str(not2_)
self.assertNotEqual(text2, text3)
self.assertEqual(text3, expected)
expected = "not (1)"
text2 = "not 1"
not2_ = to_criteria(text2)
text3 = str(not2_)
self.assertNotEqual(text2, text3)
self.assertEqual(text3, expected)
(ans, err) = not2_(Ctx({}))
self.assertFalse(ans)
self.assertIsNone(err)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_13159
|
from mailwand import MailWand
if __name__ == '__main__':
# variable is 'Optional parameters'
    # Replace '{Sir}' in the HTML template with 'people_name'
    variable = {'{Sir}': 'people_name'}
    # With no extra arguments, the files configured in ./config/base.py are used
MailWand_1 = MailWand(variable)
    to_emails = ['[email protected]', '[email protected]']  # recipients
    MailWand_1.send_to_mail(to_emails)  # bulk send (recipients cannot see each other)
    MailWand_1.close()  # close the SMTP connection
# variable is 'Optional parameters'
    # Replace '{Sir}' in the HTML template with 'people_name2'
    variable2 = {'{Sir}': 'people_name2'}
    # Attributes can also be set directly; there are currently 9 of them, listed in ./config/base.py
MailWand_2 = MailWand(
variable2,
        header='這是第2種',  # email subject
        images_path='./images2/',  # currently only PNG images are supported
        html_file='./template2.html'  # path to the HTML file
)
to_emails2 = ['[email protected]', '[email protected]']
    MailWand_2.send_to_multiple_recipients_mail(to_emails2)  # bulk send (recipients can see each other)
    MailWand_2.close()  # close the SMTP connection
|
the-stack_106_13161
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Runs a ResNet model on the CIFAR-10 dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app as absl_app
from absl import flags
import tensorflow as tf # pylint: disable=g-bad-import-order
from official.utils.flags import core as flags_core
from official.utils.logs import logger
from official.resnet import resnet_model
from official.resnet import resnet_run_loop
_HEIGHT = 32
_WIDTH = 32
_NUM_CHANNELS = 3
_DEFAULT_IMAGE_BYTES = _HEIGHT * _WIDTH * _NUM_CHANNELS
# The record is the image plus a one-byte label
_RECORD_BYTES = _DEFAULT_IMAGE_BYTES + 1
_NUM_CLASSES = 10
_NUM_DATA_FILES = 5
_NUM_IMAGES = {
'train': 50000,
'validation': 10000,
}
DATASET_NAME = 'CIFAR-10'
###############################################################################
# Data processing
###############################################################################
def get_filenames(is_training, data_dir):
"""Returns a list of filenames."""
data_dir = os.path.join(data_dir, 'cifar-10-batches-bin')
assert os.path.exists(data_dir), (
'Run cifar10_download_and_extract.py first to download and extract the '
'CIFAR-10 data.')
if is_training:
return [
os.path.join(data_dir, 'data_batch_%d.bin' % i)
for i in range(1, _NUM_DATA_FILES + 1)
]
else:
return [os.path.join(data_dir, 'test_batch.bin')]
def parse_record(raw_record, is_training, dtype):
"""Parse CIFAR-10 image and label from a raw record."""
# Convert bytes to a vector of uint8 that is record_bytes long.
record_vector = tf.decode_raw(raw_record, tf.uint8)
# The first byte represents the label, which we convert from uint8 to int32
# and then to one-hot.
label = tf.cast(record_vector[0], tf.int32)
# The remaining bytes after the label represent the image, which we reshape
# from [depth * height * width] to [depth, height, width].
depth_major = tf.reshape(record_vector[1:_RECORD_BYTES],
[_NUM_CHANNELS, _HEIGHT, _WIDTH])
# Convert from [depth, height, width] to [height, width, depth], and cast as
# float32.
image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)
image = preprocess_image(image, is_training)
image = tf.cast(image, dtype)
return image, label
def preprocess_image(image, is_training):
"""Preprocess a single image of layout [height, width, depth]."""
if is_training:
# Resize the image to add four extra pixels on each side.
image = tf.image.resize_image_with_crop_or_pad(
image, _HEIGHT + 8, _WIDTH + 8)
# Randomly crop a [_HEIGHT, _WIDTH] section of the image.
image = tf.random_crop(image, [_HEIGHT, _WIDTH, _NUM_CHANNELS])
# Randomly flip the image horizontally.
image = tf.image.random_flip_left_right(image)
# Subtract off the mean and divide by the variance of the pixels.
image = tf.image.per_image_standardization(image)
return image
def input_fn(is_training, data_dir, batch_size, num_epochs=1, num_gpus=None,
dtype=tf.float32):
"""Input function which provides batches for train or eval.
Args:
is_training: A boolean denoting whether the input is for training.
data_dir: The directory containing the input data.
batch_size: The number of samples per batch.
num_epochs: The number of epochs to repeat the dataset.
num_gpus: The number of gpus used for training.
dtype: Data type to use for images/features
Returns:
A dataset that can be used for iteration.
"""
filenames = get_filenames(is_training, data_dir)
dataset = tf.data.FixedLengthRecordDataset(filenames, _RECORD_BYTES)
return resnet_run_loop.process_record_dataset(
dataset=dataset,
is_training=is_training,
batch_size=batch_size,
shuffle_buffer=_NUM_IMAGES['train'],
parse_record_fn=parse_record,
num_epochs=num_epochs,
num_gpus=num_gpus,
examples_per_epoch=_NUM_IMAGES['train'] if is_training else None,
dtype=dtype
)
def get_synth_input_fn(dtype):
return resnet_run_loop.get_synth_input_fn(
_HEIGHT, _WIDTH, _NUM_CHANNELS, _NUM_CLASSES, dtype=dtype)
###############################################################################
# Running the model
###############################################################################
class Cifar10Model(resnet_model.Model):
"""Model class with appropriate defaults for CIFAR-10 data."""
def __init__(self, resnet_size, data_format=None, num_classes=_NUM_CLASSES,
resnet_version=resnet_model.DEFAULT_VERSION,
dtype=resnet_model.DEFAULT_DTYPE):
"""These are the parameters that work for CIFAR-10 data.
Args:
resnet_size: The number of convolutional layers needed in the model.
data_format: Either 'channels_first' or 'channels_last', specifying which
data format to use when setting up the model.
num_classes: The number of output classes needed from the model. This
enables users to extend the same model to their own datasets.
resnet_version: Integer representing which version of the ResNet network
to use. See README for details. Valid values: [1, 2]
dtype: The TensorFlow dtype to use for calculations.
Raises:
ValueError: if invalid resnet_size is chosen
"""
if resnet_size % 6 != 2:
raise ValueError('resnet_size must be 6n + 2:', resnet_size)
num_blocks = (resnet_size - 2) // 6
super(Cifar10Model, self).__init__(
resnet_size=resnet_size,
bottleneck=False,
num_classes=num_classes,
num_filters=16,
kernel_size=3,
conv_stride=1,
first_pool_size=None,
first_pool_stride=None,
block_sizes=[num_blocks] * 3,
block_strides=[1, 2, 2],
final_size=64,
resnet_version=resnet_version,
data_format=data_format,
dtype=dtype
)
def cifar10_model_fn(features, labels, mode, params):
"""Model function for CIFAR-10."""
features = tf.reshape(features, [-1, _HEIGHT, _WIDTH, _NUM_CHANNELS])
learning_rate_fn = resnet_run_loop.learning_rate_with_decay(
batch_size=params['batch_size'], batch_denom=128,
num_images=_NUM_IMAGES['train'], boundary_epochs=[100, 150, 200],
decay_rates=[1, 0.1, 0.01, 0.001])
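  # With the official learning_rate_with_decay helper this is assumed to give
  # a base rate of 0.1 * batch_size / 128, multiplied by 1, 0.1, 0.01 and
  # 0.001 once epochs 100, 150 and 200 are reached.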
# We use a weight decay of 0.0002, which performs better
# than the 0.0001 that was originally suggested.
weight_decay = 2e-4
# Empirical testing showed that including batch_normalization variables
# in the calculation of regularized loss helped validation accuracy
# for the CIFAR-10 dataset, perhaps because the regularization prevents
# overfitting on the small data set. We therefore include all vars when
# regularizing and computing loss during training.
def loss_filter_fn(_):
return True
return resnet_run_loop.resnet_model_fn(
features=features,
labels=labels,
mode=mode,
model_class=Cifar10Model,
resnet_size=params['resnet_size'],
weight_decay=weight_decay,
learning_rate_fn=learning_rate_fn,
momentum=0.9,
data_format=params['data_format'],
resnet_version=params['resnet_version'],
loss_scale=params['loss_scale'],
loss_filter_fn=loss_filter_fn,
dtype=params['dtype'],
fine_tune=params['fine_tune']
)
def define_cifar_flags():
resnet_run_loop.define_resnet_flags()
flags.adopt_module_key_flags(resnet_run_loop)
flags_core.set_defaults(data_dir='/tmp/cifar10_data',
model_dir='/tmp/cifar10_model',
resnet_size='32',
train_epochs=250,
epochs_between_evals=10,
batch_size=128)
def run_cifar(flags_obj):
"""Run ResNet CIFAR-10 training and eval loop.
Args:
flags_obj: An object containing parsed flag values.
"""
input_function = (flags_obj.use_synthetic_data and
get_synth_input_fn(flags_core.get_tf_dtype(flags_obj)) or
input_fn)
resnet_run_loop.resnet_main(
flags_obj, cifar10_model_fn, input_function, DATASET_NAME,
shape=[_HEIGHT, _WIDTH, _NUM_CHANNELS])
def main(_):
with logger.benchmark_context(flags.FLAGS):
run_cifar(flags.FLAGS)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
define_cifar_flags()
absl_app.run(main)
|
the-stack_106_13163
|
import colorsys
import copy
import os
from timeit import default_timer as timer
import gc
import numpy as np
import tensorflow as tf
from PIL import Image, ImageDraw, ImageFont
from tensorflow.keras.layers import Input, Lambda
from tensorflow.keras.models import Model, load_model
from .nets.yolo4 import yolo_body, yolo_eval
from .nets.utils import letterbox_image
from .parameter import Parameter
class YOLO(object):
_defaults = {
'model_path': Parameter.cus_model_path,
"anchors_path": Parameter.cus_anchors_path,
"classes_path": Parameter.cus_classes_path,
"score": 0.5,
"iou": 0.3,
"eager": True,
"max_boxes": 100,
"model_image_size": (416, 416),
"letterbox_image": False,
}
@classmethod
def get_defaults(cls, n):
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
#---------------------------------------------------#
    #   Initialize YOLO
#---------------------------------------------------#
def __init__(self, **kwargs):
self.__dict__.update(self._defaults)
self.class_names = self._get_class()
self.anchors = self._get_anchors()
self.generate()
#---------------------------------------------------#
    #   Get all class names
#---------------------------------------------------#
def _get_class(self):
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
#---------------------------------------------------#
    #   Get all anchor boxes
#---------------------------------------------------#
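    # The anchors file is assumed to be a single comma-separated line of
    # width,height values, e.g. "12,16,19,36,40,28,...", which reshape(-1, 2)
    # turns into an (N, 2) array of anchor boxes.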
def _get_anchors(self):
anchors_path = os.path.expanduser(self.anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
#---------------------------------------------------#
    #   Load the model
#---------------------------------------------------#
def generate(self):
model_path = os.path.expanduser(self.model_path)
assert model_path.endswith(
'.h5'), 'Keras model or weights must be a .h5 file.'
#---------------------------------------------------#
        #   Count the anchors and the classes
#---------------------------------------------------#
num_anchors = len(self.anchors)
num_classes = len(self.class_names)
#---------------------------------------------------------#
        #   Load the model weights
#---------------------------------------------------------#
self.yolo_model = yolo_body(
Input(shape=(None, None, 3)), num_anchors//3, num_classes)
self.yolo_model.load_weights(self.model_path)
print('{} model, anchors, and classes loaded.'.format(model_path))
        # Assign a different color to each class for drawing boxes
hsv_tuples = [(x / len(self.class_names), 1., 1.)
for x in range(len(self.class_names))]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
self.colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
self.colors))
        # Shuffle the colors
np.random.seed(10101)
np.random.shuffle(self.colors)
np.random.seed(None)
#---------------------------------------------------------#
        #   yolo_eval post-processes the raw predictions:
        #   decoding, non-maximum suppression, score thresholding, etc.
#---------------------------------------------------------#
self.input_image_shape = Input([2, ], batch_size=1)
inputs = [*self.yolo_model.output, self.input_image_shape]
outputs = Lambda(yolo_eval, output_shape=(1,), name='yolo_eval',
arguments={'anchors': self.anchors, 'num_classes': len(self.class_names), 'image_shape': self.model_image_size,
'score_threshold': self.score, 'eager': True, 'max_boxes': self.max_boxes, 'letterbox_image': self.letterbox_image})(inputs)
self.yolo_model = Model(
[self.yolo_model.input, self.input_image_shape], outputs)
# @tf.function
def get_pred(self, image_data, input_image_shape):
out_boxes, out_scores, out_classes = self.yolo_model(
[image_data, input_image_shape], training=False)
# del self.yolo_model
# gc.collect()
# tf.keras.backend.clear_session()
return out_boxes, out_scores, out_classes
#---------------------------------------------------#
    #   Detect objects in an image
#---------------------------------------------------#
def detect_image(self, image, show_result=True):
#---------------------------------------------------------#
        #   Pad the image with gray bars for a distortion-free resize,
        #   or simply resize it directly for detection
#---------------------------------------------------------#
boxed_image = image.convert('RGB')
boxed_image = boxed_image.resize(
(self.model_image_size[1], self.model_image_size[0]), Image.BICUBIC)
image_data = np.array(boxed_image, dtype='float32')
image_data /= 255.
#---------------------------------------------------------#
        #   Add the batch_size dimension
#---------------------------------------------------------#
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
#---------------------------------------------------------#
        #   Feed the image into the network for prediction
#---------------------------------------------------------#
input_image_shape = np.expand_dims(
np.array([image.size[1], image.size[0]], dtype='float32'), 0)
out_boxes, out_scores, out_classes = self.get_pred(
image_data, input_image_shape)
# if show_result:
# print('Found {} boxes for {}'.format(len(out_boxes), 'img'))
#---------------------------------------------------------#
        #   Set up the font for drawing labels
#---------------------------------------------------------#
font = ImageFont.truetype(font='model_process/yolo/simhei.ttf',
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = max((image.size[0] + image.size[1]) // 300, 1)
bbox = []
obj_imgs = []
image_np = np.array(image.copy())
for i, c in list(enumerate(out_classes)):
predicted_class = self.class_names[c]
box = out_boxes[i]
score = out_scores[i]
top, left, bottom, right = box
top = top - 5
left = left - 5
bottom = bottom + 5
right = right + 5
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
            # Draw the bounding box
label = '{} {:.2f}'.format(predicted_class, score)
draw = ImageDraw.Draw(image)
label_size = draw.textsize(label, font)
# label = label.encode('utf-8')
if show_result:
print(label, top, left, bottom, right)
            # Keep the cropped object region
obj = image_np[top:bottom, left:right].copy()
obj = Image.fromarray(obj)
obj_imgs.append(obj)
bbox.append([left, top, right, bottom, predicted_class])
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
for i in range(thickness):
draw.rectangle(
[left + i, top + i, right - i, bottom - i],
outline=self.colors[c])
draw.rectangle(
[tuple(text_origin), tuple(text_origin + label_size)],
fill=self.colors[c])
draw.text(text_origin, label, fill=(0, 0, 0), font=font)
del draw
return image, bbox, obj_imgs
|
the-stack_106_13164
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: rax_cdb
short_description: create/delete or resize a Rackspace Cloud Databases instance
description:
- creates / deletes or resize a Rackspace Cloud Databases instance
and optionally waits for it to be 'running'. The name option needs to be
unique since it's used to identify the instance.
options:
name:
type: str
description:
- Name of the databases server instance
required: yes
flavor:
type: int
description:
- flavor to use for the instance 1 to 6 (i.e. 512MB to 16GB)
default: 1
volume:
type: int
description:
- Volume size of the database 1-150GB
default: 2
cdb_type:
type: str
description:
- type of instance (i.e. MySQL, MariaDB, Percona)
default: MySQL
aliases: ['type']
cdb_version:
type: str
description:
- version of database (MySQL supports 5.1 and 5.6, MariaDB supports 10, Percona supports 5.6)
- "The available choices are: C(5.1), C(5.6) and C(10)."
default: 5.6
aliases: ['version']
state:
type: str
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
wait:
description:
- wait for the instance to be in state 'running' before returning
type: bool
default: 'no'
wait_timeout:
type: int
description:
- how long before wait gives up, in seconds
default: 300
author: "Simon JAILLET (@jails)"
extends_documentation_fragment:
- community.general.rackspace
- community.general.rackspace.openstack
'''
EXAMPLES = '''
- name: Build a Cloud Databases
gather_facts: False
tasks:
- name: Server build request
local_action:
module: rax_cdb
credentials: ~/.raxpub
region: IAD
name: db-server1
flavor: 1
volume: 2
cdb_type: MySQL
cdb_version: 5.6
wait: yes
state: present
register: rax_db_server
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module
def find_instance(name):
cdb = pyrax.cloud_databases
instances = cdb.list()
if instances:
for instance in instances:
if instance.name == name:
return instance
return False
def save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait,
wait_timeout):
for arg, value in dict(name=name, flavor=flavor,
volume=volume, type=cdb_type, version=cdb_version
).items():
if not value:
module.fail_json(msg='%s is required for the "rax_cdb"'
' module' % arg)
if not (volume >= 1 and volume <= 150):
module.fail_json(msg='volume is required to be between 1 and 150')
cdb = pyrax.cloud_databases
flavors = []
for item in cdb.list_flavors():
flavors.append(item.id)
if not (flavor in flavors):
        module.fail_json(msg='nonexistent flavor reference "%s"' % str(flavor))
changed = False
instance = find_instance(name)
if not instance:
action = 'create'
try:
instance = cdb.create(name=name, flavor=flavor, volume=volume,
type=cdb_type, version=cdb_version)
except Exception as e:
module.fail_json(msg='%s' % e.message)
else:
changed = True
else:
action = None
if instance.volume.size != volume:
action = 'resize'
if instance.volume.size > volume:
module.fail_json(changed=False, action=action,
msg='The new volume size must be larger than '
'the current volume size',
cdb=rax_to_dict(instance))
instance.resize_volume(volume)
changed = True
if int(instance.flavor.id) != flavor:
action = 'resize'
pyrax.utils.wait_until(instance, 'status', 'ACTIVE',
attempts=wait_timeout)
instance.resize(flavor)
changed = True
if wait:
pyrax.utils.wait_until(instance, 'status', 'ACTIVE',
attempts=wait_timeout)
if wait and instance.status != 'ACTIVE':
module.fail_json(changed=changed, action=action,
cdb=rax_to_dict(instance),
msg='Timeout waiting for "%s" databases instance to '
'be created' % name)
module.exit_json(changed=changed, action=action, cdb=rax_to_dict(instance))
def delete_instance(module, name, wait, wait_timeout):
if not name:
module.fail_json(msg='name is required for the "rax_cdb" module')
changed = False
instance = find_instance(name)
if not instance:
module.exit_json(changed=False, action='delete')
try:
instance.delete()
except Exception as e:
module.fail_json(msg='%s' % e.message)
else:
changed = True
if wait:
pyrax.utils.wait_until(instance, 'status', 'SHUTDOWN',
attempts=wait_timeout)
if wait and instance.status != 'SHUTDOWN':
module.fail_json(changed=changed, action='delete',
cdb=rax_to_dict(instance),
msg='Timeout waiting for "%s" databases instance to '
'be deleted' % name)
module.exit_json(changed=changed, action='delete',
cdb=rax_to_dict(instance))
def rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait,
wait_timeout):
# act on the state
if state == 'present':
save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait,
wait_timeout)
elif state == 'absent':
delete_instance(module, name, wait, wait_timeout)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
name=dict(type='str', required=True),
flavor=dict(type='int', default=1),
volume=dict(type='int', default=2),
cdb_type=dict(type='str', default='MySQL', aliases=['type']),
cdb_version=dict(type='str', default='5.6', aliases=['version']),
state=dict(default='present', choices=['present', 'absent']),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=300),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
name = module.params.get('name')
flavor = module.params.get('flavor')
volume = module.params.get('volume')
cdb_type = module.params.get('cdb_type')
cdb_version = module.params.get('cdb_version')
state = module.params.get('state')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
setup_rax_module(module, pyrax)
rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait, wait_timeout)
if __name__ == '__main__':
main()
|
the-stack_106_13165
|
# qubit number=3
# total number=50
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
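# Quick sanity check of the two helpers above (values worked out by hand):
#   bitwise_dot("011", "101") == "1"    # 0*1 + 1*0 + 1*1 = 1 (mod 2)
#   bitwise_xor("011", "101") == "011"  # per-position XOR gives "110", then reversed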
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.rx(-0.09738937226128368,input_qubit[2]) # number=2
prog.h(input_qubit[1]) # number=33
prog.cz(input_qubit[2],input_qubit[1]) # number=34
prog.h(input_qubit[1]) # number=35
prog.h(input_qubit[1]) # number=3
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
    sample_shot = 4000
writefile = open("../data/startQiskit_QC278.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_belem")
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
    info = execute(circuit1, backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
the-stack_106_13169
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
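# Illustration (hypothetical bitcoin.conf): a file containing
#   rpcuser=alice
#   rpcpassword=secret
#   testnet=1
# is wrapped in a fake "[all]" section and parsed into
# {'rpcuser': 'alice', 'rpcpassword': 'secret', 'testnet': '1'} (all values are strings).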
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 16688 if testnet else 6688
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
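# Hypothetical illustration, not used by the script: with unspent outputs worth
# 0.4 and 0.8 BTC and a 1.0 BTC target, select_coins returns both outputs and
# the 0.2 BTC of change that create_tx sends back to the last from-address.
def _select_coins_example():
    inputs = [{"txid": "aa"*32, "vout": 0, "amount": Decimal("0.4")},
              {"txid": "bb"*32, "vout": 1, "amount": Decimal("0.8")}]
    outputs, change = select_coins(Decimal("1.0"), inputs)
    assert len(outputs) == 2 and change == Decimal("0.2")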
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        fee = total_in - total_out  # actual fee paid by this transaction
        if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
the-stack_106_13174
|
from django.http import QueryDict
import urllib
import json
import copy
from django.test import TestCase
from rest_framework.test import APIClient, APIRequestFactory
from rest_framework import status
from cheapiesgr.models import *
from .views import AUTH_TOKEN_LABEL
def decode_response(response):
return json.loads(response.content.decode('utf-8'))
def urldump(data):
result = []
for key, val in data.items():
if isinstance(val, list):
for x in val:
result.append((key, x))
else:
result.append((key, val))
return urllib.parse.urlencode(result)
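# Illustration (hypothetical payload): urldump({'name': 'foo', 'tags': ['a', 'b']})
# yields 'name=foo&tags=a&tags=b', i.e. list values are repeated as separate
# form parameters so the API receives every tag.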
class APITestcase(TestCase):
def setUp(self):
self.factory = APIRequestFactory()
self.client = APIClient()
usr = User.objects.create_user(username='test',
email='[email protected]',
password='test',
first_name='John',
last_name='Doe')
usr.save()
volunteer = Volunteer(user=usr, confirmed_email=True)
volunteer.save()
login_response = self.client.post('/observatory/api/login/',
{'username' : 'test',
'password' : 'test'},
format='json')
login_response = decode_response(login_response)
assert('token' in login_response)
self.token = login_response['token']
self.header = { AUTH_TOKEN_LABEL : self.token }
self.client.credentials(HTTP_X_OBSERVATORY_AUTH=self.token)
self.product = {
"name" : "foo",
"description" : "foo",
"category" : "laptop",
"tags" : ['a', 'b'],
"withdrawn" : False
}
self.products = [self.product]
response = self.client.post('/observatory/api/products/', urldump(self.product), content_type='application/x-www-form-urlencoded')
response = decode_response(response)
self.idd = response['id']
response = self.client.get('/observatory/api/products/{}'.format(self.idd),format = 'json')
response = decode_response(response)
assert(response['name'] == self.product['name'])
assert(response['description'] == self.product['description'])
assert(response['category'] == self.product['category'])
assert(self.product['tags'] == response['tags'])
assert(response['withdrawn'] == self.product['withdrawn'])
assert(response['name'] == self.product['name'])
def test_get_products(self):
response = self.client.get('/observatory/api/products/',format = 'json')
response = decode_response(response)
assert(response['count'] == 20)
assert(response['products'][0]['name'] == self.product['name'])
assert(response['start'] == 0)
assert(response['total'] == 1)
def test_put_products(self):
newproduct = {
'name' : 'foo1',
'description' : 'foo1',
'category' : 'laptop1',
'tags' : ['a1', 'b1'],
'withdrawn' : False
}
response = self.client.put('/observatory/api/products/{}'.format(self.idd), urldump(newproduct), content_type='application/x-www-form-urlencoded')
response = self.client.get('/observatory/api/products/{}'.format(self.idd), format = 'json')
response = decode_response(response)
assert(response['name'] == newproduct['name'])
assert(response['description'] == newproduct['description'])
assert(response['category'] == newproduct['category'])
assert(response['tags'] == newproduct['tags'])
assert(response['withdrawn'] == newproduct['withdrawn'])
response = self.client.put('/observatory/api/products/{}'.format(self.idd), urldump(self.product), content_type='application/x-www-form-urlencoded')
def test_patch_product(self):
patch_data = {'name':'foo2'}
response = self.client.patch('/observatory/api/products/{}'.format(self.idd), urldump(patch_data), content_type='application/x-www-form-urlencoded')
response = decode_response(response)
assert(response['name'] == 'foo2')
def test_delete_product(self):
response = self.client.delete('/observatory/api/products/{}'.format(self.idd))
response = decode_response(response)
assert(response['withdrawn'] == True)
|
the-stack_106_13176
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import tempfile
import six
import tensorflow as tf
from official.nlp.bert import tokenization
class TokenizationTest(tf.test.TestCase):
"""Tokenization test.
The implementation is forked from
  https://github.com/google-research/bert/blob/master/tokenization_test.py
"""
def test_full_tokenizer(self):
vocab_tokens = [
"[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
"##ing", ","
]
with tempfile.NamedTemporaryFile(delete=False) as vocab_writer:
if six.PY2:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
else:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens
]).encode("utf-8"))
vocab_file = vocab_writer.name
tokenizer = tokenization.FullTokenizer(vocab_file)
os.unlink(vocab_file)
tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
self.assertAllEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertAllEqual(
tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
def test_chinese(self):
tokenizer = tokenization.BasicTokenizer()
self.assertAllEqual(
tokenizer.tokenize(u"ah\u535A\u63A8zz"),
[u"ah", u"\u535A", u"\u63A8", u"zz"])
def test_basic_tokenizer_lower(self):
tokenizer = tokenization.BasicTokenizer(do_lower_case=True)
self.assertAllEqual(
tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "),
["hello", "!", "how", "are", "you", "?"])
self.assertAllEqual(tokenizer.tokenize(u"H\u00E9llo"), ["hello"])
def test_basic_tokenizer_no_lower(self):
tokenizer = tokenization.BasicTokenizer(do_lower_case=False)
self.assertAllEqual(
tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "),
["HeLLo", "!", "how", "Are", "yoU", "?"])
def test_basic_tokenizer_no_split_on_punc(self):
tokenizer = tokenization.BasicTokenizer(
do_lower_case=True, split_on_punc=False)
self.assertAllEqual(
tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "),
["hello!how", "are", "you?"])
def test_wordpiece_tokenizer(self):
vocab_tokens = [
"[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
"##ing", "##!", "!"
]
vocab = {}
for (i, token) in enumerate(vocab_tokens):
vocab[token] = i
tokenizer = tokenization.WordpieceTokenizer(vocab=vocab)
self.assertAllEqual(tokenizer.tokenize(""), [])
self.assertAllEqual(
tokenizer.tokenize("unwanted running"),
["un", "##want", "##ed", "runn", "##ing"])
self.assertAllEqual(
tokenizer.tokenize("unwanted running !"),
["un", "##want", "##ed", "runn", "##ing", "!"])
self.assertAllEqual(
tokenizer.tokenize("unwanted running!"),
["un", "##want", "##ed", "runn", "##ing", "##!"])
self.assertAllEqual(
tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
def test_convert_tokens_to_ids(self):
vocab_tokens = [
"[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
"##ing"
]
vocab = {}
for (i, token) in enumerate(vocab_tokens):
vocab[token] = i
self.assertAllEqual(
tokenization.convert_tokens_to_ids(
vocab, ["un", "##want", "##ed", "runn", "##ing"]), [7, 4, 5, 8, 9])
def test_is_whitespace(self):
self.assertTrue(tokenization._is_whitespace(u" "))
self.assertTrue(tokenization._is_whitespace(u"\t"))
self.assertTrue(tokenization._is_whitespace(u"\r"))
self.assertTrue(tokenization._is_whitespace(u"\n"))
self.assertTrue(tokenization._is_whitespace(u"\u00A0"))
self.assertFalse(tokenization._is_whitespace(u"A"))
self.assertFalse(tokenization._is_whitespace(u"-"))
def test_is_control(self):
self.assertTrue(tokenization._is_control(u"\u0005"))
self.assertFalse(tokenization._is_control(u"A"))
self.assertFalse(tokenization._is_control(u" "))
self.assertFalse(tokenization._is_control(u"\t"))
self.assertFalse(tokenization._is_control(u"\r"))
self.assertFalse(tokenization._is_control(u"\U0001F4A9"))
def test_is_punctuation(self):
self.assertTrue(tokenization._is_punctuation(u"-"))
self.assertTrue(tokenization._is_punctuation(u"$"))
self.assertTrue(tokenization._is_punctuation(u"`"))
self.assertTrue(tokenization._is_punctuation(u"."))
self.assertFalse(tokenization._is_punctuation(u"A"))
self.assertFalse(tokenization._is_punctuation(u" "))
if __name__ == "__main__":
tf.test.main()
|
the-stack_106_13177
|
"""
Color playground.
Any fenced code blocks with `playground` as the language will be converted to interactive playgrounds that
are initialized with the results of the initial contents.
Any inline code blocks with `color` as the language will be evaluated as a simple color swatch.
"""
import coloraide
from coloraide import Color, NaN, Piecewise
from coloraide.color.interpolate import Interpolator
from pymdownx import superfences
import xml.etree.ElementTree as Etree
from collections.abc import Sequence
from collections import namedtuple
import ast
from io import StringIO
import contextlib
import sys
import re
import traceback
AST_BLOCKS = (ast.If, ast.For, ast.While, ast.Try, ast.With, ast.FunctionDef, ast.ClassDef)
RE_COLOR_START = re.compile(
r"(?i)(?:\b(?<![-#&$])(?:color|hsla?|lch|lab|hwb|rgba?)\(|\b(?<![-#&$])[\w]{3,}(?![(-])\b|(?<![&])#)"
)
template = '''<div class="playground" id="__playground_{el_id}">
<div class="playground-results" id="__playground-results_{el_id}">
{results}
</div>
<div class="playground-code hidden" id="__playground-code_{el_id}">
<form autocomplete="off">
<textarea class="playground-inputs" id="__playground-inputs_{el_id}" spellcheck="false">{raw_source}</textarea>
</form>
</div>
<button id="__playground-edit_{el_id}" class="playground-edit" title="Edit the current snippet">Edit</button>
<button id="__playground-share_{el_id}" class="playground-share" title="Copy URL to current snippet">Share</button>
<button id="__playground-run_{el_id}" class="playground-run hidden" title="Run code (Ctrl + Enter)">Run</button>
<button id="__playground-cancel_{el_id}" class="playground-cancel hidden" title="Cancel edit (Escape)">Cancel</button>
</div>'''
code_id = 0
class ColorInterpolate(list):
"""Color interpolate."""
class ColorTuple(namedtuple('ColorTuple', ['string', 'color'])):
"""Color tuple."""
def _escape(txt):
"""Basic HTML escaping."""
    txt = txt.replace('&', '&amp;')
    txt = txt.replace('<', '&lt;')
    txt = txt.replace('>', '&gt;')
return txt
@contextlib.contextmanager
def std_output(stdout=None):
"""Capture standard out."""
old = sys.stdout
if stdout is None:
stdout = StringIO()
sys.stdout = stdout
yield stdout
sys.stdout = old
def get_colors(result):
"""Get color from results."""
colors = []
if isinstance(result, Color):
colors.append(ColorTuple(result.to_string(fit=False), result))
elif isinstance(result, Interpolator):
colors = ColorInterpolate(result.steps(steps=5, max_delta_e=4))
elif isinstance(result, str):
try:
colors.append(ColorTuple(result, Color(result)))
except Exception:
pass
elif isinstance(result, Sequence):
for x in result:
if isinstance(x, Color):
colors.append(ColorTuple(x.to_string(fit=False), x))
elif isinstance(x, str):
try:
colors.append(ColorTuple(x, Color(x)))
except Exception:
pass
return colors
def find_colors(text):
"""Find colors in text buffer."""
colors = []
for m in RE_COLOR_START.finditer(text):
start = m.start()
mcolor = Color.match(text, start=start)
if mcolor is not None:
colors.append(ColorTuple(text[mcolor.start:mcolor.end], mcolor.color))
return colors
def execute(cmd):
"""Execute color commands."""
g = {'Color': Color, 'coloraide': coloraide, 'NaN': NaN, 'Piecewise': Piecewise}
console = ''
colors = []
# Build AST tree
src = cmd.strip()
lines = src.split('\n')
tree = ast.parse(src)
for node in tree.body:
result = None
# Format source as Python console statements
start = node.lineno
end = node.end_lineno
stmt = lines[start - 1: end]
command = ''
for i, line in enumerate(stmt, 0):
if i == 0:
stmt[i] = '>>> ' + line
else:
stmt[i] = '... ' + line
command += '\n'.join(stmt)
if isinstance(node, AST_BLOCKS):
command += '\n... '
# Capture anything sent to standard out
try:
text = ''
with std_output() as s:
# Execute code
if isinstance(node, ast.Expr):
_eval = ast.Expression(node.value)
result = eval(compile(_eval, '<string>', 'eval'), g)
else:
_exec = ast.Module([node], [])
exec(compile(_exec, '<string>', 'exec'), g)
# Execution went well, so append command
console += command
# Output captured standard out after statements
text = s.getvalue()
if text:
clist = find_colors(text)
if clist:
colors.append(clist)
console += '\n{}'.format(text)
s.flush()
except Exception:
console += '{}\n{}'.format(command, traceback.format_exc())
# Failed for some reason, so quit
break
# If we got a result, output it as well
if result is not None:
clist = get_colors(result)
if clist:
colors.append(clist)
console += '{}{}\n'.format('\n' if not text else '', str(result))
else:
console += '\n' if not text else ''
return console, colors
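def _execute_example():
    """Hypothetical usage sketch, not wired into the extension.

    Running a tiny snippet through execute() returns the emulated console
    transcript plus the colors it produced; the formatters below turn those
    into swatch markup. The exact serialization of the color depends on the
    installed coloraide version.
    """
    console, colors = execute("Color('red')")
    return console, colors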
def color_command_validator(language, inputs, options, attrs, md):
"""Color validator."""
valid_inputs = set()
for k, v in inputs.items():
if k in valid_inputs:
options[k] = True
continue
attrs[k] = v
return True
def color_command_formatter(src="", language="", class_name=None, options=None, md="", **kwargs):
"""Formatter wrapper."""
global code_id
try:
if len(md.preprocessors['fenced_code_block'].extension.stash) == 0:
code_id = 0
console, colors = execute(src.strip())
el = ''
bar = False
values = []
for item in colors:
if isinstance(item, ColorInterpolate):
if bar:
el += '<div class="swatch-bar">{}</div>'.format(' '.join(values))
values = []
sub_el1 = '<div class="swatch-bar"><span class="swatch swatch-gradient">{}</span></div>'
style = "--swatch-stops: "
stops = []
for color in item:
color.fit("srgb", in_place=True)
stops.append(color.convert("srgb").to_string())
if not stops:
stops.extend(['transparent'] * 2)
if len(stops) == 1:
stops.append(stops[0])
style += ','.join(stops)
sub_el2 = '<span class="swatch-color" style="{}"></span>'.format(style)
el += sub_el1.format(sub_el2)
bar = False
else:
bar = True
base_classes = "swatch"
for color in item:
if not color.color.in_gamut('srgb'):
base_classes += " out-of-gamut"
color.color.fit('srgb', in_place=True)
srgb = color.color.convert('srgb')
value1 = srgb.to_string(alpha=False)
value2 = srgb.to_string()
style = "--swatch-stops: {} 50%, {} 50%".format(value1, value2)
title = color.string
classes = base_classes
c = '<span class="swatch-color" style="{style}"></span>'.format(style=style)
c = '<span class="{classes}" title="{title}">{color}</span>'.format(
classes=classes,
color=c,
title=title
)
values.append(c)
if bar:
el += '<div class="swatch-bar">{}</div>'.format(' '.join(values))
values = []
el += md.preprocessors['fenced_code_block'].extension.superfences[0]['formatter'](
src=console,
class_name="highlight",
language='pycon3',
md=md,
options=options,
**kwargs
)
el = '<div class="color-command">{}</div>'.format(el)
el = template.format(el_id=code_id, raw_source=_escape(src), results=el)
code_id += 1
except Exception:
import traceback
print(traceback.format_exc())
return superfences.fence_code_format(src, 'text', class_name, options, md, **kwargs)
return el
def color_formatter(src="", language="", class_name=None, md=""):
"""Formatter wrapper."""
try:
result = src.strip()
try:
console, colors = execute(result)
if len(colors) != 1 or len(colors[0]) != 1:
raise ValueError('Need one color only')
color = colors[0][0].color
result = colors[0][0].string
except Exception:
color = Color(result.strip())
el = Etree.Element('span')
stops = []
if not color.in_gamut("srgb"):
color.fit("srgb", in_place=True)
attributes = {'class': "swatch out-of-gamut", "title": result}
sub_el = Etree.SubElement(el, 'span', attributes)
stops.append(color.convert("srgb").to_string(hex=True, alpha=False))
if color.alpha < 1.0:
stops[-1] += ' 50%'
stops.append(color.convert("srgb").to_string(hex=True) + ' 50%')
else:
attributes = {'class': "swatch", "title": result}
sub_el = Etree.SubElement(el, 'span', attributes)
stops.append(color.convert("srgb").to_string(hex=True, alpha=False))
if color.alpha < 1.0:
stops[-1] += ' 50%'
stops.append(color.convert("srgb").to_string(hex=True) + ' 50%')
if not stops:
stops.extend(['transparent'] * 2)
if len(stops) == 1:
stops.append(stops[0])
Etree.SubElement(
sub_el,
'span',
{
"class": "swatch-color",
"style": "--swatch-stops: {};".format(','.join(stops))
}
)
el.append(md.inlinePatterns['backtick'].handle_code('css-color', result))
except Exception:
import traceback
print(traceback.format_exc())
el = md.inlinePatterns['backtick'].handle_code('text', src)
return el
|
the-stack_106_13178
|
#
# Copyright (c) 2009-2016, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
from environment import *
from imports import mpi
import DistGraph as DG
class DistSparseMatrix(object):
# Constructors and destructors
# ============================
lib.ElDistSparseMatrixCreate_i.argtypes = \
lib.ElDistSparseMatrixCreate_s.argtypes = \
lib.ElDistSparseMatrixCreate_d.argtypes = \
lib.ElDistSparseMatrixCreate_c.argtypes = \
lib.ElDistSparseMatrixCreate_z.argtypes = \
[POINTER(c_void_p),mpi.Comm]
def __init__(self,tag=dTag,comm=mpi.COMM_WORLD(),create=True):
self.obj = c_void_p()
self.tag = tag
CheckTag(tag)
if create:
args = [pointer(self.obj),comm]
if tag == iTag: lib.ElDistSparseMatrixCreate_i(*args)
elif tag == sTag: lib.ElDistSparseMatrixCreate_s(*args)
elif tag == dTag: lib.ElDistSparseMatrixCreate_d(*args)
elif tag == cTag: lib.ElDistSparseMatrixCreate_c(*args)
elif tag == zTag: lib.ElDistSparseMatrixCreate_z(*args)
else: DataExcept()
lib.ElDistSparseMatrixDestroy_i.argtypes = \
lib.ElDistSparseMatrixDestroy_s.argtypes = \
lib.ElDistSparseMatrixDestroy_d.argtypes = \
lib.ElDistSparseMatrixDestroy_c.argtypes = \
lib.ElDistSparseMatrixDestroy_z.argtypes = \
[c_void_p]
def Destroy(self):
args = [self.obj]
if self.tag == iTag: lib.ElDistSparseMatrixDestroy_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixDestroy_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixDestroy_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixDestroy_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixDestroy_z(*args)
else: DataExcept()
# Assignment and reconfiguration
# ==============================
lib.ElDistSparseMatrixEmpty_i.argtypes = \
lib.ElDistSparseMatrixEmpty_s.argtypes = \
lib.ElDistSparseMatrixEmpty_d.argtypes = \
lib.ElDistSparseMatrixEmpty_c.argtypes = \
lib.ElDistSparseMatrixEmpty_z.argtypes = \
[c_void_p]
def Empty(self):
args = [self.obj]
if self.tag == iTag: lib.ElDistSparseMatrixEmpty_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixEmpty_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixEmpty_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixEmpty_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixEmpty_z(*args)
else: DataExcept()
lib.ElDistSparseMatrixResize_i.argtypes = \
lib.ElDistSparseMatrixResize_s.argtypes = \
lib.ElDistSparseMatrixResize_d.argtypes = \
lib.ElDistSparseMatrixResize_c.argtypes = \
lib.ElDistSparseMatrixResize_z.argtypes = \
[c_void_p,iType,iType]
def Resize(self,height,width):
args = [self.obj,height,width]
if self.tag == iTag: lib.ElDistSparseMatrixResize_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixResize_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixResize_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixResize_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixResize_z(*args)
else: DataExcept()
lib.ElDistSparseMatrixSetComm_i.argtypes = \
lib.ElDistSparseMatrixSetComm_s.argtypes = \
lib.ElDistSparseMatrixSetComm_d.argtypes = \
lib.ElDistSparseMatrixSetComm_c.argtypes = \
lib.ElDistSparseMatrixSetComm_z.argtypes = \
[c_void_p,mpi.Comm]
def SetComm(self,comm):
args = [self.obj,comm]
if self.tag == iTag: lib.ElDistSparseMatrixSetComm_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixSetComm_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixSetComm_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixSetComm_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixSetComm_z(*args)
else: DataExcept()
lib.ElDistSparseMatrixReserve_i.argtypes = \
lib.ElDistSparseMatrixReserve_s.argtypes = \
lib.ElDistSparseMatrixReserve_d.argtypes = \
lib.ElDistSparseMatrixReserve_c.argtypes = \
lib.ElDistSparseMatrixReserve_z.argtypes = \
[c_void_p,iType,iType]
def Reserve(self,numLocalEntries,numRemoteEntries=0):
args = [self.obj,numLocalEntries,numRemoteEntries]
if self.tag == iTag: lib.ElDistSparseMatrixReserve_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixReserve_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixReserve_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixReserve_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixReserve_z(*args)
else: DataExcept()
lib.ElDistSparseMatrixUpdate_i.argtypes = [c_void_p,iType,iType,iType]
lib.ElDistSparseMatrixUpdate_s.argtypes = [c_void_p,iType,iType,sType]
lib.ElDistSparseMatrixUpdate_d.argtypes = [c_void_p,iType,iType,dType]
lib.ElDistSparseMatrixUpdate_c.argtypes = [c_void_p,iType,iType,cType]
lib.ElDistSparseMatrixUpdate_z.argtypes = [c_void_p,iType,iType,zType]
def Update(self,row,col,value):
args = [self.obj,row,col,value]
if self.tag == iTag: lib.ElDistSparseMatrixUpdate_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixUpdate_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixUpdate_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixUpdate_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixUpdate_z(*args)
else: DataExcept()
lib.ElDistSparseMatrixUpdateLocal_i.argtypes = [c_void_p,iType,iType,iType]
lib.ElDistSparseMatrixUpdateLocal_s.argtypes = [c_void_p,iType,iType,sType]
lib.ElDistSparseMatrixUpdateLocal_d.argtypes = [c_void_p,iType,iType,dType]
lib.ElDistSparseMatrixUpdateLocal_c.argtypes = [c_void_p,iType,iType,cType]
lib.ElDistSparseMatrixUpdateLocal_z.argtypes = [c_void_p,iType,iType,zType]
def UpdateLocal(self,localRow,col,value):
args = [self.obj,localRow,col,value]
if self.tag == iTag: lib.ElDistSparseMatrixUpdateLocal_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixUpdateLocal_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixUpdateLocal_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixUpdateLocal_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixUpdateLocal_z(*args)
else: DataExcept()
lib.ElDistSparseMatrixZero_i.argtypes = \
lib.ElDistSparseMatrixZero_s.argtypes = \
lib.ElDistSparseMatrixZero_d.argtypes = \
lib.ElDistSparseMatrixZero_c.argtypes = \
lib.ElDistSparseMatrixZero_z.argtypes = \
[c_void_p,iType,iType]
def Zero(self,row,col):
args = [self.obj,row,col]
if self.tag == iTag: lib.ElDistSparseMatrixZero_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixZero_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixZero_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixZero_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixZero_z(*args)
else: DataExcept()
lib.ElDistSparseMatrixZeroLocal_i.argtypes = \
lib.ElDistSparseMatrixZeroLocal_s.argtypes = \
lib.ElDistSparseMatrixZeroLocal_d.argtypes = \
lib.ElDistSparseMatrixZeroLocal_c.argtypes = \
lib.ElDistSparseMatrixZeroLocal_z.argtypes = \
[c_void_p,iType,iType]
def ZeroLocal(self,localRow,col):
args = [self.obj,localRow,col]
if self.tag == iTag: lib.ElDistSparseMatrixZeroLocal_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixZeroLocal_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixZeroLocal_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixZeroLocal_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixZeroLocal_z(*args)
else: DataExcept()
lib.ElDistSparseMatrixQueueUpdate_i.argtypes = \
[c_void_p,iType,iType,iType,bType]
lib.ElDistSparseMatrixQueueUpdate_s.argtypes = \
[c_void_p,iType,iType,sType,bType]
lib.ElDistSparseMatrixQueueUpdate_d.argtypes = \
[c_void_p,iType,iType,dType,bType]
lib.ElDistSparseMatrixQueueUpdate_c.argtypes = \
[c_void_p,iType,iType,cType,bType]
lib.ElDistSparseMatrixQueueUpdate_z.argtypes = \
[c_void_p,iType,iType,zType,bType]
def QueueUpdate(self,row,col,value,passive=False):
args = [self.obj,row,col,value,passive]
if self.tag == iTag: lib.ElDistSparseMatrixQueueUpdate_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixQueueUpdate_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixQueueUpdate_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixQueueUpdate_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixQueueUpdate_z(*args)
else: DataExcept()
lib.ElDistSparseMatrixQueueLocalUpdate_i.argtypes = \
[c_void_p,iType,iType,iType]
lib.ElDistSparseMatrixQueueLocalUpdate_s.argtypes = \
[c_void_p,iType,iType,sType]
lib.ElDistSparseMatrixQueueLocalUpdate_d.argtypes = \
[c_void_p,iType,iType,dType]
lib.ElDistSparseMatrixQueueLocalUpdate_c.argtypes = \
[c_void_p,iType,iType,cType]
lib.ElDistSparseMatrixQueueLocalUpdate_z.argtypes = \
[c_void_p,iType,iType,zType]
def QueueLocalUpdate(self,localRow,col,value):
args = [self.obj,localRow,col,value]
if self.tag == iTag: lib.ElDistSparseMatrixQueueLocalUpdate_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixQueueLocalUpdate_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixQueueLocalUpdate_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixQueueLocalUpdate_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixQueueLocalUpdate_z(*args)
else: DataExcept()
lib.ElDistSparseMatrixQueueZero_i.argtypes = \
lib.ElDistSparseMatrixQueueZero_s.argtypes = \
lib.ElDistSparseMatrixQueueZero_d.argtypes = \
lib.ElDistSparseMatrixQueueZero_c.argtypes = \
lib.ElDistSparseMatrixQueueZero_z.argtypes = \
[c_void_p,iType,iType,bType]
def QueueZero(self,row,col,passive=False):
args = [self.obj,row,col,passive]
if self.tag == iTag: lib.ElDistSparseMatrixQueueZero_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixQueueZero_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixQueueZero_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixQueueZero_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixQueueZero_z(*args)
else: DataExcept()
lib.ElDistSparseMatrixQueueLocalZero_i.argtypes = \
lib.ElDistSparseMatrixQueueLocalZero_s.argtypes = \
lib.ElDistSparseMatrixQueueLocalZero_d.argtypes = \
lib.ElDistSparseMatrixQueueLocalZero_c.argtypes = \
lib.ElDistSparseMatrixQueueLocalZero_z.argtypes = \
[c_void_p,iType,iType]
def QueueLocalZero(self,localRow,col):
args = [self.obj,localRow,col]
if self.tag == iTag: lib.ElDistSparseMatrixQueueLocalZero_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixQueueLocalZero_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixQueueLocalZero_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixQueueLocalZero_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixQueueLocalZero_z(*args)
else: DataExcept()
lib.ElDistSparseMatrixProcessQueues_i.argtypes = \
lib.ElDistSparseMatrixProcessQueues_s.argtypes = \
lib.ElDistSparseMatrixProcessQueues_d.argtypes = \
lib.ElDistSparseMatrixProcessQueues_c.argtypes = \
lib.ElDistSparseMatrixProcessQueues_z.argtypes = \
[c_void_p]
def ProcessQueues(self):
args = [self.obj]
if self.tag == iTag: lib.ElDistSparseMatrixProcessQueues_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixProcessQueues_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixProcessQueues_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixProcessQueues_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixProcessQueues_z(*args)
else: DataExcept()
lib.ElDistSparseMatrixProcessLocalQueues_i.argtypes = \
lib.ElDistSparseMatrixProcessLocalQueues_s.argtypes = \
lib.ElDistSparseMatrixProcessLocalQueues_d.argtypes = \
lib.ElDistSparseMatrixProcessLocalQueues_c.argtypes = \
lib.ElDistSparseMatrixProcessLocalQueues_z.argtypes = \
[c_void_p]
def ProcessLocalQueues(self):
args = [self.obj]
if self.tag == iTag: lib.ElDistSparseMatrixProcessLocalQueues_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixProcessLocalQueues_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixProcessLocalQueues_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixProcessLocalQueues_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixProcessLocalQueues_z(*args)
else: DataExcept()
# Queries
# =======
lib.ElDistSparseMatrixHeight_i.argtypes = \
lib.ElDistSparseMatrixHeight_s.argtypes = \
lib.ElDistSparseMatrixHeight_d.argtypes = \
lib.ElDistSparseMatrixHeight_c.argtypes = \
lib.ElDistSparseMatrixHeight_z.argtypes = \
[c_void_p,POINTER(iType)]
def Height(self):
height = iType()
args = [self.obj,pointer(height)]
if self.tag == iTag: lib.ElDistSparseMatrixHeight_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixHeight_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixHeight_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixHeight_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixHeight_z(*args)
else: DataExcept()
return height.value
lib.ElDistSparseMatrixWidth_i.argtypes = \
lib.ElDistSparseMatrixWidth_s.argtypes = \
lib.ElDistSparseMatrixWidth_d.argtypes = \
lib.ElDistSparseMatrixWidth_c.argtypes = \
lib.ElDistSparseMatrixWidth_z.argtypes = \
[c_void_p,POINTER(iType)]
def Width(self):
width = iType()
args = [self.obj,pointer(width)]
if self.tag == iTag: lib.ElDistSparseMatrixWidth_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixWidth_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixWidth_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixWidth_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixWidth_z(*args)
else: DataExcept()
return width.value
lib.ElDistSparseMatrixDistGraph_i.argtypes = \
lib.ElDistSparseMatrixDistGraph_s.argtypes = \
lib.ElDistSparseMatrixDistGraph_d.argtypes = \
lib.ElDistSparseMatrixDistGraph_c.argtypes = \
lib.ElDistSparseMatrixDistGraph_z.argtypes = \
lib.ElDistSparseMatrixLockedDistGraph_i.argtypes = \
lib.ElDistSparseMatrixLockedDistGraph_s.argtypes = \
lib.ElDistSparseMatrixLockedDistGraph_d.argtypes = \
lib.ElDistSparseMatrixLockedDistGraph_c.argtypes = \
lib.ElDistSparseMatrixLockedDistGraph_z.argtypes = \
[c_void_p,POINTER(c_void_p)]
def DistGraph(self,locked=False):
graph = DG.DistGraph(mpi.COMM_WORLD(),False)
args = [self.obj,pointer(graph.obj)]
if locked:
if self.tag == iTag: lib.ElDistSparseMatrixLockedDistGraph_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixLockedDistGraph_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixLockedDistGraph_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixLockedDistGraph_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixLockedDistGraph_z(*args)
else: DataExcept()
else:
if self.tag == iTag: lib.ElDistSparseMatrixDistGraph_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixDistGraph_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixDistGraph_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixDistGraph_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixDistGraph_z(*args)
else: DataExcept()
return graph
lib.ElDistSparseMatrixFirstLocalRow_i.argtypes = \
lib.ElDistSparseMatrixFirstLocalRow_s.argtypes = \
lib.ElDistSparseMatrixFirstLocalRow_d.argtypes = \
lib.ElDistSparseMatrixFirstLocalRow_c.argtypes = \
lib.ElDistSparseMatrixFirstLocalRow_z.argtypes = \
[c_void_p,POINTER(iType)]
def FirstLocalRow(self):
firstLocalRow = iType()
args = [self.obj,pointer(firstLocalRow)]
if self.tag == iTag: lib.ElDistSparseMatrixFirstLocalRow_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixFirstLocalRow_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixFirstLocalRow_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixFirstLocalRow_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixFirstLocalRow_z(*args)
else: DataExcept()
return firstLocalRow.value
lib.ElDistSparseMatrixLocalHeight_i.argtypes = \
lib.ElDistSparseMatrixLocalHeight_s.argtypes = \
lib.ElDistSparseMatrixLocalHeight_d.argtypes = \
lib.ElDistSparseMatrixLocalHeight_c.argtypes = \
lib.ElDistSparseMatrixLocalHeight_z.argtypes = \
[c_void_p,POINTER(iType)]
def LocalHeight(self):
localHeight = iType()
args = [self.obj,pointer(localHeight)]
if self.tag == iTag: lib.ElDistSparseMatrixLocalHeight_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixLocalHeight_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixLocalHeight_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixLocalHeight_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixLocalHeight_z(*args)
else: DataExcept()
return localHeight.value
lib.ElDistSparseMatrixNumLocalEntries_i.argtypes = \
lib.ElDistSparseMatrixNumLocalEntries_s.argtypes = \
lib.ElDistSparseMatrixNumLocalEntries_d.argtypes = \
lib.ElDistSparseMatrixNumLocalEntries_c.argtypes = \
lib.ElDistSparseMatrixNumLocalEntries_z.argtypes = \
[c_void_p,POINTER(iType)]
def NumLocalEntries(self):
numLocalEntries = iType()
args = [self.obj,pointer(numLocalEntries)]
if self.tag == iTag: lib.ElDistSparseMatrixNumLocalEntries_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixNumLocalEntries_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixNumLocalEntries_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixNumLocalEntries_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixNumLocalEntries_z(*args)
else: DataExcept()
return numLocalEntries.value
lib.ElDistSparseMatrixCapacity_i.argtypes = \
lib.ElDistSparseMatrixCapacity_s.argtypes = \
lib.ElDistSparseMatrixCapacity_d.argtypes = \
lib.ElDistSparseMatrixCapacity_c.argtypes = \
lib.ElDistSparseMatrixCapacity_z.argtypes = \
[c_void_p,POINTER(iType)]
def Capacity(self):
capacity = iType()
args = [self.obj,pointer(capacity)]
if self.tag == iTag: lib.ElDistSparseMatrixCapacity_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixCapacity_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixCapacity_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixCapacity_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixCapacity_z(*args)
else: DataExcept()
return capacity.value
lib.ElDistSparseMatrixLocallyConsistent_i.argtypes = \
lib.ElDistSparseMatrixLocallyConsistent_s.argtypes = \
lib.ElDistSparseMatrixLocallyConsistent_d.argtypes = \
lib.ElDistSparseMatrixLocallyConsistent_c.argtypes = \
lib.ElDistSparseMatrixLocallyConsistent_z.argtypes = \
[c_void_p,POINTER(bType)]
def LocallyConsistent(self):
consistent = bType()
args = [self.obj,pointer(consistent)]
if self.tag == iTag: lib.ElDistSparseMatrixLocallyConsistent_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixLocallyConsistent_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixLocallyConsistent_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixLocallyConsistent_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixLocallyConsistent_z(*args)
else: DataExcept()
return consistent.value
lib.ElDistSparseMatrixComm_i.argtypes = \
lib.ElDistSparseMatrixComm_s.argtypes = \
lib.ElDistSparseMatrixComm_d.argtypes = \
lib.ElDistSparseMatrixComm_c.argtypes = \
lib.ElDistSparseMatrixComm_z.argtypes = \
[c_void_p,POINTER(mpi.Comm)]
def Comm(self):
comm = mpi.Comm()
args = [self.obj,pointer(comm)]
if self.tag == iTag: lib.ElDistSparseMatrixComm_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixComm_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixComm_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixComm_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixComm_z(*args)
else: DataExcept()
return comm
lib.ElDistSparseMatrixBlocksize_i.argtypes = \
lib.ElDistSparseMatrixBlocksize_s.argtypes = \
lib.ElDistSparseMatrixBlocksize_d.argtypes = \
lib.ElDistSparseMatrixBlocksize_c.argtypes = \
lib.ElDistSparseMatrixBlocksize_z.argtypes = \
[c_void_p,POINTER(iType)]
def Blocksize(self):
blocksize = iType()
args = [self.obj,pointer(blocksize)]
if self.tag == iTag: lib.ElDistSparseMatrixBlocksize_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixBlocksize_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixBlocksize_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixBlocksize_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixBlocksize_z(*args)
else: DataExcept()
return blocksize.value
lib.ElDistSparseMatrixRowOwner_i.argtypes = \
lib.ElDistSparseMatrixRowOwner_s.argtypes = \
lib.ElDistSparseMatrixRowOwner_d.argtypes = \
lib.ElDistSparseMatrixRowOwner_c.argtypes = \
lib.ElDistSparseMatrixRowOwner_z.argtypes = \
[c_void_p,iType,POINTER(c_int)]
def RowOwner(self,i):
owner = c_int()
args = [self.obj,i,pointer(owner)]
if self.tag == iTag: lib.ElDistSparseMatrixRowOwner_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixRowOwner_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixRowOwner_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixRowOwner_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixRowOwner_z(*args)
else: DataExcept()
return owner.value
lib.ElDistSparseMatrixGlobalRow_i.argtypes = \
lib.ElDistSparseMatrixGlobalRow_s.argtypes = \
lib.ElDistSparseMatrixGlobalRow_d.argtypes = \
lib.ElDistSparseMatrixGlobalRow_c.argtypes = \
lib.ElDistSparseMatrixGlobalRow_z.argtypes = \
[c_void_p,iType,POINTER(iType)]
def GlobalRow(self,iLoc):
i = iType()
args = [self.obj,iLoc,pointer(i)]
if self.tag == iTag: lib.ElDistSparseMatrixGlobalRow_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixGlobalRow_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixGlobalRow_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixGlobalRow_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixGlobalRow_z(*args)
else: DataExcept()
return i.value
lib.ElDistSparseMatrixRow_i.argtypes = \
lib.ElDistSparseMatrixRow_s.argtypes = \
lib.ElDistSparseMatrixRow_d.argtypes = \
lib.ElDistSparseMatrixRow_c.argtypes = \
lib.ElDistSparseMatrixRow_z.argtypes = \
[c_void_p,iType,POINTER(iType)]
def Row(self,localInd):
row = iType()
args = [self.obj,localInd,pointer(row)]
if self.tag == iTag: lib.ElDistSparseMatrixRow_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixRow_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixRow_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixRow_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixRow_z(*args)
else: DataExcept()
return row.value
lib.ElDistSparseMatrixCol_i.argtypes = \
lib.ElDistSparseMatrixCol_s.argtypes = \
lib.ElDistSparseMatrixCol_d.argtypes = \
lib.ElDistSparseMatrixCol_c.argtypes = \
lib.ElDistSparseMatrixCol_z.argtypes = \
[c_void_p,iType,POINTER(iType)]
def Col(self,localInd):
col = iType()
args = [self.obj,localInd,pointer(col)]
if self.tag == iTag: lib.ElDistSparseMatrixCol_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixCol_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixCol_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixCol_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixCol_z(*args)
else: DataExcept()
return col.value
lib.ElDistSparseMatrixValue_i.argtypes = [c_void_p,iType,POINTER(iType)]
lib.ElDistSparseMatrixValue_s.argtypes = [c_void_p,iType,POINTER(sType)]
lib.ElDistSparseMatrixValue_d.argtypes = [c_void_p,iType,POINTER(dType)]
lib.ElDistSparseMatrixValue_c.argtypes = [c_void_p,iType,POINTER(cType)]
lib.ElDistSparseMatrixValue_z.argtypes = [c_void_p,iType,POINTER(zType)]
def Value(self,localInd):
value = TagToType(self.tag)()
args = [self.obj,localInd,pointer(value)]
if self.tag == iTag: lib.ElDistSparseMatrixValue_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixValue_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixValue_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixValue_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixValue_z(*args)
else: DataExcept()
return ScalarData(value)
lib.ElDistSparseMatrixRowOffset_i.argtypes = \
lib.ElDistSparseMatrixRowOffset_s.argtypes = \
lib.ElDistSparseMatrixRowOffset_d.argtypes = \
lib.ElDistSparseMatrixRowOffset_c.argtypes = \
lib.ElDistSparseMatrixRowOffset_z.argtypes = \
[c_void_p,iType,POINTER(iType)]
def RowOffset(self,localRow):
offset = iType()
args = [self.obj,localRow,pointer(offset)]
if self.tag == iTag: lib.ElDistSparseMatrixRowOffset_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixRowOffset_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixRowOffset_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixRowOffset_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixRowOffset_z(*args)
else: DataExcept()
return offset.value
lib.ElDistSparseMatrixOffset_i.argtypes = \
lib.ElDistSparseMatrixOffset_s.argtypes = \
lib.ElDistSparseMatrixOffset_d.argtypes = \
lib.ElDistSparseMatrixOffset_c.argtypes = \
lib.ElDistSparseMatrixOffset_z.argtypes = \
[c_void_p,iType,iType,POINTER(iType)]
def Offset(self,localRow,col):
offset = iType()
args = [self.obj,localRow,col,pointer(offset)]
if self.tag == iTag: lib.ElDistSparseMatrixOffset_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixOffset_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixOffset_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixOffset_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixOffset_z(*args)
else: DataExcept()
return offset.value
lib.ElDistSparseMatrixNumConnections_i.argtypes = \
lib.ElDistSparseMatrixNumConnections_s.argtypes = \
lib.ElDistSparseMatrixNumConnections_d.argtypes = \
lib.ElDistSparseMatrixNumConnections_c.argtypes = \
lib.ElDistSparseMatrixNumConnections_z.argtypes = \
[c_void_p,iType,POINTER(iType)]
def NumConnections(self,localRow):
numConnections = iType()
args = [self.obj,localRow,pointer(numConnections)]
if self.tag == iTag: lib.ElDistSparseMatrixNumConnections_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixNumConnections_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixNumConnections_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixNumConnections_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixNumConnections_z(*args)
else: DataExcept()
return numConnections.value
lib.ElDistSparseMatrixImbalance_i.argtypes = \
lib.ElDistSparseMatrixImbalance_s.argtypes = \
lib.ElDistSparseMatrixImbalance_d.argtypes = \
lib.ElDistSparseMatrixImbalance_c.argtypes = \
lib.ElDistSparseMatrixImbalance_z.argtypes = \
[c_void_p,POINTER(dType)]
def Imbalance(self):
imbalance = dType()
args = [self.obj,pointer(imbalance)]
if self.tag == iTag: lib.ElDistSparseMatrixImbalance_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixImbalance_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixImbalance_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixImbalance_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixImbalance_z(*args)
else: DataExcept()
return imbalance.value
lib.ElDistSparseMatrixSourceBuffer_i.argtypes = \
lib.ElDistSparseMatrixSourceBuffer_s.argtypes = \
lib.ElDistSparseMatrixSourceBuffer_d.argtypes = \
lib.ElDistSparseMatrixSourceBuffer_c.argtypes = \
lib.ElDistSparseMatrixSourceBuffer_z.argtypes = \
lib.ElDistSparseMatrixLockedSourceBuffer_i.argtypes = \
lib.ElDistSparseMatrixLockedSourceBuffer_s.argtypes = \
lib.ElDistSparseMatrixLockedSourceBuffer_d.argtypes = \
lib.ElDistSparseMatrixLockedSourceBuffer_c.argtypes = \
lib.ElDistSparseMatrixLockedSourceBuffer_z.argtypes = \
[c_void_p,POINTER(POINTER(iType))]
def SourceBuffer(self,locked=False):
sourceBuf = POINTER(iType)()
args = [self.obj,pointer(sourceBuf)]
if locked:
if self.tag == iTag: lib.ElDistSparseMatrixLockedSourceBuffer_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixLockedSourceBuffer_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixLockedSourceBuffer_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixLockedSourceBuffer_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixLockedSourceBuffer_z(*args)
else: DataExcept()
else:
if self.tag == iTag: lib.ElDistSparseMatrixSourceBuffer_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixSourceBuffer_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixSourceBuffer_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixSourceBuffer_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixSourceBuffer_z(*args)
else: DataExcept()
return sourceBuf
lib.ElDistSparseMatrixTargetBuffer_i.argtypes = \
lib.ElDistSparseMatrixTargetBuffer_s.argtypes = \
lib.ElDistSparseMatrixTargetBuffer_d.argtypes = \
lib.ElDistSparseMatrixTargetBuffer_c.argtypes = \
lib.ElDistSparseMatrixTargetBuffer_z.argtypes = \
lib.ElDistSparseMatrixLockedTargetBuffer_i.argtypes = \
lib.ElDistSparseMatrixLockedTargetBuffer_s.argtypes = \
lib.ElDistSparseMatrixLockedTargetBuffer_d.argtypes = \
lib.ElDistSparseMatrixLockedTargetBuffer_c.argtypes = \
lib.ElDistSparseMatrixLockedTargetBuffer_z.argtypes = \
[c_void_p,POINTER(POINTER(iType))]
def TargetBuffer(self,locked=False):
targetBuf = POINTER(iType)()
args = [self.obj,pointer(targetBuf)]
if locked:
if self.tag == iTag: lib.ElDistSparseMatrixLockedTargetBuffer_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixLockedTargetBuffer_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixLockedTargetBuffer_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixLockedTargetBuffer_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixLockedTargetBuffer_z(*args)
else: DataExcept()
else:
if self.tag == iTag: lib.ElDistSparseMatrixTargetBuffer_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixTargetBuffer_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixTargetBuffer_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixTargetBuffer_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixTargetBuffer_z(*args)
else: DataExcept()
return targetBuf
lib.ElDistSparseMatrixValueBuffer_i.argtypes = \
lib.ElDistSparseMatrixLockedValueBuffer_i.argtypes = \
[c_void_p,POINTER(POINTER(iType))]
lib.ElDistSparseMatrixValueBuffer_s.argtypes = \
lib.ElDistSparseMatrixLockedValueBuffer_s.argtypes = \
[c_void_p,POINTER(POINTER(sType))]
lib.ElDistSparseMatrixValueBuffer_d.argtypes = \
lib.ElDistSparseMatrixLockedValueBuffer_d.argtypes = \
[c_void_p,POINTER(POINTER(dType))]
lib.ElDistSparseMatrixValueBuffer_c.argtypes = \
lib.ElDistSparseMatrixLockedValueBuffer_c.argtypes = \
[c_void_p,POINTER(POINTER(cType))]
lib.ElDistSparseMatrixValueBuffer_z.argtypes = \
lib.ElDistSparseMatrixLockedValueBuffer_z.argtypes = \
[c_void_p,POINTER(POINTER(zType))]
def ValueBuffer(self,locked=False):
valueBuf = POINTER(TagToType(self.tag))()
args = [self.obj,pointer(valueBuf)]
if locked:
if self.tag == iTag: lib.ElDistSparseMatrixLockedValueBuffer_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixLockedValueBuffer_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixLockedValueBuffer_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixLockedValueBuffer_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixLockedValueBuffer_z(*args)
else: DataExcept()
else:
if self.tag == iTag: lib.ElDistSparseMatrixValueBuffer_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixValueBuffer_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixValueBuffer_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixValueBuffer_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixValueBuffer_z(*args)
else: DataExcept()
return valueBuf
lib.ElGetContigSubmatrixDistSparse_i.argtypes = \
lib.ElGetContigSubmatrixDistSparse_s.argtypes = \
lib.ElGetContigSubmatrixDistSparse_d.argtypes = \
lib.ElGetContigSubmatrixDistSparse_c.argtypes = \
lib.ElGetContigSubmatrixDistSparse_z.argtypes = \
[c_void_p,IndexRange,IndexRange,c_void_p]
def __getitem__(self,indTup):
iInd, jInd = indTup
if isinstance(iInd,slice):
if iInd.start == None:
iInd = slice(0,iInd.stop,iInd.step)
if iInd.stop == None:
iInd = slice(iInd.start,self.Height(),iInd.step)
if isinstance(jInd,slice):
if jInd.start == None:
jInd = slice(0,jInd.stop,jInd.step)
if jInd.stop == None:
jInd = slice(jInd.start,self.Width(),jInd.step)
iRan = IndexRange(iInd)
jRan = IndexRange(jInd)
ASub = DistSparseMatrix(self.tag,self.Comm())
args = [self.obj,iRan,jRan,ASub.obj]
if self.tag == iTag: lib.ElGetContigSubmatrixDistSparse_i(*args)
elif self.tag == sTag: lib.ElGetContigSubmatrixDistSparse_s(*args)
elif self.tag == dTag: lib.ElGetContigSubmatrixDistSparse_d(*args)
elif self.tag == cTag: lib.ElGetContigSubmatrixDistSparse_c(*args)
elif self.tag == zTag: lib.ElGetContigSubmatrixDistSparse_z(*args)
else: DataExcept()
return ASub
the-stack_106_13179
"""
Given a list of non negative integers, arrange them such that they form the largest number.
For example:
Given [3, 30, 34, 5, 9], the largest formed number is 9534330.
Note: The result may be very large, so you need to return a string instead of an integer.
3034
3430
"""
class Solution:
def concat_ints(self, x, y):
digits = len(str(y))
x = x * (10**digits)
return x + y
def is_larger(self, x, y):
if x < 10 and y < 10:
return x > y
x_concat = self.concat_ints(x, y)
y_concat = self.concat_ints(y, x)
return x_concat > y_concat
def sort(self, A):
if len(A) > 1:
mid = len(A) // 2
L = A[:mid]
R = A[mid:]
self.sort(L)
self.sort(R)
i = j = k = 0
while i < len(L) and j < len(R):
if self.is_larger(L[i], R[j]):
A[k] = L[i]
i += 1
else:
A[k] = R[j]
j += 1
k += 1
while i < len(L):
A[k] = L[i]
i += 1
k += 1
while j < len(R):
A[k] = R[j]
j += 1
k += 1
    # @param A : tuple of integers
    # @return: a string
def largestNumber(self, A):
if sum(A) == 0:
return "0"
self.sort(A)
return "".join(map(str, A))
s = Solution()
a = [3, 30, 34, 5, 9]
print(s.largestNumber(a))
a = [8, 89]
print(s.largestNumber(a))
a = [989]
print(s.largestNumber(a))
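# Extra sanity checks (added as a sketch; the expected strings follow from the
# pairwise concatenation ordering implemented in is_larger above).
assert s.largestNumber([3, 30, 34, 5, 9]) == "9534330"
assert s.largestNumber([8, 89]) == "898"
assert s.largestNumber([0, 0, 0]) == "0"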
the-stack_106_13181
__author__ = 'Jason'
import unittest
from icalendar import Event
from cal_tools import model
from cal_tools.model import EventModel
class EventModelTests(unittest.TestCase):
def test_event_is_instance_of_event_class(self):
# Act.
with self.assertRaises(AssertionError) as ex:
EventModel(None)
def test_event(self):
# Arrange.
ical_str = (
'BEGIN:VEVENT\r\n'
'END:VEVENT\r\n'
)
event = Event.from_ical(ical_str)
# Act.
with self.assertRaises(AssertionError) as ex:
event_model = EventModel(event)
# Assert.
self.assertEqual(
ex.exception.args[0],
model.REQ_PROP_MISSING.format('uid'))
def test_minimal_event(self):
ical_str = (
'BEGIN:VEVENT\r\n'
'UID:[email protected]\r\n'
'DTSTAMP:19970610T172345Z\r\n'
'END:VEVENT\r\n'
)
        # Parse the iCalendar string into an Event before constructing the
        # model, mirroring the pattern used in test_event above.
        event = Event.from_ical(ical_str)
        event_model = EventModel(event)
the-stack_106_13183
# qubit number=4
# total number=40
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
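# Small self-check of the oracle builder (added as an illustrative sketch):
# for the constant-zero function no controlled gates are appended, so the
# generated sub-circuit carries no instructions.
_oracle_demo = build_oracle(2, lambda rep: "0")
assert _oracle_demo.name == "Of"
assert len(_oracle_demo.data) == 0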
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.cx(input_qubit[0],input_qubit[3]) # number=13
prog.cx(input_qubit[0],input_qubit[3]) # number=17
prog.x(input_qubit[3]) # number=18
prog.cx(input_qubit[0],input_qubit[3]) # number=19
prog.cx(input_qubit[0],input_qubit[3]) # number=15
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=12
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=37
prog.cz(input_qubit[0],input_qubit[3]) # number=38
prog.h(input_qubit[3]) # number=39
prog.x(input_qubit[3]) # number=28
prog.h(input_qubit[3]) # number=30
prog.cz(input_qubit[0],input_qubit[3]) # number=31
prog.h(input_qubit[3]) # number=32
prog.h(input_qubit[0]) # number=33
prog.cz(input_qubit[3],input_qubit[0]) # number=34
prog.rx(0.33300882128051834,input_qubit[2]) # number=36
prog.h(input_qubit[0]) # number=35
prog.cx(input_qubit[3],input_qubit[0]) # number=23
prog.z(input_qubit[3]) # number=24
prog.cx(input_qubit[3],input_qubit[0]) # number=25
prog.cx(input_qubit[3],input_qubit[0]) # number=22
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.y(input_qubit[2]) # number=11
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
    sample_shot = 8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_QC2562.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
the-stack_106_13185
'''
Copyright 2019, David Pierce Walker-Howell, All rights reserved
Author: David Pierce Walker-Howell<[email protected]>
Last Modified 07/05/2019
Description: This widget controls mission selection and control.
'''
import os
import sys
PARAM_PATH = os.path.join("..", "..", "Sub", "Src", "Params")
sys.path.append(PARAM_PATH)
MECHOS_CONFIG_FILE_PATH = os.path.join(PARAM_PATH, "mechos_network_configs.txt")
from mechos_network_configs import MechOS_Network_Configs
from PyQt5.QtWidgets import QWidget, QApplication, QGridLayout, QLineEdit, QLabel, QVBoxLayout
from PyQt5 import uic
from PyQt5.QtGui import QColor
from PyQt5.QtCore import Qt, QTimer
from MechOS import mechos
from MechOS.simple_messages.bool import Bool
import struct
class Mission_Planner_Widget(QWidget):
'''
Widget for mission planning. Select which missions to run.
'''
def __init__(self):
'''
Initialize the mission planning widget.
Parameters:
N/A
Returns:
N/A
'''
QWidget.__init__(self)
#Get mechos network configurations
configs = MechOS_Network_Configs(MECHOS_CONFIG_FILE_PATH)._get_network_parameters()
#MechOS parameter server (this is where we will save the waypoint file)
self.param_serv = mechos.Parameter_Server_Client(configs["param_ip"], configs["param_port"])
self.param_serv.use_parameter_database(configs["param_server_path"])
#Call in the ui for mission select
self.mission_select_widget = uic.loadUi("mission_select.ui", self)
self.mission_select_node = mechos.Node("MISSION_SELECT_GUI", '192.168.1.2', '192.168.1.14')
self.update_mission_info_publisher = self.mission_select_node.create_publisher("MISSON_SELECT", Bool(), protocol="tcp")
#Connect the mission select button to update the mission in the parameter
#server and tell the mission commander that the mission file has changed.
self.mission_select_widget.save_mission_btn.clicked.connect(self._update_mission_file)
currently_set_mission_file = self.param_serv.get_param("Missions/mission_file")
self.mission_select_widget.mission_file_line_edit.setText(currently_set_mission_file)
self._update_mission_file()
self.linking_layout = QGridLayout(self)
self.setLayout(self.linking_layout)
self.setMinimumSize(449, 330)
def _update_mission_file(self):
'''
If the Save Mission File button is pressed. Update the parameter server
with that mission file and tell the sub that the mission file has changed.
Parameters:
N/A
Returns:
N/A
'''
mission_file = self.mission_select_widget.mission_file_line_edit.text()
#Print information to the text edit box
self.mission_select_widget.mission_info_text_edit.append("[INFO]: Setting Current Mission to:")
self.mission_select_widget.mission_info_text_edit.append("\t%s" % mission_file)
self.param_serv.set_param("Missions/mission_file", mission_file)
        #Tell the sub to update its mission information
self.update_mission_info_publisher.publish(True)
if __name__ == "__main__":
main_app = QApplication([])
main_app.setStyle('Fusion')
main_widget = Mission_Planner_Widget()
sys.exit(main_app.exec_())
the-stack_106_13187
# Copyright (c) 2015-2018 Cisco Systems, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""Ansible Provisioner Module."""
import copy
import collections
import os
import shutil
from molecule import logger
from molecule import util
from molecule.api import drivers
from molecule.provisioner import base
from molecule.provisioner import ansible_playbook
from molecule.provisioner import ansible_playbooks
LOG = logger.get_logger(__name__)
class Ansible(base.Base):
"""
`Ansible`_ is the default provisioner. No other provisioner will be \
supported.
Molecule's provisioner manages the instances lifecycle. However, the user
must provide the create, destroy, and converge playbooks. Molecule's
``init`` subcommand will provide the necessary files for convenience.
Molecule will skip tasks which are tagged with either `molecule-notest` or
`notest`. With the tag `molecule-idempotence-notest` tasks are only
skipped during the idempotence action step.
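    For example (an illustrative task, not part of Molecule itself), tagging a
    task as shown below makes Molecule skip it on every action:
    .. code-block:: yaml
        - name: Task that should never run under Molecule
          command: /usr/local/bin/provision-real-hardware  # hypothetical command
          tags:
            - molecule-notest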
.. important::
Reserve the create and destroy playbooks for provisioning. Do not
attempt to gather facts or perform operations on the provisioned nodes
inside these playbooks. Due to the gymnastics necessary to sync state
between Ansible and Molecule, it is best to perform these tasks in the
prepare or converge playbooks.
        It is the developer's responsibility to properly map the module's fact
data into the instance_conf_dict fact in the create playbook. This
allows Molecule to properly configure Ansible inventory.
Additional options can be passed to ``ansible-playbook`` through the options
dict. Any option set in this section will override the defaults.
.. important::
Options do not affect the create and destroy actions.
.. note::
Molecule will remove any options matching '^[v]+$', and pass ``-vvv``
to the underlying ``ansible-playbook`` command when executing
`molecule --debug`.
Molecule will silence log output, unless invoked with the ``--debug`` flag.
However, this results in quite a bit of output. To enable Ansible log
output, add the following to the ``provisioner`` section of ``molecule.yml``.
.. code-block:: yaml
provisioner:
name: ansible
log: True
The create/destroy playbooks for Docker and Podman are bundled with
Molecule. These playbooks have a clean API from `molecule.yml`, and
are the most commonly used. The bundled playbooks can still be overridden.
The playbook loading order is:
1. provisioner.playbooks.$driver_name.$action
2. provisioner.playbooks.$action
3. bundled_playbook.$driver_name.$action
.. code-block:: yaml
provisioner:
name: ansible
options:
vvv: True
playbooks:
create: create.yml
converge: converge.yml
destroy: destroy.yml
Share playbooks between roles.
.. code-block:: yaml
provisioner:
name: ansible
playbooks:
create: ../default/create.yml
destroy: ../default/destroy.yml
converge: converge.yml
Multiple driver playbooks. In some situations a developer may choose to
test the same role against different backends. Molecule will choose driver
specific create/destroy playbooks, if the determined driver has a key in
the playbooks section of the provisioner's dict.
.. important::
If the determined driver has a key in the playbooks dict, Molecule will
use this dict to resolve all provisioning playbooks (create/destroy).
.. code-block:: yaml
provisioner:
name: ansible
playbooks:
docker:
create: create.yml
destroy: destroy.yml
create: create.yml
destroy: destroy.yml
converge: converge.yml
.. important::
Paths in this section are converted to absolute paths, where the
relative parent is the $scenario_directory.
The side effect playbook executes actions which produce side effects to the
    instance(s). Intended to test HA failover scenarios or the like. It is
not enabled by default. Add the following to the provisioner's ``playbooks``
section to enable.
.. code-block:: yaml
provisioner:
name: ansible
playbooks:
side_effect: side_effect.yml
.. important::
This feature should be considered experimental.
The prepare playbook executes actions which bring the system to a given
state prior to converge. It is executed after create, and only once for
the duration of the instances life.
This can be used to bring instances into a particular state, prior to
testing.
.. code-block:: yaml
provisioner:
name: ansible
playbooks:
prepare: prepare.yml
The cleanup playbook is for cleaning up test infrastructure that may not
be present on the instance that will be destroyed. The primary use-case
is for "cleaning up" changes that were made outside of Molecule's test
environment. For example, remote database connections or user accounts.
Intended to be used in conjunction with `prepare` to modify external
resources when required.
The cleanup step is executed directly before every destroy step. Just like
the destroy step, it will be run twice. An initial clean before converge
and then a clean before the last destroy step. This means that the cleanup
playbook must handle failures to cleanup resources which have not
been created yet.
Add the following to the provisioner's `playbooks` section
to enable.
.. code-block:: yaml
provisioner:
name: ansible
playbooks:
cleanup: cleanup.yml
.. important::
This feature should be considered experimental.
Environment variables. Molecule does its best to handle common Ansible
paths. The defaults are as follows.
::
ANSIBLE_ROLES_PATH:
$ephemeral_directory/roles/:$project_directory/../:~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles
ANSIBLE_LIBRARY:
$ephemeral_directory/modules/:$project_directory/library/:~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules
ANSIBLE_FILTER_PLUGINS:
$ephemeral_directory/plugins/filter/:$project_directory/filter/plugins/:~/.ansible/plugins/filter:/usr/share/ansible/plugins/modules
Environment variables can be passed to the provisioner. Variables in this
    section which match the names above will be appended to the above defaults,
and converted to absolute paths, where the relative parent is the
$scenario_directory.
.. important::
Paths in this section are converted to absolute paths, where the
relative parent is the $scenario_directory.
.. code-block:: yaml
provisioner:
name: ansible
env:
FOO: bar
Modifying ansible.cfg.
.. code-block:: yaml
provisioner:
name: ansible
config_options:
defaults:
fact_caching: jsonfile
ssh_connection:
scp_if_ssh: True
.. important::
The following keys are disallowed to prevent Molecule from
improperly functioning. They can be specified through the
provisioner's env setting described above, with the exception
of the `privilege_escalation`.
.. code-block:: yaml
provisioner:
name: ansible
config_options:
defaults:
roles_path: /path/to/roles_path
library: /path/to/library
filter_plugins: /path/to/filter_plugins
privilege_escalation: {}
Roles which require host/groups to have certain variables set. Molecule
uses the same `variables defined in a playbook`_ syntax as `Ansible`_.
.. code-block:: yaml
provisioner:
name: ansible
inventory:
group_vars:
foo1:
foo: bar
foo2:
foo: bar
baz:
qux: zzyzx
host_vars:
foo1-01:
foo: bar
Molecule automatically generates the inventory based on the hosts defined
    under `Platforms`_. Using the ``hosts`` key allows you to add extra hosts
    to the inventory that are not managed by Molecule.
A typical use case is if you want to access some variables from another
host in the inventory (using hostvars) without creating it.
.. note::
The content of ``hosts`` should follow the YAML based inventory syntax:
start with the ``all`` group and have hosts/vars/children entries.
.. code-block:: yaml
provisioner:
name: ansible
inventory:
hosts:
all:
extra_host:
foo: hello
.. important::
The extra hosts added to the inventory using this key won't be
        created/destroyed by Molecule. It is the developer's responsibility
        to target the proper hosts in the playbook. Only the hosts defined
        under `Platforms`_ should be targeted instead of ``all``.
An alternative to the above is symlinking. Molecule creates symlinks to
the specified directory in the inventory directory. This allows ansible to
converge utilizing its built in host/group_vars resolution. These two
forms of inventory management are mutually exclusive.
Like above, it is possible to pass an additional inventory file
(or even dynamic inventory script), using the ``hosts`` key. `Ansible`_ will
automatically merge this inventory with the one generated by molecule.
This can be useful if you want to define extra hosts that are not managed
by Molecule.
.. important::
        Again, it is the developer's responsibility to target the proper hosts
        in the playbook. Only the hosts defined under
        `Platforms`_ should be targeted instead of ``all``.
.. note::
The source directory linking is relative to the scenario's
directory.
The only valid keys are ``hosts``, ``group_vars`` and ``host_vars``. Molecule's
schema validator will enforce this.
.. code-block:: yaml
provisioner:
name: ansible
inventory:
links:
hosts: ../../../inventory/hosts
group_vars: ../../../inventory/group_vars/
host_vars: ../../../inventory/host_vars/
Override connection options:
.. code-block:: yaml
provisioner:
name: ansible
connection_options:
ansible_ssh_user: foo
ansible_ssh_common_args: -o IdentitiesOnly=no
.. _`variables defined in a playbook`: https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html#defining-variables-in-a-playbook
Add arguments to ansible-playbook when running converge:
.. code-block:: yaml
provisioner:
name: ansible
ansible_args:
- --inventory=mygroups.yml
- --limit=host1,host2
""" # noqa
def __init__(self, config):
"""
Initialize a new ansible class and returns None.
:param config: An instance of a Molecule config.
:return: None
"""
super(Ansible, self).__init__(config)
@property
def default_config_options(self):
"""
Provide Default options to construct ansible.cfg and returns a dict.
:return: dict
"""
return {
"defaults": {
"ansible_managed": "Ansible managed: Do NOT edit this file manually!",
"display_failed_stderr": True,
"forks": 50,
"retry_files_enabled": False,
"host_key_checking": False,
"nocows": 1,
"interpreter_python": "auto",
},
"ssh_connection": {
"scp_if_ssh": True,
"control_path": "%(directory)s/%%h-%%p-%%r",
},
}
@property
def default_options(self):
d = {"skip-tags": "molecule-notest,notest"}
if self._config.action == "idempotence":
d["skip-tags"] += ",molecule-idempotence-notest"
if self._config.debug:
d["vvv"] = True
d["diff"] = True
return d
@property
def default_env(self):
# Finds if the current project is part of an ansible_collections hierarchy
collection_indicator = "ansible_collections"
collections_paths_list = [
util.abs_path(
os.path.join(self._config.scenario.ephemeral_directory, "collections")
)
]
if collection_indicator in self._config.project_directory:
collection_path, right = self._config.project_directory.split(
collection_indicator
)
collections_paths_list.append(util.abs_path(collection_path))
collections_paths_list.extend(
[
util.abs_path(os.path.join(os.path.expanduser("~"), ".ansible")),
"/usr/share/ansible/collections",
"/etc/ansible/collections",
]
)
env = util.merge_dicts(
os.environ,
{
"ANSIBLE_CONFIG": self._config.provisioner.config_file,
"ANSIBLE_ROLES_PATH": ":".join(
[
util.abs_path(
os.path.join(
self._config.scenario.ephemeral_directory, "roles"
)
),
util.abs_path(
os.path.join(self._config.project_directory, os.path.pardir)
),
util.abs_path(
os.path.join(os.path.expanduser("~"), ".ansible", "roles")
),
"/usr/share/ansible/roles",
"/etc/ansible/roles",
]
),
"ANSIBLE_COLLECTIONS_PATHS": ":".join(collections_paths_list),
"ANSIBLE_LIBRARY": ":".join(self._get_modules_directories()),
"ANSIBLE_FILTER_PLUGINS": ":".join(
[
self._get_filter_plugin_directory(),
util.abs_path(
os.path.join(
self._config.scenario.ephemeral_directory,
"plugins",
"filter",
)
),
util.abs_path(
os.path.join(
self._config.project_directory, "plugins", "filter"
)
),
util.abs_path(
os.path.join(
os.path.expanduser("~"), ".ansible", "plugins", "filter"
)
),
"/usr/share/ansible/plugins/filter",
]
),
},
)
env = util.merge_dicts(env, self._config.env)
return env
@property
def name(self):
return self._config.config["provisioner"]["name"]
@property
def ansible_args(self):
return self._config.config["provisioner"]["ansible_args"]
@property
def config_options(self):
return util.merge_dicts(
self.default_config_options,
self._config.config["provisioner"]["config_options"],
)
@property
def options(self):
if self._config.action in ["create", "destroy"]:
return self.default_options
o = self._config.config["provisioner"]["options"]
# NOTE(retr0h): Remove verbose options added by the user while in
# debug.
if self._config.debug:
o = util.filter_verbose_permutation(o)
return util.merge_dicts(self.default_options, o)
@property
def env(self):
default_env = self.default_env
env = self._config.config["provisioner"]["env"].copy()
# ensure that all keys and values are strings
env = {str(k): str(v) for k, v in env.items()}
roles_path = default_env["ANSIBLE_ROLES_PATH"]
library_path = default_env["ANSIBLE_LIBRARY"]
filter_plugins_path = default_env["ANSIBLE_FILTER_PLUGINS"]
try:
path = self._absolute_path_for(env, "ANSIBLE_ROLES_PATH")
roles_path = "{}:{}".format(roles_path, path)
except KeyError:
pass
try:
path = self._absolute_path_for(env, "ANSIBLE_LIBRARY")
library_path = "{}:{}".format(library_path, path)
except KeyError:
pass
try:
path = self._absolute_path_for(env, "ANSIBLE_FILTER_PLUGINS")
filter_plugins_path = "{}:{}".format(filter_plugins_path, path)
except KeyError:
pass
env["ANSIBLE_ROLES_PATH"] = roles_path
env["ANSIBLE_LIBRARY"] = library_path
env["ANSIBLE_FILTER_PLUGINS"] = filter_plugins_path
return util.merge_dicts(default_env, env)
@property
def hosts(self):
return self._config.config["provisioner"]["inventory"]["hosts"]
@property
def host_vars(self):
return self._config.config["provisioner"]["inventory"]["host_vars"]
@property
def group_vars(self):
return self._config.config["provisioner"]["inventory"]["group_vars"]
@property
def links(self):
return self._config.config["provisioner"]["inventory"]["links"]
@property
def inventory(self):
"""
Create an inventory structure and returns a dict.
.. code-block:: yaml
ungrouped:
vars:
foo: bar
hosts:
instance-1:
instance-2:
children:
$child_group_name:
hosts:
instance-1:
instance-2:
$group_name:
hosts:
instance-1:
ansible_connection: docker
instance-2:
ansible_connection: docker
:return: str
"""
dd = self._vivify()
for platform in self._config.platforms.instances:
for group in platform.get("groups", ["ungrouped"]):
instance_name = platform["name"]
connection_options = self.connection_options(instance_name)
molecule_vars = {
"molecule_file": "{{ lookup('env', 'MOLECULE_FILE') }}",
"molecule_ephemeral_directory": "{{ lookup('env', 'MOLECULE_EPHEMERAL_DIRECTORY') }}",
"molecule_scenario_directory": "{{ lookup('env', 'MOLECULE_SCENARIO_DIRECTORY') }}",
"molecule_yml": "{{ lookup('file', molecule_file) | molecule_from_yaml }}",
"molecule_instance_config": "{{ lookup('env', 'MOLECULE_INSTANCE_CONFIG') }}",
"molecule_no_log": "{{ lookup('env', 'MOLECULE_NO_LOG') or not "
"molecule_yml.provisioner.log|default(False) | bool }}",
}
# All group
dd["all"]["hosts"][instance_name] = connection_options
dd["all"]["vars"] = molecule_vars
# Named group
dd[group]["hosts"][instance_name] = connection_options
dd[group]["vars"] = molecule_vars
# Ungrouped
dd["ungrouped"]["vars"] = {}
# Children
for child_group in platform.get("children", []):
dd[group]["children"][child_group]["hosts"][
instance_name
] = connection_options
return self._default_to_regular(dd)
@property
def inventory_directory(self):
return self._config.scenario.inventory_directory
@property
def inventory_file(self):
return os.path.join(self.inventory_directory, "ansible_inventory.yml")
@property
def config_file(self):
return os.path.join(self._config.scenario.ephemeral_directory, "ansible.cfg")
@property
@util.lru_cache()
def playbooks(self):
return ansible_playbooks.AnsiblePlaybooks(self._config)
@property
def directory(self):
return os.path.join(
os.path.dirname(__file__),
os.path.pardir,
os.path.pardir,
"molecule",
"provisioner",
"ansible",
)
def cleanup(self):
"""
Execute `ansible-playbook` against the cleanup playbook and returns \
None.
:return: None
"""
pb = self._get_ansible_playbook(self.playbooks.cleanup)
pb.execute()
def connection_options(self, instance_name):
d = self._config.driver.ansible_connection_options(instance_name)
return util.merge_dicts(
d, self._config.config["provisioner"]["connection_options"]
)
def check(self):
"""
Execute ``ansible-playbook`` against the converge playbook with the \
``--check`` flag and returns None.
:return: None
"""
pb = self._get_ansible_playbook(self.playbooks.converge)
pb.add_cli_arg("check", True)
pb.execute()
def converge(self, playbook=None, **kwargs):
"""
Execute ``ansible-playbook`` against the converge playbook unless \
specified otherwise and returns a string.
:param playbook: An optional string containing an absolute path to a
playbook.
:param kwargs: An optional keyword arguments.
:return: str
"""
pb = self._get_ansible_playbook(playbook or self.playbooks.converge, **kwargs)
return pb.execute()
def destroy(self):
"""
Execute ``ansible-playbook`` against the destroy playbook and returns \
None.
:return: None
"""
pb = self._get_ansible_playbook(self.playbooks.destroy)
pb.execute()
def side_effect(self):
"""
Execute ``ansible-playbook`` against the side_effect playbook and \
returns None.
:return: None
"""
pb = self._get_ansible_playbook(self.playbooks.side_effect)
pb.execute()
def create(self):
"""
Execute ``ansible-playbook`` against the create playbook and returns \
None.
:return: None
"""
pb = self._get_ansible_playbook(self.playbooks.create)
pb.execute()
def prepare(self):
"""
Execute ``ansible-playbook`` against the prepare playbook and returns \
None.
:return: None
"""
pb = self._get_ansible_playbook(self.playbooks.prepare)
pb.execute()
def syntax(self):
"""
Execute ``ansible-playbook`` against the converge playbook with the \
        ``--syntax-check`` flag and returns None.
:return: None
"""
pb = self._get_ansible_playbook(self.playbooks.converge)
pb.add_cli_arg("syntax-check", True)
pb.execute()
def verify(self):
"""
Execute ``ansible-playbook`` against the verify playbook and returns \
None.
:return: None
"""
if not self.playbooks.verify:
LOG.warning("Skipping, verify playbook not configured.")
return
pb = self._get_ansible_playbook(self.playbooks.verify)
pb.execute()
def write_config(self):
"""
Write the provisioner's config file to disk and returns None.
:return: None
"""
template = util.render_template(
self._get_config_template(), config_options=self.config_options
)
util.write_file(self.config_file, template)
def manage_inventory(self):
"""
Manage inventory for Ansible and returns None.
:returns: None
"""
self._write_inventory()
self._remove_vars()
if not self.links:
self._add_or_update_vars()
else:
self._link_or_update_vars()
def abs_path(self, path):
return util.abs_path(os.path.join(self._config.scenario.directory, path))
def _add_or_update_vars(self):
"""
Create host and/or group vars and returns None.
:returns: None
"""
# Create the hosts extra inventory source (only if not empty)
hosts_file = os.path.join(self.inventory_directory, "hosts")
if self.hosts:
util.write_file(hosts_file, util.safe_dump(self.hosts))
# Create the host_vars and group_vars directories
for target in ["host_vars", "group_vars"]:
if target == "host_vars":
vars_target = copy.deepcopy(self.host_vars)
for instance_name, _ in self.host_vars.items():
instance_key = instance_name
vars_target[instance_key] = vars_target.pop(instance_name)
elif target == "group_vars":
vars_target = self.group_vars
if vars_target:
target_vars_directory = os.path.join(self.inventory_directory, target)
if not os.path.isdir(util.abs_path(target_vars_directory)):
os.mkdir(util.abs_path(target_vars_directory))
for target in vars_target.keys():
target_var_content = vars_target[target]
path = os.path.join(util.abs_path(target_vars_directory), target)
util.write_file(path, util.safe_dump(target_var_content))
def _write_inventory(self):
"""
Write the provisioner's inventory file to disk and returns None.
:return: None
"""
self._verify_inventory()
util.write_file(self.inventory_file, util.safe_dump(self.inventory))
def _remove_vars(self):
"""
Remove hosts/host_vars/group_vars and returns None.
:returns: None
"""
for name in ("hosts", "group_vars", "host_vars"):
d = os.path.join(self.inventory_directory, name)
if os.path.islink(d) or os.path.isfile(d):
os.unlink(d)
elif os.path.isdir(d):
shutil.rmtree(d)
def _link_or_update_vars(self):
"""
        Create or update the symlink to group_vars and returns None.
:returns: None
"""
for d, source in self.links.items():
target = os.path.join(self.inventory_directory, d)
source = os.path.join(self._config.scenario.directory, source)
if not os.path.exists(source):
msg = "The source path '{}' does not exist.".format(source)
util.sysexit_with_message(msg)
msg = "Inventory {} linked to {}".format(source, target)
LOG.info(msg)
os.symlink(source, target)
def _get_ansible_playbook(self, playbook, **kwargs):
"""
Get an instance of AnsiblePlaybook and returns it.
:param playbook: A string containing an absolute path to a
provisioner's playbook.
:param kwargs: An optional keyword arguments.
:return: object
"""
return ansible_playbook.AnsiblePlaybook(playbook, self._config, **kwargs)
def _verify_inventory(self):
"""
Verify the inventory is valid and returns None.
:return: None
"""
if not self.inventory:
msg = "Instances missing from the 'platform' " "section of molecule.yml."
util.sysexit_with_message(msg)
def _get_config_template(self):
"""
Return a config template string.
:return: str
"""
return """
{% for section, section_dict in config_options.items() -%}
[{{ section }}]
{% for k, v in section_dict.items() -%}
{{ k }} = {{ v }}
{% endfor -%}
{% endfor -%}
""".strip()
def _vivify(self):
"""
Return an autovivification default dict.
:return: dict
"""
return collections.defaultdict(self._vivify)
def _default_to_regular(self, d):
if isinstance(d, collections.defaultdict):
d = {k: self._default_to_regular(v) for k, v in d.items()}
return d
def _get_plugin_directory(self):
return os.path.join(self.directory, "plugins")
def _get_modules_directories(self):
"""Return list of ansilbe module includes directories.
Adds modules directory from molecule and its plugins.
"""
paths = [util.abs_path(os.path.join(self._get_plugin_directory(), "modules"))]
for d in drivers():
p = d.modules_dir()
if p:
paths.append(p)
paths.extend(
[
util.abs_path(
os.path.join(self._config.scenario.ephemeral_directory, "library")
),
util.abs_path(os.path.join(self._config.project_directory, "library")),
util.abs_path(
os.path.join(
os.path.expanduser("~"), ".ansible", "plugins", "modules",
)
),
"/usr/share/ansible/plugins/modules",
]
)
return paths
def _get_filter_plugin_directory(self):
return util.abs_path(os.path.join(self._get_plugin_directory(), "filter"))
def _absolute_path_for(self, env, key):
return ":".join([self.abs_path(p) for p in env[key].split(":")])
the-stack_106_13188
#!/usr/bin/env python3
"""
Script for generating doxygen output
"""
def root_path():
import os
path = os.path.realpath(__file__) # ./tools/run-doxygen.py
path = os.path.dirname(path) # ./tools/
path = os.path.dirname(path) # ./
return path
if __name__ == "__main__":
import subprocess
import os
doxyfile_path = os.path.join(root_path(),".codedocs")
subprocess.run(["doxygen", doxyfile_path],
cwd=root_path(),
check=True)
the-stack_106_13191
import pytest
import numpy as np
from sympy import Symbol, Min
import pickle
from conftest import skipif
from devito import (Constant, Eq, Function, TimeFunction, SparseFunction, Grid,
Dimension, SubDimension, ConditionalDimension, IncrDimension,
TimeDimension, SteppingDimension, Operator, ShiftedDimension)
from devito.data import LEFT, OWNED
from devito.mpi.halo_scheme import Halo
from devito.mpi.routines import (MPIStatusObject, MPIMsgEnriched, MPIRequestObject,
MPIRegion)
from devito.types import (Array, CustomDimension, Symbol as dSymbol, Scalar,
PointerArray, Lock, STDThreadArray, SharedData, Timer)
from devito.symbolics import (IntDiv, ListInitializer, FieldFromPointer,
FunctionFromPointer, DefFunction)
from examples.seismic import (demo_model, AcquisitionGeometry,
TimeAxis, RickerSource, Receiver)
def test_constant():
c = Constant(name='c')
assert c.data == 0.
c.data = 1.
pkl_c = pickle.dumps(c)
new_c = pickle.loads(pkl_c)
# .data is initialized, so it should have been pickled too
assert np.all(c.data == 1.)
assert np.all(new_c.data == 1.)
def test_dimension():
d = Dimension(name='d')
pkl_d = pickle.dumps(d)
new_d = pickle.loads(pkl_d)
assert d.name == new_d.name
assert d.dtype == new_d.dtype
def test_function():
grid = Grid(shape=(3, 3, 3))
f = Function(name='f', grid=grid)
f.data[0] = 1.
pkl_f = pickle.dumps(f)
new_f = pickle.loads(pkl_f)
# .data is initialized, so it should have been pickled too
assert np.all(f.data[0] == 1.)
assert np.all(new_f.data[0] == 1.)
assert f.space_order == new_f.space_order
assert f.dtype == new_f.dtype
assert f.shape == new_f.shape
def test_sparse_function():
grid = Grid(shape=(3,))
sf = SparseFunction(name='sf', grid=grid, npoint=3, space_order=2,
coordinates=[(0.,), (1.,), (2.,)])
sf.data[0] = 1.
pkl_sf = pickle.dumps(sf)
new_sf = pickle.loads(pkl_sf)
# .data is initialized, so it should have been pickled too
assert np.all(sf.data[0] == 1.)
assert np.all(new_sf.data[0] == 1.)
# coordinates should also have been pickled
assert np.all(sf.coordinates.data == new_sf.coordinates.data)
assert sf.space_order == new_sf.space_order
assert sf.dtype == new_sf.dtype
assert sf.npoint == new_sf.npoint
def test_internal_symbols():
s = dSymbol(name='s', dtype=np.float32)
pkl_s = pickle.dumps(s)
new_s = pickle.loads(pkl_s)
assert new_s.name == s.name
assert new_s.dtype is np.float32
s = Scalar(name='s', dtype=np.int32, is_const=True)
pkl_s = pickle.dumps(s)
new_s = pickle.loads(pkl_s)
assert new_s.name == s.name
assert new_s.dtype is np.int32
assert new_s.is_const is True
s = Scalar(name='s', nonnegative=True)
pkl_s = pickle.dumps(s)
new_s = pickle.loads(pkl_s)
assert new_s.name == s.name
assert new_s.assumptions0['nonnegative'] is True
def test_array():
grid = Grid(shape=(3, 3))
d = Dimension(name='d')
a = Array(name='a', dimensions=grid.dimensions, dtype=np.int32, halo=((1, 1), (2, 2)),
padding=((2, 2), (2, 2)), space='remote', scope='stack', sharing='local')
pkl_a = pickle.dumps(a)
new_a = pickle.loads(pkl_a)
assert new_a.name == a.name
assert new_a.dtype is np.int32
assert new_a.dimensions[0].name == 'x'
assert new_a.dimensions[1].name == 'y'
assert new_a.halo == ((1, 1), (2, 2))
assert new_a.padding == ((2, 2), (2, 2))
assert new_a.space == 'remote'
assert new_a.scope == 'stack'
assert new_a.sharing == 'local'
# Now with a pointer array
pa = PointerArray(name='pa', dimensions=d, array=a)
pkl_pa = pickle.dumps(pa)
new_pa = pickle.loads(pkl_pa)
assert new_pa.name == pa.name
assert new_pa.dim.name == 'd'
assert new_pa.array.name == 'a'
def test_sub_dimension():
di = SubDimension.middle('di', Dimension(name='d'), 1, 1)
pkl_di = pickle.dumps(di)
new_di = pickle.loads(pkl_di)
assert di.name == new_di.name
assert di.dtype == new_di.dtype
assert di.parent == new_di.parent
assert di._thickness == new_di._thickness
assert di._interval == new_di._interval
def test_conditional_dimension():
d = Dimension(name='d')
s = Scalar(name='s')
cd = ConditionalDimension(name='ci', parent=d, factor=4, condition=s > 3)
pkl_cd = pickle.dumps(cd)
new_cd = pickle.loads(pkl_cd)
assert cd.name == new_cd.name
assert cd.parent == new_cd.parent
assert cd.factor == new_cd.factor
assert cd.condition == new_cd.condition
def test_incr_dimension():
s = Scalar(name='s')
d = Dimension(name='d')
dd = IncrDimension('dd', d, s, 5, 2)
pkl_dd = pickle.dumps(dd)
new_dd = pickle.loads(pkl_dd)
assert dd.name == new_dd.name
assert dd.parent == new_dd.parent
assert dd.symbolic_min == new_dd.symbolic_min
assert dd.symbolic_max == new_dd.symbolic_max
assert dd.step == new_dd.step
def test_shifted_dimension():
d = Dimension(name='d')
dd = ShiftedDimension(d, name='dd')
pkl_dd = pickle.dumps(dd)
new_dd = pickle.loads(pkl_dd)
assert dd.name == new_dd.name
assert dd.parent == new_dd.parent
def test_custom_dimension():
symbolic_size = Constant(name='d_custom_size')
d = CustomDimension(name='d', symbolic_size=symbolic_size)
pkl_d = pickle.dumps(d)
new_d = pickle.loads(pkl_d)
assert d.name == new_d.name
assert d.symbolic_size.name == new_d.symbolic_size.name
def test_lock():
grid = Grid(shape=(3, 3, 3))
f = Function(name='f', grid=grid)
ld = CustomDimension(name='ld', symbolic_size=2)
lock = Lock(name='lock', dimensions=ld, target=f)
pkl_lock = pickle.dumps(lock)
new_lock = pickle.loads(pkl_lock)
    assert lock.name == new_lock.name
    assert new_lock.dimensions[0].symbolic_size == ld.symbolic_size
    assert new_lock.target.name == f.name
    assert new_lock.target.shape == f.shape
def test_std_thread_array():
a = STDThreadArray(name='threads', nthreads_std=4)
pkl_a = pickle.dumps(a)
new_a = pickle.loads(pkl_a)
assert a.name == new_a.name
assert a.dimensions[0].name == new_a.dimensions[0].name
assert a.size == new_a.size
def test_shared_data():
s = Scalar(name='s')
sdata = SharedData(name='sdata', nthreads_std=2, fields=[s])
pkl_sdata = pickle.dumps(sdata)
new_sdata = pickle.loads(pkl_sdata)
assert sdata.name == new_sdata.name
assert sdata.size == new_sdata.size
assert sdata.fields == new_sdata.fields
assert sdata.pfields == new_sdata.pfields
ffp = FieldFromPointer(sdata._field_flag, sdata.symbolic_base)
pkl_ffp = pickle.dumps(ffp)
new_ffp = pickle.loads(pkl_ffp)
assert ffp == new_ffp
indexed = sdata[0]
pkl_indexed = pickle.dumps(indexed)
new_indexed = pickle.loads(pkl_indexed)
assert indexed.name == new_indexed.name
assert indexed.shape == new_indexed.shape
def test_receiver():
grid = Grid(shape=(3,))
time_range = TimeAxis(start=0., stop=1000., step=0.1)
nreceivers = 3
rec = Receiver(name='rec', grid=grid, time_range=time_range, npoint=nreceivers,
coordinates=[(0.,), (1.,), (2.,)])
rec.data[:] = 1.
pkl_rec = pickle.dumps(rec)
new_rec = pickle.loads(pkl_rec)
assert np.all(new_rec.data == 1)
assert np.all(new_rec.coordinates.data == [[0.], [1.], [2.]])
def test_geometry():
shape = (50, 50, 50)
spacing = [10. for _ in shape]
nbl = 10
nrec = 10
tn = 150.
# Create two-layer model from preset
model = demo_model(preset='layers-isotropic', vp_top=1., vp_bottom=2.,
spacing=spacing, shape=shape, nbl=nbl)
# Source and receiver geometries
src_coordinates = np.empty((1, len(spacing)))
src_coordinates[0, :] = np.array(model.domain_size) * .5
if len(shape) > 1:
src_coordinates[0, -1] = model.origin[-1] + 2 * spacing[-1]
rec_coordinates = np.empty((nrec, len(spacing)))
rec_coordinates[:, 0] = np.linspace(0., model.domain_size[0], num=nrec)
if len(shape) > 1:
rec_coordinates[:, 1] = np.array(model.domain_size)[1] * .5
rec_coordinates[:, -1] = model.origin[-1] + 2 * spacing[-1]
geometry = AcquisitionGeometry(model, rec_coordinates, src_coordinates,
t0=0.0, tn=tn, src_type='Ricker', f0=0.010)
pkl_geom = pickle.dumps(geometry)
new_geom = pickle.loads(pkl_geom)
assert np.all(new_geom.src_positions == geometry.src_positions)
assert np.all(new_geom.rec_positions == geometry.rec_positions)
assert new_geom.f0 == geometry.f0
assert np.all(new_geom.src_type == geometry.src_type)
assert np.all(new_geom.src.data == geometry.src.data)
assert new_geom.t0 == geometry.t0
assert new_geom.tn == geometry.tn
def test_symbolics():
a = Symbol('a')
id = IntDiv(a, 3)
pkl_id = pickle.dumps(id)
new_id = pickle.loads(pkl_id)
assert id == new_id
ffp = FunctionFromPointer('foo', a, ['b', 'c'])
pkl_ffp = pickle.dumps(ffp)
new_ffp = pickle.loads(pkl_ffp)
assert ffp == new_ffp
li = ListInitializer(['a', 'b'])
pkl_li = pickle.dumps(li)
new_li = pickle.loads(pkl_li)
assert li == new_li
df = DefFunction('f', ['a', 1, 2])
pkl_df = pickle.dumps(df)
new_df = pickle.loads(pkl_df)
assert df == new_df
assert df.arguments == new_df.arguments
def test_timers():
"""Pickling for Timers used in Operators for C-level profiling."""
timer = Timer('timer', ['sec0', 'sec1'])
pkl_obj = pickle.dumps(timer)
new_obj = pickle.loads(pkl_obj)
assert new_obj.name == timer.name
assert new_obj.sections == timer.sections
assert new_obj.value._obj.sec0 == timer.value._obj.sec0 == 0.0
assert new_obj.value._obj.sec1 == timer.value._obj.sec1 == 0.0
def test_operator_parameters():
grid = Grid(shape=(3, 3, 3))
f = Function(name='f', grid=grid)
g = TimeFunction(name='g', grid=grid)
h = TimeFunction(name='h', grid=grid, save=10)
op = Operator(Eq(h.forward, h + g + f + 1))
for i in op.parameters:
pkl_i = pickle.dumps(i)
pickle.loads(pkl_i)
def test_unjitted_operator():
grid = Grid(shape=(3, 3, 3))
f = Function(name='f', grid=grid)
op = Operator(Eq(f, f + 1))
pkl_op = pickle.dumps(op)
new_op = pickle.loads(pkl_op)
assert str(op) == str(new_op)
def test_operator_function():
grid = Grid(shape=(3, 3, 3))
f = Function(name='f', grid=grid)
op = Operator(Eq(f, f + 1))
op.apply()
pkl_op = pickle.dumps(op)
new_op = pickle.loads(pkl_op)
assert str(op) == str(new_op)
new_op.apply(f=f)
assert np.all(f.data == 2)
def test_operator_function_w_preallocation():
grid = Grid(shape=(3, 3, 3))
f = Function(name='f', grid=grid)
f.data
op = Operator(Eq(f, f + 1))
op.apply()
pkl_op = pickle.dumps(op)
new_op = pickle.loads(pkl_op)
assert str(op) == str(new_op)
new_op.apply(f=f)
assert np.all(f.data == 2)
def test_operator_timefunction():
grid = Grid(shape=(3, 3, 3))
f = TimeFunction(name='f', grid=grid, save=3)
op = Operator(Eq(f.forward, f + 1))
op.apply(time=0)
pkl_op = pickle.dumps(op)
new_op = pickle.loads(pkl_op)
assert str(op) == str(new_op)
new_op.apply(time_m=1, time_M=1, f=f)
assert np.all(f.data[2] == 2)
def test_operator_timefunction_w_preallocation():
grid = Grid(shape=(3, 3, 3))
f = TimeFunction(name='f', grid=grid, save=3)
f.data
op = Operator(Eq(f.forward, f + 1))
op.apply(time=0)
pkl_op = pickle.dumps(op)
new_op = pickle.loads(pkl_op)
assert str(op) == str(new_op)
new_op.apply(time_m=1, time_M=1, f=f)
assert np.all(f.data[2] == 2)
@skipif(['nompi'])
@pytest.mark.parallel(mode=[1])
def test_mpi_objects():
grid = Grid(shape=(4, 4, 4))
# Neighbours
obj = grid.distributor._obj_neighborhood
pkl_obj = pickle.dumps(obj)
new_obj = pickle.loads(pkl_obj)
assert obj.name == new_obj.name
assert obj.pname == new_obj.pname
assert obj.pfields == new_obj.pfields
# Communicator
obj = grid.distributor._obj_comm
pkl_obj = pickle.dumps(obj)
new_obj = pickle.loads(pkl_obj)
assert obj.name == new_obj.name
assert obj.dtype == new_obj.dtype
# Status
obj = MPIStatusObject(name='status')
pkl_obj = pickle.dumps(obj)
new_obj = pickle.loads(pkl_obj)
assert obj.name == new_obj.name
assert obj.dtype == new_obj.dtype
# Request
obj = MPIRequestObject(name='request')
pkl_obj = pickle.dumps(obj)
new_obj = pickle.loads(pkl_obj)
assert obj.name == new_obj.name
assert obj.dtype == new_obj.dtype
@skipif(['nompi'])
@pytest.mark.parallel(mode=[(1, 'full')])
def test_mpi_fullmode_objects():
grid = Grid(shape=(4, 4, 4))
x, y, _ = grid.dimensions
# Message
f = Function(name='f', grid=grid)
obj = MPIMsgEnriched('msg', f, [Halo(x, LEFT)])
pkl_obj = pickle.dumps(obj)
new_obj = pickle.loads(pkl_obj)
assert obj.name == new_obj.name
assert obj.function.name == new_obj.function.name
assert all(obj.function.dimensions[i].name == new_obj.function.dimensions[i].name
for i in range(grid.dim))
assert new_obj.function.dimensions[0] is new_obj.halos[0].dim
# Region
x_m, x_M = x.symbolic_min, x.symbolic_max
y_m, y_M = y.symbolic_min, y.symbolic_max
obj = MPIRegion('reg', 1, [y, x],
[(((x, OWNED, LEFT),), {x: (x_m, Min(x_M, x_m))}),
(((y, OWNED, LEFT),), {y: (y_m, Min(y_M, y_m))})])
pkl_obj = pickle.dumps(obj)
new_obj = pickle.loads(pkl_obj)
assert obj.prefix == new_obj.prefix
assert obj.key == new_obj.key
assert obj.name == new_obj.name
assert len(new_obj.arguments) == 2
assert all(d0.name == d1.name for d0, d1 in zip(obj.arguments, new_obj.arguments))
assert all(new_obj.arguments[i] is new_obj.owned[i][0][0][0] # `x` and `y`
for i in range(2))
assert new_obj.owned[0][0][0][1] is new_obj.owned[1][0][0][1] # `OWNED`
assert new_obj.owned[0][0][0][2] is new_obj.owned[1][0][0][2] # `LEFT`
for n, i in enumerate(new_obj.owned):
d, v = list(i[1].items())[0]
assert d is new_obj.arguments[n]
assert v[0] is d.symbolic_min
assert v[1] == Min(d.symbolic_max, d.symbolic_min)
@skipif(['nompi'])
@pytest.mark.parallel(mode=[(1, 'basic'), (1, 'full')])
def test_mpi_operator():
grid = Grid(shape=(4,))
f = TimeFunction(name='f', grid=grid)
# Using `sum` creates a stencil in `x`, which in turn will
# trigger the generation of code for MPI halo exchange
op = Operator(Eq(f.forward, f.sum() + 1))
op.apply(time=2)
pkl_op = pickle.dumps(op)
new_op = pickle.loads(pkl_op)
assert str(op) == str(new_op)
new_grid = new_op.input[0].grid
g = TimeFunction(name='g', grid=new_grid)
new_op.apply(time=2, f=g)
assert np.all(f.data[0] == [2., 3., 3., 3.])
assert np.all(f.data[1] == [3., 6., 7., 7.])
assert np.all(g.data[0] == f.data[0])
assert np.all(g.data[1] == f.data[1])
def test_full_model():
shape = (50, 50, 50)
spacing = [10. for _ in shape]
nbl = 10
# Create two-layer model from preset
model = demo_model(preset='layers-isotropic', vp_top=1., vp_bottom=2.,
spacing=spacing, shape=shape, nbl=nbl)
# Test Model pickling
pkl_model = pickle.dumps(model)
new_model = pickle.loads(pkl_model)
assert np.isclose(np.linalg.norm(model.vp.data[:]-new_model.vp.data[:]), 0)
f0 = .010
dt = model.critical_dt
t0 = 0.0
tn = 350.0
time_range = TimeAxis(start=t0, stop=tn, step=dt)
# Test TimeAxis pickling
pkl_time_range = pickle.dumps(time_range)
new_time_range = pickle.loads(pkl_time_range)
assert np.isclose(np.linalg.norm(time_range.time_values),
np.linalg.norm(new_time_range.time_values))
# Test Class Constant pickling
pkl_origin = pickle.dumps(model.grid.origin)
new_origin = pickle.loads(pkl_origin)
for a, b in zip(model.grid.origin, new_origin):
assert a.compare(b) == 0
# Test Class TimeDimension pickling
time_dim = TimeDimension(name='time', spacing=Constant(name='dt', dtype=np.float32))
pkl_time_dim = pickle.dumps(time_dim)
new_time_dim = pickle.loads(pkl_time_dim)
assert time_dim.spacing._value == new_time_dim.spacing._value
# Test Class SteppingDimension
stepping_dim = SteppingDimension(name='t', parent=time_dim)
pkl_stepping_dim = pickle.dumps(stepping_dim)
new_stepping_dim = pickle.loads(pkl_stepping_dim)
assert stepping_dim.is_Time == new_stepping_dim.is_Time
# Test Grid pickling
pkl_grid = pickle.dumps(model.grid)
new_grid = pickle.loads(pkl_grid)
assert model.grid.shape == new_grid.shape
assert model.grid.extent == new_grid.extent
assert model.grid.shape == new_grid.shape
for a, b in zip(model.grid.dimensions, new_grid.dimensions):
assert a.compare(b) == 0
ricker = RickerSource(name='src', grid=model.grid, f0=f0, time_range=time_range)
pkl_ricker = pickle.dumps(ricker)
new_ricker = pickle.loads(pkl_ricker)
assert np.isclose(np.linalg.norm(ricker.data), np.linalg.norm(new_ricker.data))
# FIXME: fails randomly when using data.flatten() AND numpy is using MKL
the-stack_106_13192
#!/usr/bin/env python
#
# Copyright 2018 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Script to check whether the placement API is up after X attempts.
# Default max is 60 iterations with 10s (default) timeout in between.
import logging
import os
import re
import six
import sys
import time
from keystoneauth1.identity import v3
from keystoneauth1 import session
from keystoneclient.v3 import client
import requests
# In python3 SafeConfigParser was renamed to ConfigParser and the default
# for duplicate options default to true. In case of nova it is valid to
# have duplicate option lines, e.g. passthrough_whitelist which leads to
# issues reading the nova.conf
# https://bugs.launchpad.net/tripleo/+bug/1827775
if six.PY3:
from six.moves.configparser import ConfigParser
config = ConfigParser(strict=False)
else:
from six.moves.configparser import SafeConfigParser
config = SafeConfigParser()
debug = os.getenv('__OS_DEBUG', 'false')
if debug.lower() == 'true':
loglevel = logging.DEBUG
else:
loglevel = logging.INFO
logging.basicConfig(stream=sys.stdout, level=loglevel)
LOG = logging.getLogger('placement_wait_for_service')
iterations = 60
timeout = 10
placement_cfg = '/etc/placement/placement.conf'
if __name__ == '__main__':
if os.path.isfile(placement_cfg):
try:
config.read(placement_cfg)
except Exception:
LOG.exception('Error while reading placement.conf:')
else:
LOG.error('Placement configuration file %s does not exist',
placement_cfg)
sys.exit(1)
# get keystone client with details from [keystone_authtoken] section
auth = v3.Password(
user_domain_name=config.get('keystone_authtoken', 'user_domain_name'),
username=config.get('keystone_authtoken', 'username'),
password=config.get('keystone_authtoken', 'password'),
project_name=config.get('keystone_authtoken', 'project_name'),
project_domain_name=config.get('keystone_authtoken',
'project_domain_name'),
auth_url=config.get('keystone_authtoken', 'auth_url') + '/v3')
sess = session.Session(auth=auth, verify=False)
keystone = client.Client(session=sess, interface='internal')
iterations_endpoint = iterations
placement_endpoint_url = None
while iterations_endpoint > 1:
iterations_endpoint -= 1
try:
# get placement service id
placement_service_id = keystone.services.list(
name='placement')[0].id
# get placement endpoint
# Note: puppet-placement does not support setting the interface
# until we have https://review.opendev.org/688862.
# Lets hard code 'internal' for now.
placement_endpoint_url = keystone.endpoints.list(
service=placement_service_id,
region=config.get('keystone_authtoken', 'region_name'),
interface='internal')[0].url
if not placement_endpoint_url:
LOG.error('Failed to get placement service endpoint!')
else:
break
except Exception:
LOG.exception('Retry - Failed to get placement service endpoint:')
time.sleep(timeout)
if not placement_endpoint_url:
LOG.error('Failed to get placement service endpoint!')
sys.exit(1)
# we should have CURRENT in the request response from placement:
# {"versions": [{"status": "CURRENT", "min_version": "1.0", "max_version":
# "1.29", "id": "v1.0", "links": [{"href": "", "rel": "self"}]}]}
response_reg = re.compile('.*CURRENT,*')
while iterations > 1:
iterations -= 1
try:
r = requests.get(placement_endpoint_url + '/', verify=False)
            if r.status_code == 200 and response_reg.match(r.text):
                LOG.info('Placement service up! - %s', r.text)
                sys.exit(0)
else:
LOG.info('response - %r', r)
LOG.info('Placement service not up - %s, %s',
r.status_code,
r.text)
except Exception:
LOG.exception('Error query the placement endpoint:')
time.sleep(timeout)
sys.exit(1)
# vim: set et ts=4 sw=4 :
the-stack_106_13194
"""
The ``mlflow.tensorflow`` module provides an API for logging and loading TensorFlow models.
This module exports TensorFlow models with the following flavors:
TensorFlow (native) format
This is the main flavor that can be loaded back into TensorFlow.
:py:mod:`mlflow.pyfunc`
Produced for use by generic pyfunc-based deployment tools and batch inference.
"""
from __future__ import absolute_import
import os
import shutil
import yaml
import logging
import gorilla
import concurrent.futures
import warnings
import atexit
import time
import tempfile
import pandas
import mlflow
import tensorflow
import mlflow.keras
from distutils.version import LooseVersion
from tensorflow.keras.callbacks import Callback, TensorBoard # pylint: disable=import-error
from mlflow import pyfunc
from mlflow.exceptions import MlflowException
from mlflow.models import Model
from mlflow.protos.databricks_pb2 import DIRECTORY_NOT_EMPTY
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils import keyword_only, experimental
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.utils.file_utils import _copy_file_or_tree
from mlflow.utils.model_utils import _get_flavor_configuration
from mlflow.utils.autologging_utils import try_mlflow_log
from mlflow.entities import Metric
FLAVOR_NAME = "tensorflow"
_logger = logging.getLogger(__name__)
_MAX_METRIC_QUEUE_SIZE = 500
_LOG_EVERY_N_STEPS = 100
_metric_queue = []
_thread_pool = concurrent.futures.ThreadPoolExecutor(max_workers=1)
def get_default_conda_env():
"""
:return: The default Conda environment for MLflow Models produced by calls to
:func:`save_model()` and :func:`log_model()`.
"""
return _mlflow_conda_env(
additional_conda_deps=[
"tensorflow={}".format(tensorflow.__version__),
],
additional_pip_deps=None,
additional_conda_channels=None)
@keyword_only
def log_model(tf_saved_model_dir, tf_meta_graph_tags, tf_signature_def_key, artifact_path,
conda_env=None, registered_model_name=None):
"""
Log a *serialized* collection of TensorFlow graphs and variables as an MLflow model
for the current run. This method operates on TensorFlow variables and graphs that have been
serialized in TensorFlow's ``SavedModel`` format. For more information about ``SavedModel``
format, see the TensorFlow documentation:
https://www.tensorflow.org/guide/saved_model#save_and_restore_models.
This method saves a model with both ``python_function`` and ``tensorflow`` flavors.
If loaded back using the ``python_function`` flavor, the model can be used to predict on
pandas DataFrames, producing a pandas DataFrame whose output columns correspond to the
TensorFlow model's outputs. The python_function model will flatten outputs that are length-one,
one-dimensional tensors of a single scalar value (e.g.
``{"predictions": [[1.0], [2.0], [3.0]]}``) into the scalar values (e.g.
``{"predictions": [1, 2, 3]}``), so that the resulting output column is a column of scalars
rather than lists of length one. All other model output types are included as-is in the output
DataFrame.
:param tf_saved_model_dir: Path to the directory containing serialized TensorFlow variables and
graphs in ``SavedModel`` format.
:param tf_meta_graph_tags: A list of tags identifying the model's metagraph within the
serialized ``SavedModel`` object. For more information, see the
``tags`` parameter of the
``tf.saved_model.builder.SavedModelBuilder`` method.
:param tf_signature_def_key: A string identifying the input/output signature associated with the
model. This is a key within the serialized ``SavedModel`` signature
definition mapping. For more information, see the
``signature_def_map`` parameter of the
``tf.saved_model.builder.SavedModelBuilder`` method.
:param artifact_path: The run-relative path to which to log model artifacts.
:param conda_env: Either a dictionary representation of a Conda environment or the path to a
                      Conda environment yaml file. If provided, this describes the environment
this model should be run in. At minimum, it should specify the dependencies
contained in :func:`get_default_conda_env()`. If ``None``, the default
:func:`get_default_conda_env()` environment is added to the model. The
following is an *example* dictionary representation of a Conda environment::
{
'name': 'mlflow-env',
'channels': ['defaults'],
'dependencies': [
'python=3.7.0',
'tensorflow=1.8.0'
]
}
:param registered_model_name: Note:: Experimental: This argument may change or be removed in a
future release without warning. If given, create a model
version under ``registered_model_name``, also creating a
registered model if one with the given name does not exist.
"""
return Model.log(artifact_path=artifact_path, flavor=mlflow.tensorflow,
tf_saved_model_dir=tf_saved_model_dir, tf_meta_graph_tags=tf_meta_graph_tags,
tf_signature_def_key=tf_signature_def_key, conda_env=conda_env,
registered_model_name=registered_model_name)
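# A hedged usage sketch (not part of the original module): logging an existing
# SavedModel directory under the active run. The directory path and signature key
# below are hypothetical placeholders.
#
#   import mlflow.tensorflow
#   from tensorflow.python.saved_model import tag_constants
#
#   mlflow.tensorflow.log_model(
#       tf_saved_model_dir="/tmp/exported_model",        # hypothetical path
#       tf_meta_graph_tags=[tag_constants.SERVING],
#       tf_signature_def_key="serving_default",          # hypothetical key
#       artifact_path="model")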
@keyword_only
def save_model(tf_saved_model_dir, tf_meta_graph_tags, tf_signature_def_key, path,
mlflow_model=Model(), conda_env=None):
"""
Save a *serialized* collection of TensorFlow graphs and variables as an MLflow model
to a local path. This method operates on TensorFlow variables and graphs that have been
serialized in TensorFlow's ``SavedModel`` format. For more information about ``SavedModel``
format, see the TensorFlow documentation:
https://www.tensorflow.org/guide/saved_model#save_and_restore_models.
:param tf_saved_model_dir: Path to the directory containing serialized TensorFlow variables and
graphs in ``SavedModel`` format.
:param tf_meta_graph_tags: A list of tags identifying the model's metagraph within the
serialized ``SavedModel`` object. For more information, see the
``tags`` parameter of the
                               ``tf.saved_model.builder.SavedModelBuilder`` method.
:param tf_signature_def_key: A string identifying the input/output signature associated with the
                                 model. This is a key within the serialized ``SavedModel``
signature definition mapping. For more information, see the
``signature_def_map`` parameter of the
                                 ``tf.saved_model.builder.SavedModelBuilder`` method.
:param path: Local path where the MLflow model is to be saved.
:param mlflow_model: MLflow model configuration to which to add the ``tensorflow`` flavor.
:param conda_env: Either a dictionary representation of a Conda environment or the path to a
                      Conda environment yaml file. If provided, this describes the environment
this model should be run in. At minimum, it should specify the dependencies
contained in :func:`get_default_conda_env()`. If ``None``, the default
:func:`get_default_conda_env()` environment is added to the model. The
following is an *example* dictionary representation of a Conda environment::
{
'name': 'mlflow-env',
'channels': ['defaults'],
'dependencies': [
'python=3.7.0',
'tensorflow=1.8.0'
]
}
"""
_logger.info(
"Validating the specified TensorFlow model by attempting to load it in a new TensorFlow"
" graph...")
_validate_saved_model(tf_saved_model_dir=tf_saved_model_dir,
tf_meta_graph_tags=tf_meta_graph_tags,
tf_signature_def_key=tf_signature_def_key)
_logger.info("Validation succeeded!")
if os.path.exists(path):
raise MlflowException("Path '{}' already exists".format(path), DIRECTORY_NOT_EMPTY)
os.makedirs(path)
root_relative_path = _copy_file_or_tree(src=tf_saved_model_dir, dst=path, dst_dir=None)
model_dir_subpath = "tfmodel"
shutil.move(os.path.join(path, root_relative_path), os.path.join(path, model_dir_subpath))
conda_env_subpath = "conda.yaml"
if conda_env is None:
conda_env = get_default_conda_env()
elif not isinstance(conda_env, dict):
with open(conda_env, "r") as f:
conda_env = yaml.safe_load(f)
with open(os.path.join(path, conda_env_subpath), "w") as f:
yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
mlflow_model.add_flavor(FLAVOR_NAME, saved_model_dir=model_dir_subpath,
meta_graph_tags=tf_meta_graph_tags,
signature_def_key=tf_signature_def_key)
pyfunc.add_to_model(mlflow_model, loader_module="mlflow.tensorflow", env=conda_env_subpath)
mlflow_model.save(os.path.join(path, "MLmodel"))
def _validate_saved_model(tf_saved_model_dir, tf_meta_graph_tags, tf_signature_def_key):
"""
Validate the TensorFlow SavedModel by attempting to load it in a new TensorFlow graph.
If the loading process fails, any exceptions thrown by TensorFlow are propagated.
"""
if LooseVersion(tensorflow.__version__) < LooseVersion('2.0.0'):
validation_tf_graph = tensorflow.Graph()
validation_tf_sess = tensorflow.Session(graph=validation_tf_graph)
with validation_tf_graph.as_default():
_load_tensorflow_saved_model(tf_saved_model_dir=tf_saved_model_dir,
tf_sess=validation_tf_sess,
tf_meta_graph_tags=tf_meta_graph_tags,
tf_signature_def_key=tf_signature_def_key)
else:
_load_tensorflow_saved_model(tf_saved_model_dir=tf_saved_model_dir,
tf_meta_graph_tags=tf_meta_graph_tags,
tf_signature_def_key=tf_signature_def_key)
def load_model(model_uri, tf_sess=None):
"""
Load an MLflow model that contains the TensorFlow flavor from the specified path.
*With TensorFlow version <2.0.0, this method must be called within a TensorFlow graph context.*
:param model_uri: The location, in URI format, of the MLflow model. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/tracking.html#
artifact-locations>`_.
:param tf_sess: The TensorFlow session in which to load the model. If using TensorFlow
version >= 2.0.0, this argument is ignored. If using TensorFlow <2.0.0, if no
session is passed to this function, MLflow will attempt to load the model using
the default TensorFlow session. If no default session is available, then the
function raises an exception.
:return: For TensorFlow < 2.0.0, a TensorFlow signature definition of type:
``tensorflow.core.protobuf.meta_graph_pb2.SignatureDef``. This defines the input and
output tensors for model inference.
For TensorFlow >= 2.0.0, A callable graph (tf.function) that takes inputs and
returns inferences.
>>> import mlflow.tensorflow
>>> import tensorflow as tf
>>> tf_graph = tf.Graph()
>>> tf_sess = tf.Session(graph=tf_graph)
>>> with tf_graph.as_default():
    >>>     signature_def = mlflow.tensorflow.load_model(model_uri="model_uri",
>>> tf_sess=tf_sess)
>>> input_tensors = [tf_graph.get_tensor_by_name(input_signature.name)
>>> for _, input_signature in signature_def.inputs.items()]
>>> output_tensors = [tf_graph.get_tensor_by_name(output_signature.name)
>>> for _, output_signature in signature_def.outputs.items()]
"""
if LooseVersion(tensorflow.__version__) < LooseVersion('2.0.0'):
if not tf_sess:
tf_sess = tensorflow.get_default_session()
if not tf_sess:
raise MlflowException("No TensorFlow session found while calling load_model()." +
"You can set the default Tensorflow session before calling" +
" load_model via `session.as_default()`, or directly pass " +
"a session in which to load the model via the tf_sess " +
"argument.")
else:
if tf_sess:
warnings.warn("A TensorFlow session was passed into load_model, but the " +
"currently used version is TF 2.0 where sessions are deprecated. " +
"The tf_sess argument will be ignored.", FutureWarning)
local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)
tf_saved_model_dir, tf_meta_graph_tags, tf_signature_def_key =\
_get_and_parse_flavor_configuration(model_path=local_model_path)
return _load_tensorflow_saved_model(tf_saved_model_dir=tf_saved_model_dir,
tf_meta_graph_tags=tf_meta_graph_tags,
tf_signature_def_key=tf_signature_def_key,
tf_sess=tf_sess)
def _load_tensorflow_saved_model(tf_saved_model_dir, tf_meta_graph_tags, tf_signature_def_key,
tf_sess=None):
"""
Load a specified TensorFlow model consisting of a TensorFlow metagraph and signature definition
from a serialized TensorFlow ``SavedModel`` collection.
:param tf_saved_model_dir: The local filesystem path or run-relative artifact path to the model.
:param tf_meta_graph_tags: A list of tags identifying the model's metagraph within the
serialized ``SavedModel`` object. For more information, see the
``tags`` parameter of the `tf.saved_model.builder.SavedModelBuilder
method <https://www.tensorflow.org/api_docs/python/tf/saved_model/
builder/SavedModelBuilder#add_meta_graph>`_.
:param tf_signature_def_key: A string identifying the input/output signature associated with the
model. This is a key within the serialized ``SavedModel``'s
signature definition mapping. For more information, see the
``signature_def_map`` parameter of the
``tf.saved_model.builder.SavedModelBuilder`` method.
:param tf_sess: The TensorFlow session in which to load the metagraph.
Required in TensorFlow versions < 2.0.0. Unused in TensorFlow versions >= 2.0.0
:return: For TensorFlow versions < 2.0.0:
A TensorFlow signature definition of type:
``tensorflow.core.protobuf.meta_graph_pb2.SignatureDef``. This defines input and
output tensors within the specified metagraph for inference.
For TensorFlow versions >= 2.0.0:
A callable graph (tensorflow.function) that takes inputs and returns inferences.
"""
if LooseVersion(tensorflow.__version__) < LooseVersion('2.0.0'):
loaded = tensorflow.saved_model.loader.load(
sess=tf_sess,
tags=tf_meta_graph_tags,
export_dir=tf_saved_model_dir)
loaded_sig = loaded.signature_def
else:
loaded = tensorflow.saved_model.load( # pylint: disable=no-value-for-parameter
tags=tf_meta_graph_tags,
export_dir=tf_saved_model_dir)
loaded_sig = loaded.signatures
if tf_signature_def_key not in loaded_sig:
raise MlflowException("Could not find signature def key %s. Available keys are: %s"
% (tf_signature_def_key, list(loaded_sig.keys())))
return loaded_sig[tf_signature_def_key]
def _get_and_parse_flavor_configuration(model_path):
"""
:param path: Local filesystem path to the MLflow Model with the ``tensorflow`` flavor.
:return: A triple containing the following elements:
- ``tf_saved_model_dir``: The local filesystem path to the underlying TensorFlow
SavedModel directory.
- ``tf_meta_graph_tags``: A list of tags identifying the TensorFlow model's metagraph
within the serialized ``SavedModel`` object.
- ``tf_signature_def_key``: A string identifying the input/output signature associated
with the model. This is a key within the serialized
``SavedModel``'s signature definition mapping.
"""
flavor_conf = _get_flavor_configuration(model_path=model_path, flavor_name=FLAVOR_NAME)
tf_saved_model_dir = os.path.join(model_path, flavor_conf['saved_model_dir'])
tf_meta_graph_tags = flavor_conf['meta_graph_tags']
tf_signature_def_key = flavor_conf['signature_def_key']
return tf_saved_model_dir, tf_meta_graph_tags, tf_signature_def_key
def _load_pyfunc(path):
"""
Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``. This function loads an MLflow
model with the TensorFlow flavor into a new TensorFlow graph and exposes it behind the
``pyfunc.predict`` interface.
:param path: Local filesystem path to the MLflow Model with the ``tensorflow`` flavor.
"""
tf_saved_model_dir, tf_meta_graph_tags, tf_signature_def_key =\
_get_and_parse_flavor_configuration(model_path=path)
if LooseVersion(tensorflow.__version__) < LooseVersion('2.0.0'):
tf_graph = tensorflow.Graph()
tf_sess = tensorflow.Session(graph=tf_graph)
with tf_graph.as_default():
signature_def = _load_tensorflow_saved_model(
tf_saved_model_dir=tf_saved_model_dir, tf_sess=tf_sess,
tf_meta_graph_tags=tf_meta_graph_tags, tf_signature_def_key=tf_signature_def_key)
return _TFWrapper(tf_sess=tf_sess, tf_graph=tf_graph, signature_def=signature_def)
else:
loaded_model = tensorflow.saved_model.load( # pylint: disable=no-value-for-parameter
export_dir=tf_saved_model_dir,
tags=tf_meta_graph_tags)
return _TF2Wrapper(infer=loaded_model.signatures[tf_signature_def_key])
class _TFWrapper(object):
"""
Wrapper class that exposes a TensorFlow model for inference via a ``predict`` function such that
``predict(data: pandas.DataFrame) -> pandas.DataFrame``. For TensorFlow versions < 2.0.0.
"""
def __init__(self, tf_sess, tf_graph, signature_def):
"""
:param tf_sess: The TensorFlow session used to evaluate the model.
:param tf_graph: The TensorFlow graph containing the model.
:param signature_def: The TensorFlow signature definition used to transform input dataframes
into tensors and output vectors into dataframes.
"""
self.tf_sess = tf_sess
self.tf_graph = tf_graph
# We assume that input keys in the signature definition correspond to
# input DataFrame column names
self.input_tensor_mapping = {
tensor_column_name: tf_graph.get_tensor_by_name(tensor_info.name)
for tensor_column_name, tensor_info in signature_def.inputs.items()
}
# We assume that output keys in the signature definition correspond to
# output DataFrame column names
self.output_tensors = {
sigdef_output: tf_graph.get_tensor_by_name(tnsr_info.name)
for sigdef_output, tnsr_info in signature_def.outputs.items()
}
def predict(self, df):
with self.tf_graph.as_default():
# Build the feed dict, mapping input tensors to DataFrame column values.
feed_dict = {
self.input_tensor_mapping[tensor_column_name]: df[tensor_column_name].values
for tensor_column_name in self.input_tensor_mapping.keys()
}
raw_preds = self.tf_sess.run(self.output_tensors, feed_dict=feed_dict)
pred_dict = {column_name: values.ravel() for
column_name, values in raw_preds.items()}
return pandas.DataFrame(data=pred_dict)
class _TF2Wrapper(object):
"""
Wrapper class that exposes a TensorFlow model for inference via a ``predict`` function such that
``predict(data: pandas.DataFrame) -> pandas.DataFrame``. For TensorFlow versions >= 2.0.0.
"""
def __init__(self, infer):
"""
        :param infer: TensorFlow function returned by a saved model that is used for inference.
"""
self.infer = infer
def predict(self, df):
feed_dict = {}
for df_col_name in list(df):
# If there are multiple columns with the same name, selecting the shared name
# from the DataFrame will result in another DataFrame containing the columns
# with the shared name. TensorFlow cannot make eager tensors out of pandas
# DataFrames, so we convert the DataFrame to a numpy array here.
val = df[df_col_name]
if isinstance(val, pandas.DataFrame):
val = val.values
feed_dict[df_col_name] = tensorflow.constant(val)
raw_preds = self.infer(**feed_dict)
pred_dict = {
col_name: raw_preds[col_name].numpy() for col_name in raw_preds.keys()
}
for col in pred_dict.keys():
if all(len(element) == 1 for element in pred_dict[col]):
pred_dict[col] = pred_dict[col].ravel()
else:
pred_dict[col] = pred_dict[col].tolist()
return pandas.DataFrame.from_dict(data=pred_dict)
class __MLflowTfKerasCallback(Callback):
"""
Callback for auto-logging parameters (we rely on TensorBoard for metrics) in TensorFlow < 2.
Records model structural information as params after training finishes.
"""
def __init__(self):
if mlflow.active_run() is None:
mlflow.start_run()
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def on_train_begin(self, logs=None): # pylint: disable=unused-argument
opt = self.model.optimizer
if hasattr(opt, 'optimizer'):
opt = opt.optimizer
try_mlflow_log(mlflow.log_param, 'optimizer_name', type(opt).__name__)
if hasattr(opt, '_lr'):
lr = opt._lr if type(opt._lr) is float else tensorflow.keras.backend.eval(opt._lr)
try_mlflow_log(mlflow.log_param, 'learning_rate', lr)
if hasattr(opt, '_epsilon'):
epsilon = opt._epsilon if type(opt._epsilon) is float \
else tensorflow.keras.backend.eval(opt._epsilon)
try_mlflow_log(mlflow.log_param, 'epsilon', epsilon)
sum_list = []
self.model.summary(print_fn=sum_list.append)
summary = '\n'.join(sum_list)
try_mlflow_log(mlflow.set_tag, 'summary', summary)
def on_epoch_end(self, epoch, logs=None):
pass
def on_train_end(self, logs=None): # pylint: disable=unused-argument
try_mlflow_log(mlflow.keras.log_model, self.model, artifact_path='model')
class __MLflowTfKeras2Callback(Callback):
"""
Callback for auto-logging parameters and metrics in TensorFlow >= 2.0.0.
Records model structural information as params after training finishes.
"""
def __init__(self):
if mlflow.active_run() is None:
mlflow.start_run()
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def on_train_begin(self, logs=None): # pylint: disable=unused-argument
config = self.model.optimizer.get_config()
for attribute in config:
try_mlflow_log(mlflow.log_param, "opt_" + attribute, config[attribute])
sum_list = []
self.model.summary(print_fn=sum_list.append)
summary = '\n'.join(sum_list)
try_mlflow_log(mlflow.set_tag, 'summary', summary)
def on_epoch_end(self, epoch, logs=None):
if (epoch-1) % _LOG_EVERY_N_STEPS == 0:
try_mlflow_log(mlflow.log_metrics, logs, step=epoch)
def on_train_end(self, logs=None): # pylint: disable=unused-argument
try_mlflow_log(mlflow.keras.log_model, self.model, artifact_path='model')
def _log_artifacts_with_warning(**kwargs):
try_mlflow_log(mlflow.log_artifacts, **kwargs)
def _assoc_list_to_map(lst):
"""
Convert an association list to a dictionary.
"""
d = {}
for run_id, metric in lst:
d[run_id] = d[run_id] + [metric] if run_id in d else [metric]
return d
def _flush_queue():
"""
Flush the metric queue and log contents in batches to MLflow.
Queue is divided into batches according to run id.
"""
global _metric_queue
client = mlflow.tracking.MlflowClient()
dic = _assoc_list_to_map(_metric_queue)
for key in dic:
try_mlflow_log(client.log_batch, key, metrics=dic[key], params=[], tags=[])
_metric_queue = []
atexit.register(_flush_queue)
def _add_to_queue(key, value, step, time, run_id):
"""
Add a metric to the metric queue. Flush the queue if it exceeds
max size.
"""
met = Metric(key=key, value=value, timestamp=time, step=step)
_metric_queue.append((run_id, met))
if len(_metric_queue) > _MAX_METRIC_QUEUE_SIZE:
_flush_queue()
def _log_event(event):
"""
Extracts metric information from the event protobuf
"""
if mlflow.active_run() is None:
mlflow.start_run()
if event.WhichOneof('what') == 'summary':
summary = event.summary
for v in summary.value:
if v.HasField('simple_value'):
if (event.step-1) % _LOG_EVERY_N_STEPS == 0:
_thread_pool.submit(_add_to_queue, key=v.tag,
value=v.simple_value, step=event.step,
time=int(time.time() * 1000),
run_id=mlflow.active_run().info.run_id)
def _get_tensorboard_callback(lst):
for x in lst:
if isinstance(x, tensorflow.keras.callbacks.TensorBoard):
return x
return None
def _setup_callbacks(lst):
"""
    Adds TensorBoard and MLflowTfKeras callbacks to the
input list, and returns the new list and appropriate log directory.
"""
tb = _get_tensorboard_callback(lst)
if tb is None:
log_dir = tempfile.mkdtemp()
out_list = lst + [TensorBoard(log_dir)]
else:
log_dir = tb.log_dir
out_list = lst
if LooseVersion(tensorflow.__version__) < LooseVersion('2.0.0'):
out_list += [__MLflowTfKerasCallback()]
else:
out_list += [__MLflowTfKeras2Callback()]
return out_list, log_dir
@experimental
def autolog(every_n_iter=100):
# pylint: disable=E0611
"""
Enable automatic logging from TensorFlow to MLflow. If applicable,
model checkpoints are logged as artifacts to a 'models' directory, along
with any TensorBoard log data.
Refer to the tracking documentation for
information on what is logged with different TensorFlow workflows.
:param every_n_iter: The frequency with which metrics should be logged.
Defaults to 100. Ex: a value of 100 will log metrics
at step 0, 100, 200, etc.
"""
global _LOG_EVERY_N_STEPS
_LOG_EVERY_N_STEPS = every_n_iter
if LooseVersion(tensorflow.__version__) < LooseVersion('1.12'):
warnings.warn("Could not log to MLflow. Only TensorFlow versions" +
"1.12 <= v <= 2.0.0 are supported.")
return
try:
from tensorflow.python.summary.writer.event_file_writer import EventFileWriter
from tensorflow.python.summary.writer.event_file_writer_v2 import EventFileWriterV2
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary.writer.writer import FileWriter
except ImportError:
warnings.warn("Could not log to MLflow. Only TensorFlow versions" +
"1.12 <= v <= 2.0.0 are supported.")
return
@gorilla.patch(tensorflow.estimator.Estimator)
def export_saved_model(self, *args, **kwargs):
original = gorilla.get_original_attribute(tensorflow.estimator.Estimator,
'export_saved_model')
serialized = original(self, *args, **kwargs)
try_mlflow_log(log_model, tf_saved_model_dir=serialized.decode('utf-8'),
tf_meta_graph_tags=[tag_constants.SERVING],
tf_signature_def_key='predict',
artifact_path='model')
return serialized
@gorilla.patch(tensorflow.estimator.Estimator)
def export_savedmodel(self, *args, **kwargs):
original = gorilla.get_original_attribute(tensorflow.estimator.Estimator,
'export_savedmodel')
serialized = original(self, *args, **kwargs)
try_mlflow_log(log_model, tf_saved_model_dir=serialized.decode('utf-8'),
tf_meta_graph_tags=[tag_constants.SERVING],
tf_signature_def_key='predict',
artifact_path='model')
return serialized
@gorilla.patch(tensorflow.keras.Model)
def fit(self, *args, **kwargs):
original = gorilla.get_original_attribute(tensorflow.keras.Model, 'fit')
# Checking if the 'callback' argument of fit() is set
if len(args) >= 6:
tmp_list = list(args)
tmp_list[5], log_dir = _setup_callbacks(tmp_list[5])
args = tuple(tmp_list)
elif 'callbacks' in kwargs:
kwargs['callbacks'], log_dir = _setup_callbacks(kwargs['callbacks'])
else:
kwargs['callbacks'], log_dir = _setup_callbacks([])
result = original(self, *args, **kwargs)
_flush_queue()
_log_artifacts_with_warning(local_dir=log_dir, artifact_path='tensorboard_logs')
shutil.rmtree(log_dir)
return result
@gorilla.patch(EventFileWriter)
def add_event(self, event):
_log_event(event)
original = gorilla.get_original_attribute(EventFileWriter, 'add_event')
return original(self, event)
@gorilla.patch(FileWriter)
def add_summary(self, *args, **kwargs):
original = gorilla.get_original_attribute(FileWriter, 'add_summary')
result = original(self, *args, **kwargs)
_flush_queue()
return result
settings = gorilla.Settings(allow_hit=True, store_hit=True)
patches = [
gorilla.Patch(EventFileWriter, 'add_event', add_event, settings=settings),
gorilla.Patch(EventFileWriterV2, 'add_event', add_event, settings=settings),
gorilla.Patch(tensorflow.keras.Model, 'fit', fit, settings=settings),
gorilla.Patch(tensorflow.estimator.Estimator, 'export_saved_model',
export_saved_model, settings=settings),
gorilla.Patch(tensorflow.estimator.Estimator, 'export_savedmodel',
export_savedmodel, settings=settings),
gorilla.Patch(FileWriter, 'add_summary', add_summary, settings=settings),
]
for x in patches:
gorilla.apply(x)
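# A hedged usage sketch (not part of the original module): enable autologging before
# training a tf.keras model. Model and data names are hypothetical.
#
#   import mlflow
#   import mlflow.tensorflow
#
#   mlflow.tensorflow.autolog(every_n_iter=10)
#   with mlflow.start_run():
#       model.fit(x_train, y_train, epochs=5)  # params, metrics and the model are logged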
|
the-stack_106_13195
|
"""
VoVNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection,'
https://arxiv.org/abs/1904.09730.
"""
__all__ = ['VoVNet', 'vovnet27s', 'vovnet39', 'vovnet57']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1_block, conv3x3_block, SequentialConcurrent, MaxPool2d, flatten, is_channels_first
class VoVUnit(nn.Layer):
"""
VoVNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
branch_channels : int
Number of output channels for each branch.
num_branches : int
Number of branches.
resize : bool
Whether to use resize block.
use_residual : bool
Whether to use residual block.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels,
branch_channels,
num_branches,
resize,
use_residual,
data_format="channels_last",
**kwargs):
super(VoVUnit, self).__init__(**kwargs)
self.resize = resize
self.use_residual = use_residual
if self.resize:
self.pool = MaxPool2d(
pool_size=3,
strides=2,
ceil_mode=True,
data_format=data_format,
name="pool")
self.branches = SequentialConcurrent(
data_format=data_format,
name="branches")
branch_in_channels = in_channels
for i in range(num_branches):
self.branches.add(conv3x3_block(
in_channels=branch_in_channels,
out_channels=branch_channels,
data_format=data_format,
name="branch{}".format(i + 1)))
branch_in_channels = branch_channels
self.concat_conv = conv1x1_block(
in_channels=(in_channels + num_branches * branch_channels),
out_channels=out_channels,
data_format=data_format,
name="concat_conv")
def call(self, x, training=None):
if self.resize:
x = self.pool(x)
if self.use_residual:
identity = x
x = self.branches(x, training=training)
x = self.concat_conv(x, training=training)
if self.use_residual:
x = x + identity
return x
class VoVInitBlock(nn.Layer):
"""
VoVNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels,
data_format="channels_last",
**kwargs):
super(VoVInitBlock, self).__init__(**kwargs)
mid_channels = out_channels // 2
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=mid_channels,
strides=2,
data_format=data_format,
name="conv1")
self.conv2 = conv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
data_format=data_format,
name="conv2")
self.conv3 = conv3x3_block(
in_channels=mid_channels,
out_channels=out_channels,
strides=2,
data_format=data_format,
name="conv3")
def call(self, x, training=None):
x = self.conv1(x, training=training)
x = self.conv2(x, training=training)
x = self.conv3(x, training=training)
return x
class VoVNet(tf.keras.Model):
"""
VoVNet model from 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection,'
https://arxiv.org/abs/1904.09730.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
branch_channels : list of list of int
Number of branch output channels for each unit.
num_branches : int
        Number of branches for each unit.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
channels,
branch_channels,
num_branches,
in_channels=3,
in_size=(224, 224),
classes=1000,
data_format="channels_last",
**kwargs):
super(VoVNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
self.data_format = data_format
init_block_channels = 128
self.features = tf.keras.Sequential(name="features")
self.features.add(VoVInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
data_format=data_format,
name="init_block"))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = tf.keras.Sequential(name="stage{}".format(i + 1))
for j, out_channels in enumerate(channels_per_stage):
use_residual = (j != 0)
resize = (j == 0) and (i != 0)
stage.add(VoVUnit(
in_channels=in_channels,
out_channels=out_channels,
branch_channels=branch_channels[i][j],
num_branches=num_branches,
resize=resize,
use_residual=use_residual,
data_format=data_format,
name="unit{}".format(j + 1)))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AveragePooling2D(
pool_size=7,
strides=1,
data_format=data_format,
name="final_pool"))
self.output1 = nn.Dense(
units=classes,
input_dim=in_channels,
name="output1")
def call(self, x, training=None):
x = self.features(x, training=training)
x = flatten(x, self.data_format)
x = self.output1(x)
return x
def get_vovnet(blocks,
slim=False,
model_name=None,
pretrained=False,
root=os.path.join("~", ".tensorflow", "models"),
**kwargs):
"""
    Create VoVNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
slim : bool, default False
Whether to use a slim model.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
if blocks == 27:
layers = [1, 1, 1, 1]
elif blocks == 39:
layers = [1, 1, 2, 2]
elif blocks == 57:
layers = [1, 1, 4, 3]
else:
raise ValueError("Unsupported VoVNet with number of blocks: {}".format(blocks))
assert (sum(layers) * 6 + 3 == blocks)
num_branches = 5
channels_per_layers = [256, 512, 768, 1024]
branch_channels_per_layers = [128, 160, 192, 224]
if slim:
channels_per_layers = [ci // 2 for ci in channels_per_layers]
branch_channels_per_layers = [ci // 2 for ci in branch_channels_per_layers]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
branch_channels = [[ci] * li for (ci, li) in zip(branch_channels_per_layers, layers)]
net = VoVNet(
channels=channels,
branch_channels=branch_channels,
num_branches=num_branches,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
(1,) + net.in_size + (in_channels,)
net.build(input_shape=input_shape)
net.load_weights(
filepath=get_model_file(
model_name=model_name,
local_model_store_dir_path=root))
return net
def vovnet27s(**kwargs):
"""
VoVNet-27-slim model from 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection,'
https://arxiv.org/abs/1904.09730.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_vovnet(blocks=27, slim=True, model_name="vovnet27s", **kwargs)
def vovnet39(**kwargs):
"""
VoVNet-39 model from 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection,'
https://arxiv.org/abs/1904.09730.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_vovnet(blocks=39, model_name="vovnet39", **kwargs)
def vovnet57(**kwargs):
"""
VoVNet-57 model from 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection,'
https://arxiv.org/abs/1904.09730.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_vovnet(blocks=57, model_name="vovnet57", **kwargs)
def _test():
import numpy as np
import tensorflow.keras.backend as K
data_format = "channels_last"
# data_format = "channels_first"
pretrained = False
models = [
vovnet27s,
vovnet39,
vovnet57,
]
for model in models:
net = model(pretrained=pretrained, data_format=data_format)
batch = 14
x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3))
y = net(x)
assert (tuple(y.shape.as_list()) == (batch, 1000))
weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
print("m={}, {}".format(model.__name__, weight_count))
assert (model != vovnet27s or weight_count == 3525736)
assert (model != vovnet39 or weight_count == 22600296)
assert (model != vovnet57 or weight_count == 36640296)
if __name__ == "__main__":
_test()
|
the-stack_106_13198
|
class DataTablesParsedData(object):
def __init__(self):
self.columns = {}
self.start = 0
self.length = 0
self.ordering = {}
self.search_reg_ex = False
self.search_value = ""
self.draw = ""
self._calculated_columns_count = None
@property
def columns_count(self):
"""
Returns columns count
:return: columns count
"""
if self._calculated_columns_count is not None:
return self._calculated_columns_count
max_index = max([int(key) for key in self.columns.keys()], default=None)
self._calculated_columns_count = max_index + 1 if max_index is not None else 0
return self._calculated_columns_count
def filter_by(self, mask=(lambda x: "%" + x + "%")):
"""
Returns filter by rule
        :param mask: lambda or function which must be used for filter rule decoration
:return: filter by rule
"""
if self.search_value is None:
return None
v = str(self.search_value).strip()
if len(v) == 0:
return None
if not mask:
return v
return mask(v)
def column_attribute(self, column_index, attribute_name):
"""
Returns attribute for column
:param column_index: column index
:param attribute_name: attribute name
:return: attribute value for column
"""
attrs = self.column_attributes(column_index)
if attrs is None:
return None
if attribute_name not in attrs:
return None
return attrs[attribute_name]
def column_attributes(self, column_index):
if column_index not in self.columns:
return None
return self.columns[column_index]
def column_data(self, column_index):
"""
Returns `data` attribute for column
:param column_index: column index
:return: `data` attribute value for column
"""
return self.column_attribute(column_index, "data")
def all_columns_data(self, remove_empty=True):
"""
        Returns all requested columns list (values of `data` attribute for each column)
:param remove_empty: remove columns with no `data` attribute specified
:return: requested columns list
"""
ret = []
for i in range(self.columns_count):
val = self.column_data(i)
            if remove_empty and not val:
continue
ret.append(val)
return ret
def _columns_data(self):
ret = []
# looking for names and indexes
for k in self.columns.keys():
params = self.columns[k]
if "data" in params:
ret.append((k, params["data"]))
# sorting by index
ret = sorted(ret, key=lambda entry: entry[0])
# filtering only names
xret = []
for (index, name, ) in ret:
xret.append(name)
return xret
def low_level_order_by(self, name_mappings=None, filter_not_mapped=False):
ret = []
if (name_mappings is None) and filter_not_mapped:
return ret
keys = list(self.ordering.keys())
keys = sorted(keys)
for k in keys:
attrs = self.ordering[k]
if "column" not in attrs:
return None
column_index = int(attrs["column"])
request_column_name = str(self.column_data(column_index)).strip()
if len(request_column_name) == 0:
continue
            if (name_mappings is not None) and (request_column_name in name_mappings):
columnName = name_mappings[request_column_name]
else:
if filter_not_mapped:
continue
columnName = request_column_name
if "dir" not in attrs:
return None
order_direction = str(attrs["dir"])
item = (columnName, order_direction)
ret.append(item)
return ret
def order_by(self, name_mappings=None, filter_not_mapped=False):
"""
Returns array of field names and sorting type (asc/desc)
        :param name_mappings: Map of DataTable field names to DB field names for sorting. If a field name
        differs between the DB and the DataTable, the actual DB name must be mapped here; if it is the
        same in both, it can be skipped in the mapping.
        :param filter_not_mapped: if True, skip order columns that are not present in name_mappings
        :return: a comma-separated "column direction" ordering string, or None if the ordering data is malformed
"""
data = self.low_level_order_by(name_mappings, filter_not_mapped)
if data is None:
return None
ret = []
for (column, order_direction) in data:
item = column
order_direction = str(order_direction).strip()
if len(order_direction) > 0:
item += " " + order_direction
ret.append(item)
return ", ".join(ret)
|
the-stack_106_13203
|
import multiprocessing as mp
import os
import subprocess
from collections import OrderedDict
from itertools import product, repeat
from pathlib import Path
import matplotlib.pyplot as plt
import pandas as pd
from gams import *
import config as cfg
def timesort(df, index_sets='t', timeindex_name='t', timeindex_string='t'):
"""
Sorts a pandas dataframe indexed by a string-float combination by float (ignoring string) in descending order.
Useful for sorting GAMS-time-set-indexed data.
:param df: A dataframe indexed by a float-string combination
:param index_sets: column name(s) to be used as index
:param timeindex_name: name of df's index. Defaults to 't'
    :param timeindex_string: prefix of the time index elements that is split off before the numeric sort
        (e.g. 't' in 't1')
    :return: the dataframe sorted by the numeric part of its time index
"""
df.reset_index(inplace=True)
df['tix'] = pd.to_numeric(df[timeindex_name].str.split(pat=timeindex_string).str.get(1))
df.sort_values(by=['tix'], inplace=True)
df.set_index(index_sets, drop=True, inplace=True)
df.drop(columns=['tix'], inplace=True)
return df
def reset_symbol(db_gams, symbol_name, df):
"""
writes values in df to the already existing symbol "symbol name" in GAMS-database gams_db
:param db_gams: a GAMS database object
:param symbol_name: a string with the symbol name
    :param df: a pandas dataframe with one line per value and all corresponding dimensions in the index
:return: modifies gams database, does not return anything
"""
gams_symbol = db_gams.get_symbol(symbol_name)
gams_symbol.clear()
if gams_symbol.get_dimension() > 0:
for row in df.itertuples():
gams_symbol.add_record(row[0]).value = row[1]
elif gams_symbol.get_dimension() == 0:
for row in df.itertuples():
gams_symbol.add_record().value = row[1]
else:
raise ValueError('dimension_list must be list or integer')
def gdx2df(db_gams, symbol, index_list, column_list, check_sets=False):
"""
writes data from a GAMS gdx to a pandas dataframe.
:param db_gams: a GAMS database object
:param symbol: string of the GAMS symbol name
:param index_list: a list of strings of GAMS-sets over which 'symbol' is defined which constitute the df's index
:param column_list: a list of strings of GAMS-sets over which 'symbol' is defined which constitute the df's columns
    :param check_sets: reserved flag for checking symbol domains against GAMS sets (currently unused)
:return: a pd.DataFrame
"""
sym = db_gams.get_symbol(symbol)
if isinstance(sym, GamsParameter):
gdx_dict = {tuple(obj.keys): obj.value for obj in sym}
elif isinstance(sym, GamsVariable):
gdx_dict = {tuple(obj.keys): obj.level for obj in sym}
elif isinstance(sym, GamsEquation):
gdx_dict = {tuple(obj.keys): obj.marginal for obj in sym}
elif isinstance(sym, GamsSet):
gdx_dict = {obj.keys[0] for obj in sym}
else:
raise ValueError('Symbol not in gdx')
if not gdx_dict:
gdx_df = pd.DataFrame([False], index=[symbol], columns=['Value'])
elif isinstance(sym, GamsSet):
gdx_df = pd.DataFrame(data=True, index=gdx_dict, columns=['Value'])
elif not any(gdx_dict.keys()):
gdx_df = pd.DataFrame(data=list(gdx_dict.values())[0], index=[symbol], columns=['Value'])
else:
gdx_df = pd.DataFrame(list(gdx_dict.values()), index=pd.MultiIndex.from_tuples(gdx_dict.keys()),
columns=['Value'])
gdx_df.index.names = db_gams.get_symbol(symbol).domains_as_strings
gdx_df = pd.pivot_table(gdx_df, values='Value', index=index_list, columns=column_list)
if 't' in index_list:
gdx_df = timesort(gdx_df, index_sets=index_list)
# if check_sets and (isinstance(sym, GamsParameter) or isinstance(sym, GamsVariable)):
# gdx_index_set = {obj.keys[0] for obj in sym}
gdx_df = gdx_df.fillna(0)
return gdx_df
def df2gdx(db_gams, df, symbol_name, symbol_type, dimension_list, desc='None'):
"""
writes data from a pandas dataframe to a GAMS database
:param db_gams: a GAMS database object
:param df: a pandas dataframe with dimension as indices and one column with values
:param symbol_name: name of the GAMS symbol as created in the GAMS database
:param symbol_type: 'par' is parameter, 'set' is set
:param dimension_list: list of all symbol dimensions / sets over which symbol is defined
:param desc: optional description string
:return: a GAMS database object
"""
if not isinstance(df, pd.DataFrame):
df = df.to_frame()
    if symbol_type == 'par':
if isinstance(dimension_list, list):
obj = db_gams.add_parameter_dc(symbol_name, dimension_list, desc)
for row in df.itertuples():
obj.add_record(row[0]).value = row[1]
elif isinstance(dimension_list, int):
obj = db_gams.add_parameter(symbol_name, dimension_list, desc)
for row in df.itertuples():
obj.add_record().value = row[1]
else:
raise ValueError('dimension_list must be list or integer')
    elif symbol_type == 'set':
obj = db_gams.add_set(symbol_name, 1, desc)
for row in df.itertuples():
obj.add_record(row[0])
else:
raise ValueError('improper symbol_type provided')
return obj
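# A hedged usage sketch (not part of the original module): writing a one-dimensional set
# and a scalar parameter to a fresh GAMS database. Symbol names are hypothetical.
#
#   from gams import GamsWorkspace
#   ws = GamsWorkspace()
#   db = ws.add_database()
#   _ = df2gdx(db, pd.DataFrame(index=['t1', 't2'], data=[1, 1]), 't', 'set', [])
#   _ = df2gdx(db, pd.DataFrame(data=[25.0]), 'CO2_PRICE', 'par', 0, 'scenario CO2 price')
#   reset_symbol(db, 'CO2_PRICE', pd.DataFrame(data=[30.0]))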
def gdx2plot(db_gams, symbol, index_list, column_list, base_year, slicer=None, stacked=False):
"""
function to create plots from gdx files
:param db_gams: a python-GAMS database
:param symbol: name-string of symbol in GAMS database
:param index_list: set(s) to be used as index
:param column_list: set(s) to be used as columns
:param base_year: year of model simulation
:param slicer: slices the column-data
    :param stacked: if True, draw a stacked area plot instead of line plots
    :return: the (possibly sliced) dataframe that was plotted
"""
idx = pd.IndexSlice
df = gdx2df(db_gams, symbol, index_list, column_list)
df = df.loc[:, (df != 0).any(axis=0)]
if slicer:
df = df.loc[:, idx[slicer]]
# convert model time to actual time
if 't' in index_list:
start_time = pd.Timestamp(f'{base_year}-01-01')
time_offset = pd.to_timedelta(df.index.str.replace('t', '').astype('float'), unit='h')
model_time = start_time + time_offset
df.index = model_time
# plot data
fig, ax = plt.subplots(figsize=(16, 10)) # figsize=(16, 10)
if not stacked:
ax.plot(df)
elif stacked:
ax.stackplot(df.index, df.T)
ax.grid(b=True, which='both')
ax.legend(df.columns.values)
fig.autofmt_xdate()
fig.suptitle(symbol, y=0.9975)
fig.tight_layout()
plt.show()
return df
def create_scenario_gdx(gdb, gdx_path, dict_base, dict_campaign):
"""
Generates gdx input files for each scenario iteration in a separate folder for each campaign.
:param gdb: A GAMS database holding all required MEDEA parameters
:param gdx_path: a Path-object with the path to the GAMS project directory
:param dict_base: a nested dictionary that defines baseline values for all parameters to be (potentially) modified.
Expected structure: dict_base = {'base': {'co2_price': [value], pv_limit: [values]}}
:param dict_campaign: a nested dictionary with parameter modifications for each campaign
:return:
"""
for campaign in dict_campaign.keys():
# update campaign dictionary
parms_dict = dict_base.copy()
parms_dict.update(dict_campaign[campaign])
od = OrderedDict(sorted(parms_dict.items()))
cart = list(product(*od.values()))
moddf = pd.DataFrame(cart, columns=od.keys())
for par in parms_dict.keys():
try:
_ = gdb.get_symbol(par)
except GamsException:
_ = df2gdx(gdb, pd.DataFrame(data=[0]), par, 'par', 0, 'auto-generated scenario parameter')
# create campaign path if it does not exist
(gdx_path / campaign).mkdir(parents=True, exist_ok=True)
for n in range(0, len(cart)):
for par in parms_dict.keys():
_ = reset_symbol(gdb, par, pd.DataFrame(data=[moddf.loc[n, par]]))
# df2gdx(gdb, moddf.loc[n, par], par, symtype, symdomstr, 'auto-generated scenario parameter')
identifier = '_'.join(map(str, cart[n]))
input_fname = gdx_path / campaign / f'medea_{identifier}_data.gdx'
gdb.export(input_fname)
def run_medea_parallel(number_of_workers, project_name, campaign_dict):
"""
Run medea models in parallel. Requires pre-prepared gdx-input for each run. create_scenario_gdx can be used for
this purpose.
:param number_of_workers: integer specifying the number of parallel processes started
:param project_name: string holding the project name
:param campaign_dict: dictionary with scenario definitions with format according to medea-conventions
:return:
"""
for campaign in campaign_dict.keys():
od = OrderedDict(sorted(campaign_dict[campaign].items()))
cart = list(product(*od.values()))
identifier = ['_'.join(map(str, cart[n])) for n in range(0, len(cart))]
p = mp.Pool(number_of_workers)
_ = p.starmap(run_medea_campaign, zip(repeat(project_name), identifier, repeat(campaign)))
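# A hedged usage sketch (not part of the original module): project, parameter names and
# values are hypothetical. create_scenario_gdx derives each scenario identifier from the
# cartesian product of all parameter value lists, so the dictionary handed to
# run_medea_parallel must reproduce the same identifiers.
#
#   scenarios = {'base': {'CO2_PRICE': [25.0, 50.0], 'PV_LIMIT': [10.0]}}
#   gdx_dir = Path(cfg.MEDEA_ROOT_DIR) / 'projects' / 'my_project' / 'opt'
#   create_scenario_gdx(db, gdx_dir, {}, scenarios)
#   run_medea_parallel(number_of_workers=2, project_name='my_project', campaign_dict=scenarios)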
def run_medea(gms_exe_dir, gms_model, medea_project, project_scenario, export_location, compress=True):
"""
flexible run of power system model medea
:param gms_exe_dir: string of path to GAMS executable
:param gms_model: string of path to GAMS model to solve
:param medea_project: string of medea-project name
:param project_scenario: string of project-scenario (typically one iteration)
:param export_location: string of path where to save results
:param compress: boolean; set to True to compress output-gdx
:return:
"""
# generate identifier of scenario output
gdx_out = f'medea_out_{project_scenario}.gdx'
# call GAMS to solve model / scenario
subprocess.run(
f'{gms_exe_dir}\\gams {gms_model} gdx={gdx_out} lo=3 o=nul --project={medea_project} --scenario={project_scenario}')
# compress generated gdx file
if compress:
subprocess.run(
f'gdxcopy -V7C -Replace {gdx_out}'
)
# clean up after each run and delete input data (which is also included in output, so no information lost)
if os.path.isfile(export_location):
os.remove(export_location)
def run_medea_project(project_name, scenario_id, compress=True):
"""
runs / solves a project of power system model medea with strict project directory conventions
:param project_name: string of medea-project name
:param scenario_id: string of project-scenario (typically one iteration)
:param compress: boolean; set to True to compress output-gdx
:return:
"""
# generate file names
gms_model_fname = os.path.join(cfg.MEDEA_ROOT_DIR, 'projects', project_name, 'opt', 'medea_main.gms')
gdx_out_fname = f'medea_out_{scenario_id}.gdx'
input_fname = os.path.join(cfg.MEDEA_ROOT_DIR, 'projects', project_name, 'opt', f'medea_{scenario_id}_data.gdx')
# call GAMS to solve model / scenario
subprocess.run(
f'{cfg.GMS_SYS_DIR}\\gams {gms_model_fname} gdx={gdx_out_fname} lo=3 o=nul --project={project_name} --scenario={scenario_id}')
# compress generated gdx file
if compress:
subprocess.run(
f'gdxcopy -V7C -Replace {gdx_out_fname}'
)
# clean up after each run and delete input data (which is also included in output, so no information lost)
if os.path.isfile(input_fname):
os.remove(input_fname)
def run_medea_campaign(project_name, scenario_id, campaign, compress=True):
"""
runs / solves a project of power system model medea with strict project directory conventions
:param project_name: string of medea-project name
    :param scenario_id: string of project-scenario (typically one iteration)
    :param campaign: string of the campaign name (subfolder of the project's opt directory holding the scenario gdx files)
    :param compress: boolean; set to True to compress output-gdx
:return:
"""
# generate file names
gms_model_fname = Path(cfg.MEDEA_ROOT_DIR) / 'projects' / project_name / 'opt' / 'medea_main.gms'
gdx_out_fname = Path(
cfg.MEDEA_ROOT_DIR) / 'projects' / project_name / 'opt' / campaign / f'medea_out_{scenario_id}.gdx'
input_fname = Path(
cfg.MEDEA_ROOT_DIR) / 'projects' / project_name / 'opt' / campaign / f'medea_{scenario_id}_data.gdx'
# call GAMS to solve model / scenario
subprocess.run(
f'{cfg.GMS_SYS_DIR}\\gams {gms_model_fname} gdx={gdx_out_fname} lo=3 o=nul --project={project_name} --scenario={scenario_id}')
# compress generated gdx file
if compress:
subprocess.run(
f'gdxcopy -V7C -Replace {gdx_out_fname}'
)
# clean up after each run and delete input data (which is also included in output, so no information lost)
if os.path.isfile(input_fname):
os.remove(input_fname)
def run_medea_test(test_data_name):
"""
runs / solves a project of power system model medea with strict project directory conventions
:return:
"""
# generate file names
gms_model_fname = os.path.join(cfg.MEDEA_ROOT_DIR, 'tests', 'opt', 'medea_main.gms')
gdx_out_fname = f'gdx=medea_out_{test_data_name}.gdx'
input_fname = os.path.join(cfg.MEDEA_ROOT_DIR, 'tests', 'opt', f'medea_{test_data_name}_data.gdx')
# call GAMS to solve model / scenario
subprocess.run(
f'{cfg.GMS_SYS_DIR}\\gams {gms_model_fname} {gdx_out_fname} lo=3 --project=test --scenario={test_data_name}')
# clean up after each run and delete input data (which is also included in output, so no information lost)
if os.path.isfile(input_fname):
os.remove(input_fname)
|
the-stack_106_13208
|
class RF:
"""docstring for RF"""
def read_tag(self):
import RPi.GPIO as GPIO
import sys
sys.path.append('/home/pi/Documents/objetos/MFRC522-python')
from mfrc522 import SimpleMFRC522
reader = SimpleMFRC522()
print("Hold tag near the reader")
try:
t_id, text= reader.read_no_block()
return t_id, text
finally:
GPIO.cleanup()
if __name__ == '__main__':
rf = RF()
	rf.read_tag()
|
the-stack_106_13211
|
"""
1217. Total Hamming Distance
https://www.lintcode.com/problem/total-hamming-distance/description
TLE note:
The brute-force approach below is O(n^2 * 30): values go up to about 1e9 ~ 2^30, so each pairwise
XOR popcount costs up to 30 steps, and summed over all pairs this is roughly 1e9 operations, which
exceeds the time limit.
"""
class Solution:
"""
@param nums: the gievn integers
@return: the total Hamming distance between all pairs of the given numbers
"""
def totalHammingDistance(self, nums):
# Write your code here
res = 0
n = len(nums)
for i in range(n):
for j in range(i, n):
c = nums[i] ^ nums[j]
while c > 0:
res += c & 1
c >>= 1
return res
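# A per-bit counting variant (hedged sketch, not part of the original submission): for each
# bit position, if `ones` numbers have that bit set, the bit contributes ones * (n - ones)
# differing pairs, giving O(30 * n) overall instead of O(30 * n^2).
class SolutionBitCount:
    def totalHammingDistance(self, nums):
        res, n = 0, len(nums)
        for bit in range(30):  # inputs are below 2^30 ~ 1e9
            ones = sum((x >> bit) & 1 for x in nums)
            res += ones * (n - ones)
        return res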
|
the-stack_106_13216
|
line = input().split(" ")
n = int(line[0])
k = int(line[-1])
origami = [2, 5, 8]
n_notebooks = []
total = 0
for color in origami:
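    # -(-a // k) is integer ceiling division: each color requires ceil(color * n / k) notebooks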
total += -(-color*n // k)
print(total)
|
the-stack_106_13217
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides generic text views
This modules provides several generic views for
serializing models into human-readable text.
"""
import collections as col
import six
class MultiView(object):
"""A Text View Containing Multiple Views
This view simply serializes each
value in the data model, and then
joins them with newlines (ignoring
the key values altogether). This is
useful for serializing lists of models
(as array-like dicts).
"""
def __call__(self, model):
res = [six.text_type(model[key]) for key in model]
return "\n".join(res)
class BasicKeyValueView(object):
"""A Basic Key-Value Text View
This view performs a naive serialization of a model into
text using a basic key-value method, where each
key-value pair is rendered as "key = str(value)"
"""
def __call__(self, model):
res = ""
for key in model:
res += "{key} = {value}\n".format(key=key, value=model[key])
return res
class KeyValueView(object):
"""A Key-Value Text View
This view performs an advanced serialization of a model
into text by following the following set of rules:
key : text
key = text
rootkey : Mapping
::
rootkey =
serialize(key, value)
key : Sequence
::
key =
serialize(item)
:param str indent_str: the string used to represent one "indent"
:param str key_sep: the separator to use between keys and values
:param str dict_sep: the separator to use after a dictionary root key
:param str list_sep: the separator to use after a list root key
:param str anon_dict: the "key" to use when there is a dict in a list
(does not automatically use the dict separator)
    :param before_dict: content to place on the line(s) before a dict
root key (use None to avoid inserting an extra line)
:type before_dict: str or None
    :param before_list: content to place on the line(s) before a list
root key (use None to avoid inserting an extra line)
:type before_list: str or None
"""
def __init__(self,
indent_str=' ',
key_sep=' = ',
dict_sep=' = ',
list_sep=' = ',
anon_dict='[dict]',
before_dict=None,
before_list=None):
self.indent_str = indent_str
self.key_sep = key_sep
self.dict_sep = dict_sep
self.list_sep = list_sep
self.anon_dict = anon_dict
self.before_dict = before_dict
self.before_list = before_list
def __call__(self, model):
def serialize(root, rootkey, indent):
res = []
if rootkey is not None:
res.append((self.indent_str * indent) + rootkey)
if isinstance(root, col.Mapping):
if rootkey is None and indent > 0:
res.append((self.indent_str * indent) + self.anon_dict)
elif rootkey is not None:
res[0] += self.dict_sep
if self.before_dict is not None:
res.insert(0, self.before_dict)
for key in sorted(root):
res.extend(serialize(root[key], key, indent + 1))
elif (isinstance(root, col.Sequence) and
not isinstance(root, six.string_types)):
if rootkey is not None:
res[0] += self.list_sep
if self.before_list is not None:
res.insert(0, self.before_list)
for val in sorted(root, key=str):
res.extend(serialize(val, None, indent + 1))
else:
str_root = six.text_type(root)
if '\n' in str_root:
# we are in a submodel
if rootkey is not None:
res[0] += self.dict_sep
list_root = [(self.indent_str * (indent + 1)) + line
for line in str_root.split('\n')]
res.extend(list_root)
else:
# just a normal key or list entry
try:
res[0] += self.key_sep + str_root
except IndexError:
res = [(self.indent_str * indent) + str_root]
return res
return "\n".join(serialize(model, None, -1))
class TableView(object):
"""A Basic Table Text View
This view performs serialization of data into a basic table with
predefined column names and mappings. Column width is auto-calculated
evenly, column values are automatically truncated accordingly. Values
are centered in the columns.
:param [str] column_names: the headers for each of the columns
:param [str] column_values: the item name to match each column to in
each row
:param str table_prop_name: the name of the property within the model
containing the row models
"""
def __init__(self, column_names, column_values, table_prop_name):
self.table_prop_name = table_prop_name
self.column_names = column_names
self.column_values = column_values
self.column_width = (72 - len(column_names) + 1) // len(column_names)
column_headers = "|".join(
"{{ch[{n}]: ^{width}}}".format(n=n, width=self.column_width)
for n in range(len(column_names))
)
# correct for float-to-int roundoff error
test_fmt = column_headers.format(ch=column_names)
if len(test_fmt) < 72:
column_headers += ' ' * (72 - len(test_fmt))
vert_divider = '-' * 72
self.header_fmt_str = column_headers + "\n" + vert_divider + "\n"
self.row_fmt_str = "|".join(
"{{cv[{n}]: ^{width}}}".format(n=n, width=self.column_width)
for n in range(len(column_values))
)
def __call__(self, model):
res = self.header_fmt_str.format(ch=self.column_names)
for raw_row in model[self.table_prop_name]:
row = [six.text_type(raw_row[prop_name])
for prop_name in self.column_values]
# double format is in case we have roundoff error
res += '{0: <72}\n'.format(self.row_fmt_str.format(cv=row))
return res
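# A hedged usage sketch (not part of the original module); column and property names are
# hypothetical.
#
#   view = TableView(column_names=["Name", "Status"],
#                    column_values=["name", "status"],
#                    table_prop_name="services")
#   text = view({"services": [{"name": "api", "status": "up"},
#                             {"name": "db", "status": "down"}]})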
|
the-stack_106_13218
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from unittest import mock
from ax.core.arm import Arm
from ax.core.experiment import Experiment
from ax.core.generator_run import GeneratorRun
from ax.core.search_space import SearchSpace
from ax.modelbridge.discrete import DiscreteModelBridge
from ax.modelbridge.factory import Models
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.torch import TorchModelBridge
from ax.models.discrete.eb_thompson import EmpiricalBayesThompsonSampler
from ax.models.discrete.full_factorial import FullFactorialGenerator
from ax.models.discrete.thompson import ThompsonSampler
from ax.utils.common.testutils import TestCase
from ax.utils.testing.fake import get_branin_experiment, get_choice_parameter, get_data
class TestGenerationStrategy(TestCase):
def test_validation(self):
# num_arms can be positive or -1.
with self.assertRaises(ValueError):
GenerationStrategy(
steps=[
GenerationStep(model=Models.SOBOL, num_arms=5),
GenerationStep(model=Models.GPEI, num_arms=-10),
]
)
# only last num_arms can be -1.
with self.assertRaises(ValueError):
GenerationStrategy(
steps=[
GenerationStep(model=Models.SOBOL, num_arms=-1),
GenerationStep(model=Models.GPEI, num_arms=10),
]
)
exp = Experiment(
name="test", search_space=SearchSpace(parameters=[get_choice_parameter()])
)
factorial_thompson_generation_strategy = GenerationStrategy(
steps=[
GenerationStep(model=Models.FACTORIAL, num_arms=1),
GenerationStep(model=Models.THOMPSON, num_arms=2),
]
)
with self.assertRaises(ValueError):
factorial_thompson_generation_strategy.gen(exp)
def test_min_observed(self):
# We should fail to transition the next model if there is not
# enough data observed.
exp = get_branin_experiment()
gs = GenerationStrategy(
steps=[
GenerationStep(model=Models.SOBOL, num_arms=5, min_arms_observed=5),
GenerationStep(model=Models.GPEI, num_arms=1),
]
)
for _ in range(5):
gs.gen(exp)
with self.assertRaises(ValueError):
gs.gen(exp)
def test_do_not_enforce_min_observations(self):
# We should be able to move on to the next model if there is not
# enough data observed if `enforce_num_arms` setting is False, in which
# case the previous model should be used until there is enough data.
exp = get_branin_experiment()
gs = GenerationStrategy(
steps=[
GenerationStep(
model=Models.SOBOL,
num_arms=5,
min_arms_observed=5,
enforce_num_arms=False,
),
GenerationStep(model=Models.GPEI, num_arms=1),
]
)
for _ in range(5):
gs.gen(exp)
sobol = gs._model
gs.gen(exp)
# Make sure the same model is used to generate the 6th point.
self.assertIs(gs._model, sobol)
@mock.patch(
f"{TorchModelBridge.__module__}.TorchModelBridge.__init__",
autospec=True,
return_value=None,
)
@mock.patch(
f"{TorchModelBridge.__module__}.TorchModelBridge.update",
autospec=True,
return_value=None,
)
@mock.patch(
f"{TorchModelBridge.__module__}.TorchModelBridge.gen",
autospec=True,
return_value=GeneratorRun(arms=[Arm(parameters={"x1": 1, "x2": 2})]),
)
def test_sobol_GPEI_strategy(self, mock_GPEI_gen, mock_GPEI_update, mock_GPEI_init):
exp = get_branin_experiment()
sobol_GPEI_generation_strategy = GenerationStrategy(
name="Sobol+GPEI",
steps=[
GenerationStep(model=Models.SOBOL, num_arms=5),
GenerationStep(model=Models.GPEI, num_arms=2),
],
)
self.assertEqual(sobol_GPEI_generation_strategy.name, "Sobol+GPEI")
self.assertEqual(sobol_GPEI_generation_strategy.generator_changes, [5])
exp.new_trial(generator_run=sobol_GPEI_generation_strategy.gen(exp)).run()
for i in range(1, 8):
if i == 7:
# Check completeness error message.
with self.assertRaisesRegex(ValueError, "Generation strategy"):
g = sobol_GPEI_generation_strategy.gen(
exp, exp._fetch_trial_data(trial_index=i - 1)
)
else:
g = sobol_GPEI_generation_strategy.gen(
exp, exp._fetch_trial_data(trial_index=i - 1)
)
exp.new_trial(generator_run=g).run()
if i > 4:
mock_GPEI_init.assert_called()
# Check for "seen data" error message.
with self.assertRaisesRegex(ValueError, "Data for arm"):
sobol_GPEI_generation_strategy.gen(exp, exp.fetch_data())
@mock.patch(
f"{TorchModelBridge.__module__}.TorchModelBridge.__init__",
autospec=True,
return_value=None,
)
@mock.patch(
f"{TorchModelBridge.__module__}.TorchModelBridge.update",
autospec=True,
return_value=None,
)
@mock.patch(
f"{TorchModelBridge.__module__}.TorchModelBridge.gen",
autospec=True,
return_value=GeneratorRun(arms=[Arm(parameters={"x1": 1, "x2": 2})]),
)
def test_sobol_GPEI_strategy_keep_generating(
self, mock_GPEI_gen, mock_GPEI_update, mock_GPEI_init
):
exp = get_branin_experiment()
sobol_GPEI_generation_strategy = GenerationStrategy(
steps=[
GenerationStep(model=Models.SOBOL, num_arms=5),
GenerationStep(model=Models.GPEI, num_arms=-1),
]
)
self.assertEqual(sobol_GPEI_generation_strategy.name, "sobol+GPEI")
self.assertEqual(sobol_GPEI_generation_strategy.generator_changes, [5])
exp.new_trial(generator_run=sobol_GPEI_generation_strategy.gen(exp)).run()
for i in range(1, 15):
# Passing in all experiment data should cause an error as only
# new data should be passed into `gen`.
if i > 1:
with self.assertRaisesRegex(ValueError, "Data for arm"):
g = sobol_GPEI_generation_strategy.gen(exp, exp.fetch_data())
g = sobol_GPEI_generation_strategy.gen(
exp, exp._fetch_trial_data(trial_index=i - 1)
)
exp.new_trial(generator_run=g).run()
if i > 4:
mock_GPEI_init.assert_called()
@mock.patch(
f"{DiscreteModelBridge.__module__}.DiscreteModelBridge.__init__",
autospec=True,
return_value=None,
)
@mock.patch(
f"{DiscreteModelBridge.__module__}.DiscreteModelBridge.gen",
autospec=True,
return_value=GeneratorRun(arms=[Arm(parameters={"x1": 1, "x2": 2})]),
)
@mock.patch(
f"{DiscreteModelBridge.__module__}.DiscreteModelBridge.update",
autospec=True,
return_value=None,
)
def test_factorial_thompson_strategy(self, mock_update, mock_gen, mock_discrete):
exp = get_branin_experiment()
factorial_thompson_generation_strategy = GenerationStrategy(
steps=[
GenerationStep(model=Models.FACTORIAL, num_arms=1),
GenerationStep(model=Models.THOMPSON, num_arms=-1),
]
)
self.assertEqual(
factorial_thompson_generation_strategy.name, "factorial+thompson"
)
self.assertEqual(factorial_thompson_generation_strategy.generator_changes, [1])
for i in range(2):
data = get_data() if i > 0 else None
factorial_thompson_generation_strategy.gen(experiment=exp, new_data=data)
exp.new_batch_trial().add_arm(Arm(parameters={"x1": i, "x2": i}))
if i < 1:
mock_discrete.assert_called()
args, kwargs = mock_discrete.call_args
self.assertIsInstance(kwargs.get("model"), FullFactorialGenerator)
exp.new_batch_trial()
else:
mock_discrete.assert_called()
args, kwargs = mock_discrete.call_args
self.assertIsInstance(
kwargs.get("model"),
(ThompsonSampler, EmpiricalBayesThompsonSampler),
)
def test_clone_reset(self):
ftgs = GenerationStrategy(
steps=[
GenerationStep(model=Models.FACTORIAL, num_arms=1),
GenerationStep(model=Models.THOMPSON, num_arms=2),
]
)
ftgs._curr = ftgs._steps[1]
self.assertEqual(ftgs._curr.index, 1)
self.assertEqual(ftgs.clone_reset()._curr.index, 0)
def test_kwargs_passed(self):
gs = GenerationStrategy(
steps=[
GenerationStep(
model=Models.SOBOL, num_arms=1, model_kwargs={"scramble": False}
)
]
)
exp = get_branin_experiment()
gs.gen(exp, exp.fetch_data())
self.assertFalse(gs._model.model.scramble)
@mock.patch(
f"{TorchModelBridge.__module__}.TorchModelBridge.__init__",
autospec=True,
return_value=None,
)
@mock.patch(
f"{TorchModelBridge.__module__}.TorchModelBridge.update",
autospec=True,
return_value=None,
)
@mock.patch(
f"{TorchModelBridge.__module__}.TorchModelBridge.gen",
autospec=True,
return_value=GeneratorRun(
arms=[
Arm(parameters={"x1": 1, "x2": 2}),
Arm(parameters={"x1": 3, "x2": 4}),
]
),
)
def test_sobol_GPEI_strategy_batches(
self, mock_GPEI_gen, mock_GPEI_update, mock_GPEI_init
):
exp = get_branin_experiment()
sobol_GPEI_generation_strategy = GenerationStrategy(
name="Sobol+GPEI",
steps=[
GenerationStep(model=Models.SOBOL, num_arms=5),
GenerationStep(model=Models.GPEI, num_arms=8),
],
)
self.assertEqual(sobol_GPEI_generation_strategy.name, "Sobol+GPEI")
self.assertEqual(sobol_GPEI_generation_strategy.generator_changes, [5])
exp.new_batch_trial(
generator_run=sobol_GPEI_generation_strategy.gen(exp, n=2)
).run()
for i in range(1, 8):
if i == 2:
with self.assertRaisesRegex(ValueError, "Cannot generate 2 new"):
g = sobol_GPEI_generation_strategy.gen(
exp, exp._fetch_trial_data(trial_index=i - 1), n=2
)
g = sobol_GPEI_generation_strategy.gen(
exp, exp._fetch_trial_data(trial_index=i - 1)
)
elif i == 7:
# Check completeness error message.
with self.assertRaisesRegex(ValueError, "Generation strategy"):
g = sobol_GPEI_generation_strategy.gen(
exp, exp._fetch_trial_data(trial_index=i - 1), n=2
)
else:
g = sobol_GPEI_generation_strategy.gen(
exp, exp._fetch_trial_data(trial_index=i - 1), n=2
)
exp.new_batch_trial(generator_run=g).run()
if i > 4:
mock_GPEI_init.assert_called()
with self.assertRaises(ValueError):
sobol_GPEI_generation_strategy.gen(exp, exp.fetch_data())
|
the-stack_106_13219
|
from transpiler.Operation import Operation
MAX_VAL = 2 ** 64 - 1
class Gas(Operation):
def proceed(self, state):
"""
The current approach is to set the gas cost to a big constant,
instead of doing the gas book keeping in the cairo code.
It is assumed that the gas cost will not be higher than 64 bits
"""
state.stack.push_uint256(MAX_VAL)
return []
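# Minimal sketch of this handler's stack effect, assuming a state object that
# exposes stack.push_uint256 like the real transpiler state; the demo classes
# are stand-ins, and proceed() is called unbound so the Operation constructor
# does not need to be exercised here.
class _DemoStack:
    def __init__(self):
        self.values = []
    def push_uint256(self, value):
        self.values.append(value)
class _DemoState:
    def __init__(self):
        self.stack = _DemoStack()
if __name__ == "__main__":
    demo_state = _DemoState()
    Gas.proceed(None, demo_state)
    assert demo_state.stack.values == [MAX_VAL]  # GAS always reports the constant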
|
the-stack_106_13220
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import versionutils
from oslo_policy import policy
from keystone.common.policies import base
deprecated_get_sp = policy.DeprecatedRule(
name=base.IDENTITY % 'get_service_provider',
check_str=base.RULE_ADMIN_REQUIRED
)
deprecated_list_sp = policy.DeprecatedRule(
name=base.IDENTITY % 'list_service_providers',
check_str=base.RULE_ADMIN_REQUIRED
)
deprecated_update_sp = policy.DeprecatedRule(
name=base.IDENTITY % 'update_service_provider',
check_str=base.RULE_ADMIN_REQUIRED
)
deprecated_create_sp = policy.DeprecatedRule(
name=base.IDENTITY % 'create_service_provider',
check_str=base.RULE_ADMIN_REQUIRED
)
deprecated_delete_sp = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_service_provider',
check_str=base.RULE_ADMIN_REQUIRED
)
DEPRECATED_REASON = """
As of the Stein release, the service provider API now understands default
roles and system-scoped tokens, making the API more granular by default without
compromising security. The new policy defaults account for these changes
automatically. Be sure to take these new defaults into consideration if you are
relying on overrides in your deployment for the service provider API.
"""
service_provider_policies = [
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'create_service_provider',
check_str=base.SYSTEM_ADMIN,
# FIXME(lbragstad): Today, keystone doesn't support federation without
# modifying configuration files. It makes sense to require system scope
# for these operations until keystone supports a way to add federated
# identity and service providers strictly over the API. At that point,
# it will make sense to include `project` in the list of `scope_types`
# for service provider policies.
scope_types=['system'],
description='Create federated service provider.',
operations=[{'path': ('/v3/OS-FEDERATION/service_providers/'
'{service_provider_id}'),
'method': 'PUT'}],
deprecated_rule=deprecated_create_sp,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.STEIN),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_service_providers',
check_str=base.SYSTEM_READER,
scope_types=['system'],
description='List federated service providers.',
operations=[
{
'path': '/v3/OS-FEDERATION/service_providers',
'method': 'GET'
},
{
'path': '/v3/OS-FEDERATION/service_providers',
'method': 'HEAD'
}
],
deprecated_rule=deprecated_list_sp,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.STEIN
),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'get_service_provider',
check_str=base.SYSTEM_READER,
scope_types=['system'],
description='Get federated service provider.',
operations=[
{
'path': ('/v3/OS-FEDERATION/service_providers/'
'{service_provider_id}'),
'method': 'GET'
},
{
'path': ('/v3/OS-FEDERATION/service_providers/'
'{service_provider_id}'),
'method': 'HEAD'
}
],
deprecated_rule=deprecated_get_sp,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.STEIN
),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'update_service_provider',
check_str=base.SYSTEM_ADMIN,
scope_types=['system'],
description='Update federated service provider.',
operations=[{'path': ('/v3/OS-FEDERATION/service_providers/'
'{service_provider_id}'),
'method': 'PATCH'}],
deprecated_rule=deprecated_update_sp,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.STEIN),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_service_provider',
check_str=base.SYSTEM_ADMIN,
scope_types=['system'],
description='Delete federated service provider.',
operations=[{'path': ('/v3/OS-FEDERATION/service_providers/'
'{service_provider_id}'),
'method': 'DELETE'}],
deprecated_rule=deprecated_delete_sp,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.STEIN)
]
def list_rules():
return service_provider_policies
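# Illustrative-only sketch of how these defaults are fed to an oslo.policy
# enforcer (keystone performs this registration elsewhere in its policy setup).
if __name__ == "__main__":
    from oslo_config import cfg
    enforcer = policy.Enforcer(cfg.CONF)
    enforcer.register_defaults(list_rules())
    print(sorted(rule.name for rule in list_rules()))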
|
the-stack_106_13221
|
'''
There is an array of n integers. There are also 2 disjoint sets, A and B, each containing m integers. You like all the integers in set A and dislike all the integers in set B. Your initial happiness is 0. For each integer i in the array, if i is in A, you add 1 to your happiness. If i is in B, you add -1 to your happiness. Otherwise, your happiness does not change. Output your final happiness at the end.
Note: Since A and B are sets, they have no repeated elements. However, the array might contain duplicate elements.
Input Format
The first line contains integers n and m separated by a space.
The second line contains n integers, the elements of the array.
The third and fourth lines contain m integers each, the elements of sets A and B, respectively.
Output Format
Output a single integer, your total happiness.
Sample Input
3 2
1 5 3
3 1
5 7
Sample Output
1
'''
n, m = (int(x) for x in input().split())
arr = [int(x) for x in input().split()]
a = {int(x) for x in input().split()}  # liked integers: each occurrence in arr adds 1
b = {int(x) for x in input().split()}  # disliked integers: each occurrence in arr subtracts 1
print(len([x for x in arr if x in a]) - len([x for x in arr if x in b]))
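# The one-liner above is equivalent to this explicit loop, shown as an unused
# helper for clarity; set membership keeps each check O(1), and A and B are
# disjoint so at most one branch applies per element.
def happiness(values, liked, disliked):
    total = 0
    for x in values:
        if x in liked:
            total += 1
        elif x in disliked:
            total -= 1
    return total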
|
the-stack_106_13226
|
# -*- coding: utf-8 -*-
import datetime
import scrapy
from housing.items import HousingItem
def strip_extra_space(to_strip: str) -> str:
return ' '.join(to_strip.split())
class WhoiSpider(scrapy.Spider):
name = 'whoi'
allowed_domains = ['www.whoi.edu']
def start_requests(self):
yield scrapy.Request('https://www.whoi.edu/housing/housingListing.do', self.parse)
def parse(self, response):
item = HousingItem()
listings = response.xpath('//div[@id="cof"]/table/tr/td/form/table//tr')
# Ignore table header row
for listing in listings[1:]:
date_posted_string = listing.xpath('td[2]//text()').get()
item['date_posted'] = datetime.datetime.strptime(date_posted_string, "%Y-%m-%d").date()
item['description'] = listing.xpath('td[3]//text()').get()
item['location'] = listing.xpath('td[4]//text()').get()
item['rent'] = listing.xpath('td[5]//text()').get()
item['season'] = listing.xpath('td[6]//text()').get()
item['availability'] = listing.xpath('td[7]//text()').get()
moreinfo = listing.xpath('td[8]//@href').get()
request = scrapy.Request(response.urljoin(
moreinfo), callback=self.parse_more_info)
request.meta['item'] = item
yield request
def parse_more_info(self, response):
item = response.meta['item']
response = response.replace(body=response.body.replace(b'<br>', b'\n'))
response = response.replace(body=response.body.replace(b'\r\n', b''))
item['details'] = strip_extra_space(response.xpath(
'string(//div[@id="cof"]/table/tr/td/table/tr[2]//td[2])').get())
item['contact'] = strip_extra_space(response.xpath(
'string(//div[@id="cof"]/table/tr/td/table/tr[2]//td[3])').get())
yield item
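# Quick illustration of strip_extra_space: it collapses every run of
# whitespace (including the newlines produced by the <br> replacement in
# parse_more_info) into a single space.
if __name__ == "__main__":
    assert strip_extra_space("  2 bed / 1 bath \n  Woods Hole  ") == "2 bed / 1 bath Woods Hole"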
|
the-stack_106_13227
|
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
import os
import glob
import shutil
required_conan_version = ">=1.28.0"
class MongoCxxConan(ConanFile):
name = "mongo-cxx-driver"
license = "Apache-2.0"
url = "https://github.com/conan-io/conan-center-index"
homepage = "http://mongocxx.org"
description = "C++ Driver for MongoDB"
topics = ("conan", "libbsoncxx", "libmongocxx", "mongo", "mongodb", "database", "db")
settings = "os", "compiler", "arch", "build_type"
exports_sources = ["CMakeLists.txt", "patches/**"]
generators = ("cmake", "cmake_find_package")
options = {
"shared": [True, False],
"fPIC": [True, False],
"polyfill": ["std", "boost", "mnmlstc", "experimental"],
"with_ssl": [True, False]
}
default_options = {
"shared": False,
"fPIC": True,
"polyfill": "boost",
"with_ssl": True
}
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
@property
def _minimal_std_version(self):
        return {
            "std": "17",
            "experimental": "14",
            "boost": "11",
            "mnmlstc": "11"
        }[str(self.options.polyfill)]
@property
def _compilers_minimum_version(self):
if self.options.polyfill == "std":
# C++17
return {
"Visual Studio": "15",
"gcc": "7",
"clang": "5",
"apple-clang": "10"
}
elif self.options.polyfill == "experimental":
# C++14
return {
"Visual Studio": "15",
"gcc": "5",
"clang": "3.5",
"apple-clang": "10"
}
elif self.options.polyfill == "boost":
# C++11
return {
"Visual Studio": "14",
"gcc": "5",
"clang": "3.3",
"apple-clang": "9"
}
else:
raise ConanInvalidConfiguration(
"please, specify _compilers_minimum_version for {} polyfill".format(self.options.polyfill)
)
def configure(self):
if self.options.shared:
del self.options.fPIC
if self.options.polyfill == "mnmlstc":
# TODO: add mnmlstc polyfill support
# Cannot model mnmlstc (not packaged, is pulled dynamically) polyfill dependencies
raise ConanInvalidConfiguration("mnmlstc polyfill is not yet supported")
if self.settings.compiler.get_safe("cppstd"):
tools.check_min_cppstd(self, self._minimal_std_version)
compiler = str(self.settings.compiler)
if self.options.polyfill == "experimental" and compiler == "apple-clang":
raise ConanInvalidConfiguration("experimental polyfill is not supported for apple-clang")
if compiler not in self._compilers_minimum_version:
self.output.warn("Unknown compiler, assuming it supports at least C++{}".format(self._minimal_std_version))
return
version = tools.Version(self.settings.compiler.version)
if version < self._compilers_minimum_version[compiler]:
raise ConanInvalidConfiguration(
"{} requires a compiler that supports at least C++{}".format(
self.name,
self._minimal_std_version
)
)
def requirements(self):
self.requires("mongo-c-driver/1.17.2")
if self.options.polyfill == "boost":
self.requires("boost/1.74.0")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
os.rename(self.name + "-r" + self.version, self._source_subfolder)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["BSONCXX_POLY_USE_MNMLSTC"] = self.options.polyfill == "mnmlstc"
self._cmake.definitions["BSONCXX_POLY_USE_STD"] = self.options.polyfill == "std"
self._cmake.definitions["BSONCXX_POLY_USE_STD_EXPERIMENTAL"] = self.options.polyfill == "experimental"
self._cmake.definitions["BSONCXX_POLY_USE_BOOST"] = self.options.polyfill == "boost"
self._cmake.definitions["BUILD_VERSION"] = self.version
self._cmake.definitions["BSONCXX_LINK_WITH_STATIC_MONGOC"] = not self.options["mongo-c-driver"].shared
self._cmake.definitions["MONGOCXX_LINK_WITH_STATIC_MONGOC"] = not self.options["mongo-c-driver"].shared
self._cmake.definitions["MONGOCXX_ENABLE_SSL"] = self.options.with_ssl
if self.settings.compiler.get_safe("cppstd") is None:
self.output.warn("The recipe will force the cppstd to {}".format(self._minimal_std_version))
self._cmake.definitions["CMAKE_CXX_STANDARD"] = self._minimal_std_version
# FIXME: two CMake module/config files should be generated (mongoc-1.0-config.cmake and bson-1.0-config.cmake),
# but it can't be modeled right now.
# Fix should happen in mongo-c-driver recipe
if not os.path.exists("Findbson-1.0.cmake"):
self.output.info("Copying mongoc config file to bson")
shutil.copy("Findmongoc-1.0.cmake", "Findbson-1.0.cmake")
self._cmake.configure(build_folder=self._build_subfolder)
return self._cmake
def _patch_sources(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
def build(self):
if self.options.with_ssl and not bool(self.options["mongo-c-driver"].with_ssl):
raise ConanInvalidConfiguration("mongo-cxx-driver with_ssl=True requires mongo-c-driver with a ssl implementation")
self._patch_sources()
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
self.copy(pattern="THIRD-PARTY-NOTICES", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
if self.settings.os == "Windows":
for dll_file in glob.glob(os.path.join(self.package_folder, "bin", "*.dll")):
if os.path.basename(dll_file).startswith(("concrt", "msvcp", "vcruntime")):
os.remove(dll_file)
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
def package_info(self):
# FIXME: two CMake module/config files should be generated (mongocxx-config.cmake and bsoncxx-config.cmake),
# but it can't be modeled right now.
self.cpp_info.filenames["cmake_find_package"] = "mongocxx"
self.cpp_info.filenames["cmake_find_package_multi"] = "mongocxx"
self.cpp_info.names["cmake_find_package"] = "mongo"
self.cpp_info.names["cmake_find_package_multi"] = "mongo"
# mongocxx
self.cpp_info.components["mongocxx"].names["cmake_find_package"] = "mongocxx_shared" if self.options.shared else "mongocxx_static"
self.cpp_info.components["mongocxx"].names["cmake_find_package_multi"] = "mongocxx_shared" if self.options.shared else "mongocxx_static"
self.cpp_info.components["mongocxx"].names["pkg_config"] = "libmongocxx"
self.cpp_info.components["mongocxx"].libs = ["mongocxx" if self.options.shared else "mongocxx-static"]
if not self.options.shared:
self.cpp_info.components["mongocxx"].defines.append("MONGOCXX_STATIC")
self.cpp_info.components["mongocxx"].requires = ["mongo-c-driver::mongoc", "bsoncxx"]
# bsoncxx
self.cpp_info.components["bsoncxx"].names["cmake_find_package"] = "bsoncxx_shared" if self.options.shared else "bsoncxx_static"
self.cpp_info.components["bsoncxx"].names["cmake_find_package_multi"] = "bsoncxx_shared" if self.options.shared else "bsoncxx_static"
self.cpp_info.components["bsoncxx"].names["pkg_config"] = "libbsoncxx" if self.options.shared else "libbsoncxx-static"
self.cpp_info.components["bsoncxx"].libs = ["bsoncxx" if self.options.shared else "bsoncxx-static"]
if not self.options.shared:
self.cpp_info.components["bsoncxx"].defines = ["BSONCXX_STATIC"]
self.cpp_info.components["bsoncxx"].requires = ["mongo-c-driver::bson"]
if self.options.polyfill == "boost":
self.cpp_info.components["bsoncxx"].requires.append("boost::boost")
|
the-stack_106_13228
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import TYPE_CHECKING
from azure.core import PipelineClient
from msrest import Deserializer, Serializer
from ._configuration import NonStringEnumsClientConfiguration
from .operations import FloatOperations, IntOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Dict, Optional
from azure.core.rest import HttpRequest, HttpResponse
class NonStringEnumsClient(object):
"""Testing non-string enums.
:ivar int: IntOperations operations
:vartype int: nonstringenumsversiontolerant.operations.IntOperations
:ivar float: FloatOperations operations
:vartype float: nonstringenumsversiontolerant.operations.FloatOperations
:keyword endpoint: Service URL. Default value is 'http://localhost:3000'.
:paramtype endpoint: str
"""
def __init__(
self, **kwargs # type: Any
):
# type: (...) -> None
endpoint = kwargs.pop("endpoint", "http://localhost:3000") # type: str
self._config = NonStringEnumsClientConfiguration(**kwargs)
self._client = PipelineClient(base_url=endpoint, config=self._config, **kwargs)
self._serialize = Serializer()
self._deserialize = Deserializer()
self._serialize.client_side_validation = False
self.int = IntOperations(self._client, self._config, self._serialize, self._deserialize)
self.float = FloatOperations(self._client, self._config, self._serialize, self._deserialize)
def send_request(
self,
request, # type: HttpRequest
**kwargs # type: Any
):
# type: (...) -> HttpResponse
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client.send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> NonStringEnumsClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
|
the-stack_106_13229
|
import os
import numpy as np
import pygame
from gym.spaces import Discrete
from pettingzoo import AECEnv
from pettingzoo.utils import agent_selector, wrappers
from pettingzoo.utils.conversions import parallel_wrapper_fn
def get_image(path):
from os import path as os_path
import pygame
cwd = os_path.dirname(__file__)
image = pygame.image.load(cwd + '/' + path)
sfc = pygame.Surface(image.get_size(), flags=pygame.SRCALPHA)
sfc.blit(image, (0, 0))
return sfc
def get_font(path, size):
from os import path as os_path
cwd = os_path.dirname(__file__)
font = pygame.font.Font((cwd + '/' + path), size)
return font
def env(**kwargs):
env = raw_env(**kwargs)
env = wrappers.AssertOutOfBoundsWrapper(env)
env = wrappers.OrderEnforcingWrapper(env)
return env
parallel_env = parallel_wrapper_fn(env)
class raw_env(AECEnv):
"""Two-player environment for rock paper scissors.
Expandable environment to rock paper scissors lizard spock action_6 action_7 ...
The observation is simply the last opponent action."""
metadata = {
"render.modes": ["human", "rgb_array"],
"name": "rps_v2",
"is_parallelizable": True,
"video.frames_per_second": 2,
}
def __init__(self, num_actions=3, max_cycles=15):
self.max_cycles = max_cycles
        # number of actions must be odd and at least 3
assert num_actions > 2, "The number of actions must be equal or greater than 3."
assert num_actions % 2 != 0, "The number of actions must be an odd number."
self._moves = ["ROCK", "PAPER", "SCISSORS"]
if num_actions > 3:
# expand to lizard, spock for first extra action pair
self._moves.extend(("SPOCK", "LIZARD"))
for action in range(num_actions - 5):
            self._moves.append(f"ACTION_{action + 6}")
# none is last possible action, to satisfy discrete action space
self._moves.append("None")
self._none = num_actions
self.agents = ["player_" + str(r) for r in range(2)]
self.possible_agents = self.agents[:]
self.agent_name_mapping = dict(zip(self.agents, list(range(self.num_agents))))
self.action_spaces = {agent: Discrete(num_actions) for agent in self.agents}
self.observation_spaces = {agent: Discrete(1 + num_actions) for agent in self.agents}
self.screen = None
self.history = [0] * (2 * 5)
self.reinit()
def observation_space(self, agent):
return self.observation_spaces[agent]
def action_space(self, agent):
return self.action_spaces[agent]
def reinit(self):
self.agents = self.possible_agents[:]
self._agent_selector = agent_selector(self.agents)
self.agent_selection = self._agent_selector.next()
self.rewards = {agent: 0 for agent in self.agents}
self._cumulative_rewards = {agent: 0 for agent in self.agents}
self.dones = {agent: False for agent in self.agents}
self.infos = {agent: {} for agent in self.agents}
self.state = {agent: self._none for agent in self.agents}
self.observations = {agent: self._none for agent in self.agents}
self.num_moves = 0
def render(self, mode="human"):
def offset(i, size, offset=0):
if i == 0:
return -(size) - offset
else:
return offset
screen_height = 350
screen_width = int(screen_height * 5 / 14)
if self.screen is None:
if mode == "human":
pygame.init()
self.screen = pygame.display.set_mode((screen_width, screen_height))
else:
pygame.font.init()
self.screen = pygame.Surface((screen_width, screen_height))
if mode == "human":
pygame.event.get()
        # Load all of the necessary images
paper = get_image(os.path.join('img', 'Paper.png'))
rock = get_image(os.path.join('img', 'Rock.png'))
scissors = get_image(os.path.join('img', 'Scissors.png'))
spock = get_image(os.path.join('img', 'Spock.png'))
lizard = get_image(os.path.join('img', 'Lizard.png'))
# Scale images in history
paper = pygame.transform.scale(paper, (int(screen_height / 9), int(screen_height / 9 * (14 / 12))))
rock = pygame.transform.scale(rock, (int(screen_height / 9), int(screen_height / 9 * (10 / 13))))
scissors = pygame.transform.scale(scissors, (int(screen_height / 9), int(screen_height / 9 * (14 / 13))))
spock = pygame.transform.scale(spock, (int(screen_height / 9), int(screen_height / 9)))
lizard = pygame.transform.scale(lizard, (int(screen_height / 9 * (9 / 18)), int(screen_height / 9)))
# Set background color
bg = (255, 255, 255)
self.screen.fill(bg)
# Set font properties
black = (0, 0, 0)
font = get_font((os.path.join('font', 'Minecraft.ttf')), int(screen_height / 25))
for i, move in enumerate(self.history[0:10]):
# Blit move history
if move == 'ROCK':
self.screen.blit(rock, ((screen_width / 2) + offset((i + 1) % 2, screen_height / 9, screen_height * 7 / 126), (screen_height * 7 / 24) + ((screen_height / 7) * np.floor(i / 2))))
elif move == 'PAPER':
self.screen.blit(paper, ((screen_width / 2) + offset((i + 1) % 2, screen_height / 9, screen_height * 7 / 126), (screen_height * 7 / 24) + ((screen_height / 7) * np.floor(i / 2))))
elif move == 'SCISSORS':
self.screen.blit(scissors, ((screen_width / 2) + offset((i + 1) % 2, screen_height / 9, screen_height * 7 / 126), (screen_height * 7 / 24) + ((screen_height / 7) * np.floor(i / 2))))
elif move == 'SPOCK':
self.screen.blit(spock, ((screen_width / 2) + offset((i + 1) % 2, screen_height / 9, screen_height * 7 / 126), (screen_height * 7 / 24) + ((screen_height / 7) * np.floor(i / 2))))
elif move == 'LIZARD':
self.screen.blit(lizard, ((screen_width / 2) + offset((i + 1) % 2, screen_height / 9, screen_height * 7 / 126), (screen_height * 7 / 24) + ((screen_height / 7) * np.floor(i / 2))))
# Scale images in current game
paper = pygame.transform.scale(paper, (int(screen_height / 7), int(screen_height / 7 * (14 / 12))))
rock = pygame.transform.scale(rock, (int(screen_height / 7), int(screen_height / 7 * (10 / 13))))
scissors = pygame.transform.scale(scissors, (int(screen_height / 7), int(screen_height / 7 * (14 / 13))))
spock = pygame.transform.scale(spock, (int(screen_height / 7), int(screen_height / 7)))
lizard = pygame.transform.scale(lizard, (int(screen_height / 7 * (9 / 18)), int(screen_height / 7)))
if len(self.agents) > 1:
for i in range(0, 2):
# Text for each agent
text = font.render('Agent ' + str(i + 1), True, black)
textRect = text.get_rect()
textRect.center = ((screen_width / 2) + offset(i, 0, screen_width * 11 / 40), screen_height / 40)
self.screen.blit(text, textRect)
# Blit agent action
if self._moves[self.state[self.agents[i]]] == 'ROCK':
self.screen.blit(rock, ((screen_width / 2) + offset(i, screen_height / 7, screen_height / 42), screen_height / 12))
elif self._moves[self.state[self.agents[i]]] == 'PAPER':
self.screen.blit(paper, ((screen_width / 2) + offset(i, screen_height / 7, screen_height / 42), screen_height / 12))
elif self._moves[self.state[self.agents[i]]] == 'SCISSORS':
self.screen.blit(scissors, ((screen_width / 2) + offset(i, screen_height / 7, screen_height / 42), screen_height / 12))
elif self._moves[self.state[self.agents[i]]] == 'SPOCK':
self.screen.blit(spock, ((screen_width / 2) + offset(i, screen_height / 7, screen_height / 42), screen_height / 12))
elif self._moves[self.state[self.agents[i]]] == 'LIZARD':
self.screen.blit(lizard, ((screen_width / 2) + offset(i, screen_height / 7, screen_height / 42), screen_height / 12))
if self._moves[self.state[self.agents[1]]] != 'None':
self.history = [self._moves[self.state[self.agents[i]]]] + self.history[:-1]
if mode == "human":
pygame.display.update()
observation = np.array(pygame.surfarray.pixels3d(self.screen))
return np.transpose(observation, axes=(1, 0, 2)) if mode == "rgb_array" else None
def observe(self, agent):
# observation of one agent is the previous state of the other
return np.array(self.observations[agent])
def close(self):
pass
def reset(self):
self.reinit()
def step(self, action):
if self.dones[self.agent_selection]:
return self._was_done_step(action)
agent = self.agent_selection
self.state[self.agent_selection] = action
# collect reward if it is the last agent to act
if self._agent_selector.is_last():
# same action => 0 reward each agent
if self.state[self.agents[0]] == self.state[self.agents[1]]:
rewards = (0, 0)
else:
# same action parity => lower action number wins
if (self.state[self.agents[0]] + self.state[self.agents[1]]) % 2 == 0:
if self.state[self.agents[0]] > self.state[self.agents[1]]:
rewards = (-1, 1)
else:
rewards = (1, -1)
# different action parity => higher action number wins
else:
if self.state[self.agents[0]] > self.state[self.agents[1]]:
rewards = (1, -1)
else:
rewards = (-1, 1)
self.rewards[self.agents[0]], self.rewards[self.agents[1]] = rewards
self.num_moves += 1
self.dones = {agent: self.num_moves >= self.max_cycles for agent in self.agents}
# observe the current state
for i in self.agents:
self.observations[i] = self.state[self.agents[1 - self.agent_name_mapping[i]]]
else:
self.state[self.agents[1 - self.agent_name_mapping[agent]]] = self._none
self._clear_rewards()
self._cumulative_rewards[self.agent_selection] = 0
self.agent_selection = self._agent_selector.next()
self._accumulate_rewards()
|
the-stack_106_13231
|
from . import views
from django.conf.urls import include, url
urlpatterns = [
url(r'^cadastro/', views.cadastro, name="cadastro"),
url(r'^sobre/', views.Sobre, name="Sobre"),
url(r'^perfil/', views.perfil, name="perfil"),
url(r'^editarcadastro/', views.alterar_dados, name="alterardados"),
url(r'^editarsenha/', views.alterar_senha, name="alterarsenha"),
url(r'^login/$', views.login, name="login"),
url(r'^login/(?P<tk>[-\w ]+)', views.loginEMailConfirmado, name="loginEMailConfirmado"),
url(r'^logout/', views.logout, name="logout"),
url(r'^mapa/', views.mapa, name="mapa"),
url(r'^$', views.Home, name="Home"),
url(r'^ativa/token=(?P<token>[-\w ]+)', views.Ativa, name="Ativa"),
url(r'^recuperar-senha/', views.recuperarsenha, name="recuperarsenha"),
url(r'^novasenha/(?P<idusuario>[-\w ]+)', views.novasenha, name="novasenha"),
url(r'^confirmacao-email/', views.confirmacaoemail, name="confirmacaoemail"),
]
|
the-stack_106_13232
|
from aerosandbox_legacy_v0 import *
import autograd.numpy as np
from autograd import grad
def f(x):
a = Airplane(
name="Single Wing",
xyz_ref=[0, 0, 0],
wings=[
Wing(
name="Wing",
xyz_le=[0, 0, 0],
symmetric=True,
xsecs=[
WingXSec(
xyz_le=[0, 0, 0],
chord=x,
twist=0,
airfoil=Airfoil(name="naca0012")
),
WingXSec(
xyz_le=[0, 1, 0],
chord=0.5,
twist=0,
airfoil=Airfoil(name="naca0012")
)
]
)
]
)
a.set_ref_dims_from_wing()
ap = vlm3(
airplane=a,
op_point=OperatingPoint(velocity=10,
alpha=5,
beta=0),
)
ap.run(verbose=False)
return ap.CL_over_CDi
val = 1.0 # nominal value of parameter
# Finite Difference
h = 1e-8 # step size
dfdx_fd = (f(val + h) - f(val)) / h
print('d(CL/CDi)/dx, finite difference = ', dfdx_fd)
# Autograd
dfdx_ag = grad(f)(val)
print('d(CL/CDi)/dx, reverse-mode AD = ', dfdx_ag)
|
the-stack_106_13233
|
r"""Use pre-trained tokenizer to tokenize text.
Pre-trained tokenizer must exist, i.e., perform tokenizer training first then
use this script.
See Also
========
lmp.script.train_tokenizer
Train tokenizer.
lmp.tknzr
All available tokenizers.
Examples
========
The following example used pre-trained tokenizer under experiment ``my_exp`` to
tokenize text ``'hello world'``.
.. code-block:: sh
python -m lmp.script.tokenize \
--exp_name my_exp \
--txt "Hello World"
Use ``-h`` or ``--help`` options to get list of available options.
.. code-block:: sh
python -m lmp.script.train_tokenizer -h
"""
import argparse
import lmp.dset
import lmp.tknzr
import lmp.util.cfg
import lmp.util.tknzr
def parse_arg() -> argparse.Namespace:
r"""Parse arguments from CLI.
Parse pre-trained tokenizer experiment name and text to be tokenized.
--exp_name Pre-trained tokenizer experiment name.
--txt Text to be tokenized.
Returns
=======
argparse.Namespace
Arguments from CLI.
"""
# Create parser.
parser = argparse.ArgumentParser(
'python -m lmp.script.tokenize',
description='Use pre-trained tokenizer to tokenize text.',
)
# Required arguments.
parser.add_argument(
'--exp_name',
help='Pre-trained tokenizer experiment name.',
required=True,
type=str,
)
parser.add_argument(
'--txt',
help='Text to be tokenized.',
required=True,
type=str,
)
return parser.parse_args()
def main() -> None:
r"""Script entry point."""
# Parse command-line argument.
args = parse_arg()
# Load pre-trained tokenizer configuration.
tknzr_cfg = lmp.util.cfg.load(exp_name=args.exp_name)
# Load pre-trained tokenizer instance.
tknzr = lmp.util.tknzr.load(
exp_name=args.exp_name,
tknzr_name=tknzr_cfg.tknzr_name,
)
# Tokenize text.
print(tknzr.tknz(args.txt))
if __name__ == '__main__':
main()
|
the-stack_106_13234
|
# app/tests/test_quotes.py
import json
import pytest
from app.api.models import Author, Quote
def test_all_quotes(test_app, test_database, add_quote):
test_database.session.query(Quote).delete()
test_database.session.query(Author).delete()
add_quote("Marilyn Monroe", "Imperfection is beauty.")
add_quote("Albert Einstein", "The world as we have created it is a process of our thinking.")
client = test_app.test_client()
resp = client.get("/quotes")
data = json.loads(resp.data)
assert resp.status_code == 200
# 2 quotes inserted above
assert len(data) == 2
assert "Imperfection is beauty." in data[0]["content"]
assert "Marilyn Monroe" in data[0]["author_name"]
assert "Albert Einstein" not in data[0]["author_name"]
def test_random_quotes(test_app, test_database, add_quote):
test_database.session.query(Quote).delete()
test_database.session.query(Author).delete()
add_quote("Marilyn Monroe", "Imperfection is beauty.")
add_quote("Albert Einstein", "The world as we have created it is a process of our thinking.")
add_quote("Steve Martin", "A day without sunshine is like, you know, night.")
add_quote("Jane Austen", "The person, be it gentleman or lady, who has not pleasure in a good novel, must be intolerably stupid.")
add_quote("J.K. Rowling", "It is our choices, Harry, that show what we truly are, far more than our abilities.")
client = test_app.test_client()
resp = client.get("/quotes/random")
data = json.loads(resp.data)
assert resp.status_code == 200
# 5 quotes inserted above
# make sure only 3 returned
assert len(data) == 3
|
the-stack_106_13236
|
#!/usr/bin/env python3
# Copyright (c) 2020-2021 The Eleccoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test ping message
"""
import time
from test_framework.messages import msg_pong
from test_framework.p2p import P2PInterface
from test_framework.test_framework import EleccoinTestFramework
from test_framework.util import assert_equal
PING_INTERVAL = 2 * 60
class msg_pong_corrupt(msg_pong):
def serialize(self):
return b""
class NodePongAdd1(P2PInterface):
def on_ping(self, message):
self.send_message(msg_pong(message.nonce + 1))
class NodeNoPong(P2PInterface):
def on_ping(self, message):
pass
class PingPongTest(EleccoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [['-peertimeout=3']]
def check_peer_info(self, *, pingtime, minping, pingwait):
stats = self.nodes[0].getpeerinfo()[0]
assert_equal(stats.pop('pingtime', None), pingtime)
assert_equal(stats.pop('minping', None), minping)
assert_equal(stats.pop('pingwait', None), pingwait)
def mock_forward(self, delta):
self.mock_time += delta
self.nodes[0].setmocktime(self.mock_time)
def run_test(self):
self.mock_time = int(time.time())
self.mock_forward(0)
self.log.info('Check that ping is sent after connection is established')
no_pong_node = self.nodes[0].add_p2p_connection(NodeNoPong())
self.mock_forward(3)
assert no_pong_node.last_message.pop('ping').nonce != 0
self.check_peer_info(pingtime=None, minping=None, pingwait=3)
self.log.info('Reply without nonce cancels ping')
with self.nodes[0].assert_debug_log(['pong peer=0: Short payload']):
no_pong_node.send_and_ping(msg_pong_corrupt())
self.check_peer_info(pingtime=None, minping=None, pingwait=None)
self.log.info('Reply without ping')
with self.nodes[0].assert_debug_log([
'pong peer=0: Unsolicited pong without ping, 0 expected, 0 received, 8 bytes',
]):
no_pong_node.send_and_ping(msg_pong())
self.check_peer_info(pingtime=None, minping=None, pingwait=None)
self.log.info('Reply with wrong nonce does not cancel ping')
assert 'ping' not in no_pong_node.last_message
with self.nodes[0].assert_debug_log(['pong peer=0: Nonce mismatch']):
# mock time PING_INTERVAL ahead to trigger node into sending a ping
self.mock_forward(PING_INTERVAL + 1)
no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message)
self.mock_forward(9)
# Send the wrong pong
no_pong_node.send_and_ping(msg_pong(no_pong_node.last_message.pop('ping').nonce - 1))
self.check_peer_info(pingtime=None, minping=None, pingwait=9)
self.log.info('Reply with zero nonce does cancel ping')
with self.nodes[0].assert_debug_log(['pong peer=0: Nonce zero']):
no_pong_node.send_and_ping(msg_pong(0))
self.check_peer_info(pingtime=None, minping=None, pingwait=None)
self.log.info('Check that ping is properly reported on RPC')
assert 'ping' not in no_pong_node.last_message
# mock time PING_INTERVAL ahead to trigger node into sending a ping
self.mock_forward(PING_INTERVAL + 1)
no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message)
ping_delay = 29
self.mock_forward(ping_delay)
no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message)
no_pong_node.send_and_ping(msg_pong(no_pong_node.last_message.pop('ping').nonce))
self.check_peer_info(pingtime=ping_delay, minping=ping_delay, pingwait=None)
self.log.info('Check that minping is decreased after a fast roundtrip')
# mock time PING_INTERVAL ahead to trigger node into sending a ping
self.mock_forward(PING_INTERVAL + 1)
no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message)
ping_delay = 9
self.mock_forward(ping_delay)
no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message)
no_pong_node.send_and_ping(msg_pong(no_pong_node.last_message.pop('ping').nonce))
self.check_peer_info(pingtime=ping_delay, minping=ping_delay, pingwait=None)
self.log.info('Check that peer is disconnected after ping timeout')
assert 'ping' not in no_pong_node.last_message
self.nodes[0].ping()
no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message)
with self.nodes[0].assert_debug_log(['ping timeout: 1201.000000s']):
self.mock_forward(20 * 60 + 1)
time.sleep(4) # peertimeout + 1
if __name__ == '__main__':
PingPongTest().main()
|
the-stack_106_13238
|
import json
import logging
import os
import string
import tempfile
from datetime import date
from mercurial import (
mdiff,
patch
)
from sqlalchemy import and_, false, null
import tool_shed.grids.repository_grids as repository_grids
import tool_shed.grids.util as grids_util
import tool_shed.repository_types.util as rt_util
from galaxy import (
util,
web
)
from galaxy.tools.repositories import ValidationContext
from galaxy.web.base.controller import BaseUIController
from galaxy.web.form_builder import CheckboxField, SelectField
from galaxy.webapps.reports.framework import grids
from galaxy.webapps.tool_shed.util import ratings_util
from tool_shed.capsule import capsule_manager
from tool_shed.dependencies.repository import relation_builder
from tool_shed.galaxy_install import dependency_display
from tool_shed.metadata import repository_metadata_manager
from tool_shed.tools import (
tool_validator,
tool_version_manager
)
from tool_shed.util import (
basic_util,
common_util,
encoding_util,
hg_util,
metadata_util,
readme_util,
repository_util,
search_util,
shed_util_common as suc,
tool_util,
workflow_util
)
from tool_shed.util.web_util import escape
from tool_shed.utility_containers import ToolShedUtilityContainerManager
log = logging.getLogger(__name__)
malicious_error = " This changeset cannot be downloaded because it potentially produces malicious behavior or contains inappropriate content."
malicious_error_can_push = " Correct this changeset as soon as possible, it potentially produces malicious behavior or contains inappropriate content."
class RepositoryController(BaseUIController, ratings_util.ItemRatings):
category_grid = repository_grids.CategoryGrid()
datatypes_grid = repository_grids.DatatypesGrid()
deprecated_repositories_i_own_grid = repository_grids.DeprecatedRepositoriesIOwnGrid()
email_alerts_repository_grid = repository_grids.EmailAlertsRepositoryGrid()
docker_image_grid = repository_grids.DockerImageGrid()
install_matched_repository_grid = repository_grids.InstallMatchedRepositoryGrid()
matched_repository_grid = repository_grids.MatchedRepositoryGrid()
my_writable_repositories_grid = repository_grids.MyWritableRepositoriesGrid()
my_writable_repositories_missing_tool_test_components_grid = repository_grids.MyWritableRepositoriesMissingToolTestComponentsGrid()
repositories_by_user_grid = repository_grids.RepositoriesByUserGrid()
repositories_i_own_grid = repository_grids.RepositoriesIOwnGrid()
repositories_i_can_administer_grid = repository_grids.RepositoriesICanAdministerGrid()
repositories_in_category_grid = repository_grids.RepositoriesInCategoryGrid()
repositories_missing_tool_test_components_grid = repository_grids.RepositoriesMissingToolTestComponentsGrid()
repositories_with_invalid_tools_grid = repository_grids.RepositoriesWithInvalidToolsGrid()
repository_dependencies_grid = repository_grids.RepositoryDependenciesGrid()
repository_grid = repository_grids.RepositoryGrid()
# The repository_metadata_grid is not currently displayed, but is sub-classed by several grids.
repository_metadata_grid = repository_grids.RepositoryMetadataGrid()
tool_dependencies_grid = repository_grids.ToolDependenciesGrid()
tools_grid = repository_grids.ToolsGrid()
valid_category_grid = repository_grids.ValidCategoryGrid()
valid_repository_grid = repository_grids.ValidRepositoryGrid()
def _redirect_if_necessary(self, trans, **kwd):
if 'operation' in kwd:
operation = kwd['operation'].lower()
if operation == "view_or_manage_repository":
return trans.response.send_redirect(web.url_for(controller='repository',
action='view_or_manage_repository',
**kwd))
elif operation == "repositories_by_user":
return trans.response.send_redirect(web.url_for(controller='repository',
action='browse_repositories_by_user',
**kwd))
elif operation in ['mark as deprecated', 'mark as not deprecated']:
kwd['mark_deprecated'] = operation == 'mark as deprecated'
return trans.response.send_redirect(web.url_for(controller='repository',
action='deprecate',
**kwd))
@web.expose
def browse_categories(self, trans, **kwd):
# The request came from the tool shed.
if 'f-free-text-search' in kwd:
# Trick to enable searching repository name, description from the CategoryGrid.
# What we've done is rendered the search box for the RepositoryGrid on the grid.mako
# template for the CategoryGrid. See ~/templates/webapps/tool_shed/category/grid.mako.
# Since we are searching repositories and not categories, redirect to browse_repositories().
if 'id' in kwd and 'f-free-text-search' in kwd and kwd['id'] == kwd['f-free-text-search']:
# The value of 'id' has been set to the search string, which is a repository name.
# We'll try to get the desired encoded repository id to pass on.
try:
repository_name = kwd['id']
repository = repository_util.get_repository_by_name(trans.app, repository_name)
kwd['id'] = trans.security.encode_id(repository.id)
except Exception:
pass
return trans.response.send_redirect(web.url_for(controller='repository',
action='browse_repositories',
**kwd))
if 'operation' in kwd:
operation = kwd['operation'].lower()
if operation in ["repositories_by_category", "repositories_by_user"]:
# Eliminate the current filters if any exist.
for k, v in list(kwd.items()):
if k.startswith('f-'):
del kwd[k]
return trans.response.send_redirect(web.url_for(controller='repository',
action='browse_repositories',
**kwd))
title = trans.app.repository_grid_filter_manager.get_grid_title(trans,
trailing_string='by Category',
default='Repositories')
self.category_grid.title = title
return self.category_grid(trans, **kwd)
@web.expose
def browse_datatypes(self, trans, **kwd):
if 'operation' in kwd:
operation = kwd['operation'].lower()
# The received id is a RepositoryMetadata id.
repository_metadata_id = kwd['id']
repository_metadata = metadata_util.get_repository_metadata_by_id(trans.app, repository_metadata_id)
repository_id = trans.security.encode_id(repository_metadata.repository_id)
changeset_revision = repository_metadata.changeset_revision
new_kwd = dict(id=repository_id,
changeset_revision=changeset_revision)
if operation == "view_or_manage_repository":
return trans.response.send_redirect(web.url_for(controller='repository',
action='view_or_manage_repository',
**new_kwd))
return self.datatypes_grid(trans, **kwd)
@web.expose
def browse_deprecated_repositories_i_own(self, trans, **kwd):
if 'operation' in kwd:
operation = kwd['operation'].lower()
if operation == "view_or_manage_repository":
return trans.response.send_redirect(web.url_for(controller='repository',
action='view_or_manage_repository',
**kwd))
selected_changeset_revision, repository = suc.get_repository_from_refresh_on_change(trans.app, **kwd)
if repository:
return trans.response.send_redirect(web.url_for(controller='repository',
action='browse_repositories',
operation='view_or_manage_repository',
id=trans.security.encode_id(repository.id),
changeset_revision=selected_changeset_revision))
return self.deprecated_repositories_i_own_grid(trans, **kwd)
@web.expose
def browse_my_writable_repositories(self, trans, **kwd):
_redir = self._redirect_if_necessary(trans, **kwd)
if _redir is not None:
return _redir
selected_changeset_revision, repository = suc.get_repository_from_refresh_on_change(trans.app, **kwd)
if repository:
return trans.response.send_redirect(web.url_for(controller='repository',
action='browse_repositories',
operation='view_or_manage_repository',
id=trans.security.encode_id(repository.id),
changeset_revision=selected_changeset_revision))
return self.my_writable_repositories_grid(trans, **kwd)
@web.expose
def browse_my_writable_repositories_missing_tool_test_components(self, trans, **kwd):
_redir = self._redirect_if_necessary(trans, **kwd)
if _redir is not None:
return _redir
if 'message' not in kwd:
message = 'This list contains repositories that match the following criteria:<br>'
message += '<ul>'
message += '<li>you are authorized to update them</li>'
message += '<li>the latest installable revision contains at least 1 tool with no defined tests <b>OR</b>:</li>'
message += '<li>the latest installable revision contains at least 1 tool with a test that requires a missing test data file</li>'
message += '</ul>'
kwd['message'] = message
kwd['status'] = 'warning'
return self.my_writable_repositories_missing_tool_test_components_grid(trans, **kwd)
@web.expose
def browse_my_writable_repositories_with_invalid_tools(self, trans, **kwd):
_redir = self._redirect_if_necessary(trans, **kwd)
if _redir is not None:
return _redir
if 'message' not in kwd:
message = 'This list contains repositories that match the following criteria:<br>'
message += '<ul>'
message += '<li>you are authorized to update them</li>'
message += '<li>the latest metadata revision contains at least 1 invalid tool</li>'
message += '</ul>'
message += 'Click the tool config file name to see why the tool is invalid.'
kwd['message'] = message
kwd['status'] = 'warning'
return self.my_writable_repositories_with_invalid_tools_grid(trans, **kwd)
@web.expose
def browse_repositories(self, trans, **kwd):
# We add params to the keyword dict in this method in order to rename the param with an "f-" prefix,
# simulating filtering by clicking a search link. We have to take this approach because the "-"
# character is illegal in HTTP requests.
if 'operation' in kwd:
operation = kwd['operation'].lower()
if operation == "view_or_manage_repository":
return trans.response.send_redirect(web.url_for(controller='repository',
action='view_or_manage_repository',
**kwd))
elif operation == "edit_repository":
return trans.response.send_redirect(web.url_for(controller='repository',
action='edit_repository',
**kwd))
elif operation == "repositories_by_user":
return trans.response.send_redirect(web.url_for(controller='repository',
action='browse_repositories_by_user',
**kwd))
elif operation == "reviewed_repositories_i_own":
return trans.response.send_redirect(web.url_for(controller='repository_review',
action='reviewed_repositories_i_own'))
elif operation == "repositories_by_category":
category_id = kwd.get('id', None)
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
return trans.response.send_redirect(web.url_for(controller='repository',
action='browse_repositories_in_category',
id=category_id,
message=message,
status=status))
elif operation == "receive email alerts":
if trans.user:
if kwd['id']:
kwd['caller'] = 'browse_repositories'
return trans.response.send_redirect(web.url_for(controller='repository',
action='set_email_alerts',
**kwd))
else:
kwd['message'] = 'You must be logged in to set email alerts.'
kwd['status'] = 'error'
del kwd['operation']
selected_changeset_revision, repository = suc.get_repository_from_refresh_on_change(trans.app, **kwd)
if repository:
return trans.response.send_redirect(web.url_for(controller='repository',
action='browse_repositories',
operation='view_or_manage_repository',
id=trans.security.encode_id(repository.id),
changeset_revision=selected_changeset_revision))
title = trans.app.repository_grid_filter_manager.get_grid_title(trans,
trailing_string='',
default='Repositories')
self.repository_grid.title = title
return self.repository_grid(trans, **kwd)
@web.expose
def browse_repositories_by_user(self, trans, **kwd):
"""Display the list of repositories owned by a specified user."""
# Eliminate the current search filters if any exist.
for k, v in list(kwd.items()):
if k.startswith('f-'):
del kwd[k]
if 'operation' in kwd:
operation = kwd['operation'].lower()
if operation == "view_or_manage_repository":
return trans.response.send_redirect(web.url_for(controller='repository',
action='view_or_manage_repository',
**kwd))
user_id = kwd.get('user_id', None)
if user_id is None:
# The received id is the repository id, so we need to get the id of the user that owns the repository.
repository_id = kwd.get('id', None)
if repository_id:
repository = repository_util.get_repository_in_tool_shed(trans.app, repository_id)
user_id = trans.security.encode_id(repository.user.id)
kwd['user_id'] = user_id
else:
# The user selected a repository revision which results in a refresh_on_change.
selected_changeset_revision, repository = suc.get_repository_from_refresh_on_change(trans.app, **kwd)
if repository:
return trans.response.send_redirect(web.url_for(controller='repository',
action='view_or_manage_repository',
id=trans.security.encode_id(repository.id),
changeset_revision=selected_changeset_revision))
if user_id:
user = suc.get_user(trans.app, user_id)
trailing_string = ''
default = 'Repositories Owned by %s' % str(user.username)
else:
trailing_string = ''
default = 'Repositories'
title = trans.app.repository_grid_filter_manager.get_grid_title(trans,
trailing_string=trailing_string,
default=default)
self.repositories_by_user_grid.title = title
return self.repositories_by_user_grid(trans, **kwd)
@web.expose
def browse_repositories_i_can_administer(self, trans, **kwd):
_redir = self._redirect_if_necessary(trans, **kwd)
if _redir is not None:
return _redir
selected_changeset_revision, repository = suc.get_repository_from_refresh_on_change(trans.app, **kwd)
if repository:
return trans.response.send_redirect(web.url_for(controller='repository',
action='browse_repositories',
operation='view_or_manage_repository',
id=trans.security.encode_id(repository.id),
changeset_revision=selected_changeset_revision))
return self.repositories_i_can_administer_grid(trans, **kwd)
@web.expose
def browse_repositories_i_own(self, trans, **kwd):
_redir = self._redirect_if_necessary(trans, **kwd)
if _redir is not None:
return _redir
selected_changeset_revision, repository = suc.get_repository_from_refresh_on_change(trans.app, **kwd)
if repository:
return trans.response.send_redirect(web.url_for(controller='repository',
action='browse_repositories',
operation='view_or_manage_repository',
id=trans.security.encode_id(repository.id),
changeset_revision=selected_changeset_revision))
return self.repositories_i_own_grid(trans, **kwd)
@web.expose
def browse_repositories_in_category(self, trans, **kwd):
if 'operation' in kwd:
operation = kwd['operation'].lower()
if operation == "view_or_manage_repository":
return trans.response.send_redirect(web.url_for(controller='repository',
action='view_or_manage_repository',
**kwd))
if operation == 'repositories_by_user':
user_id = kwd.get('user_id', None)
if user_id is None:
# The received id is the repository id, so we need to get the id of the user that owns the repository.
repository_id = kwd.get('id', None)
if repository_id:
repository = repository_util.get_repository_in_tool_shed(trans.app, repository_id)
user_id = trans.security.encode_id(repository.user.id)
user = suc.get_user(trans.app, user_id)
self.repositories_by_user_grid.title = "Repositories owned by %s" % user.username
kwd['user_id'] = user_id
return self.repositories_by_user_grid(trans, **kwd)
selected_changeset_revision, repository = suc.get_repository_from_refresh_on_change(trans.app, **kwd)
if repository:
# The user selected a repository revision which results in a refresh_on_change.
return trans.response.send_redirect(web.url_for(controller='repository',
action='view_or_manage_repository',
id=trans.security.encode_id(repository.id),
changeset_revision=selected_changeset_revision))
category_id = kwd.get('id', None)
if category_id:
category = suc.get_category(trans.app, category_id)
if category:
trailing_string = 'in Category %s' % str(category.name)
else:
trailing_string = 'in Category'
else:
trailing_string = 'in Category'
title = trans.app.repository_grid_filter_manager.get_grid_title(trans,
trailing_string=trailing_string,
default='Repositories')
self.repositories_in_category_grid.title = title
return self.repositories_in_category_grid(trans, **kwd)
@web.expose
def browse_repositories_missing_tool_test_components(self, trans, **kwd):
_redir = self._redirect_if_necessary(trans, **kwd)
if _redir is not None:
return _redir
if 'message' not in kwd:
message = 'This list contains repositories that match the following criteria:<br>'
message += '<ul>'
message += '<li>the latest installable revision contains at least 1 tool with no defined tests <b>OR</b>:</li>'
message += '<li>the latest installable revision contains at least 1 tool with a test that requires a missing test data file</li>'
message += '</ul>'
kwd['message'] = message
kwd['status'] = 'warning'
return self.repositories_missing_tool_test_components_grid(trans, **kwd)
@web.expose
def browse_repositories_with_invalid_tools(self, trans, **kwd):
_redir = self._redirect_if_necessary(trans, **kwd)
if _redir is not None:
return _redir
if 'message' not in kwd:
message = 'This list contains repositories that match the following criteria:<br>'
message += '<ul>'
message += '<li>the latest metadata revision contains at least 1 invalid tool</li>'
message += '</ul>'
message += 'Click the tool config file name to see why the tool is invalid.'
kwd['message'] = message
kwd['status'] = 'warning'
return self.repositories_with_invalid_tools_grid(trans, **kwd)
@web.expose
def browse_repository(self, trans, id, **kwd):
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
commit_message = escape(kwd.get('commit_message', 'Deleted selected files'))
repository = repository_util.get_repository_in_tool_shed(trans.app, id)
repo = hg_util.get_repo_for_repository(trans.app, repository=repository, repo_path=None, create=False)
# Update repository files for browsing.
hg_util.update_repository(repo)
changeset_revision = repository.tip(trans.app)
metadata = metadata_util.get_repository_metadata_by_repository_id_changeset_revision(trans.app,
id,
changeset_revision,
metadata_only=True)
repository_type_select_field = rt_util.build_repository_type_select_field(trans, repository=repository)
return trans.fill_template('/webapps/tool_shed/repository/browse_repository.mako',
repository=repository,
changeset_revision=changeset_revision,
metadata=metadata,
commit_message=commit_message,
repository_type_select_field=repository_type_select_field,
message=message,
status=status)
@web.expose
def browse_repository_dependencies(self, trans, **kwd):
if 'operation' in kwd:
operation = kwd['operation'].lower()
# The received id is a RepositoryMetadata id.
repository_metadata_id = kwd['id']
repository_metadata = metadata_util.get_repository_metadata_by_id(trans.app, repository_metadata_id)
repository_id = trans.security.encode_id(repository_metadata.repository_id)
changeset_revision = repository_metadata.changeset_revision
new_kwd = dict(id=repository_id,
changeset_revision=changeset_revision)
if operation == "browse_repository":
return trans.response.send_redirect(web.url_for(controller='repository',
action='browse_repository',
**new_kwd))
if operation == "view_or_manage_repository":
return trans.response.send_redirect(web.url_for(controller='repository',
action='view_or_manage_repository',
**new_kwd))
return self.repository_dependencies_grid(trans, **kwd)
@web.expose
def browse_tools(self, trans, **kwd):
if 'operation' in kwd:
operation = kwd['operation'].lower()
# The received id is a RepositoryMetadata id.
repository_metadata_id = kwd['id']
repository_metadata = metadata_util.get_repository_metadata_by_id(trans.app, repository_metadata_id)
repository_id = trans.security.encode_id(repository_metadata.repository_id)
changeset_revision = repository_metadata.changeset_revision
new_kwd = dict(id=repository_id,
changeset_revision=changeset_revision)
if operation == "view_or_manage_repository":
return trans.response.send_redirect(web.url_for(controller='repository',
action='view_or_manage_repository',
**new_kwd))
return self.tools_grid(trans, **kwd)
@web.expose
def browse_tool_dependencies(self, trans, **kwd):
if 'operation' in kwd:
operation = kwd['operation'].lower()
# The received id is a RepositoryMetadata id.
repository_metadata_id = kwd['id']
repository_metadata = metadata_util.get_repository_metadata_by_id(trans.app, repository_metadata_id)
repository_id = trans.security.encode_id(repository_metadata.repository_id)
changeset_revision = repository_metadata.changeset_revision
new_kwd = dict(id=repository_id,
changeset_revision=changeset_revision)
if operation == "view_or_manage_repository":
return trans.response.send_redirect(web.url_for(controller='repository',
action='view_or_manage_repository',
**new_kwd))
return self.tool_dependencies_grid(trans, **kwd)
@web.expose
def browse_valid_categories(self, trans, **kwd):
"""Filter repositories per category by those that are valid for installing into Galaxy."""
# The request came from Galaxy, so restrict category links to display only valid repository changeset revisions.
galaxy_url = common_util.handle_galaxy_url(trans, **kwd)
if galaxy_url:
kwd['galaxy_url'] = galaxy_url
if 'f-free-text-search' in kwd:
if kwd['f-free-text-search'] == 'All':
# The user performed a search, then clicked the "x" to eliminate the search criteria.
new_kwd = {}
return self.valid_category_grid(trans, **new_kwd)
# Since we are searching valid repositories and not categories, redirect to browse_valid_repositories().
if 'id' in kwd and 'f-free-text-search' in kwd and kwd['id'] == kwd['f-free-text-search']:
# The value of 'id' has been set to the search string, which is a repository name.
# We'll try to get the desired encoded repository id to pass on.
try:
name = kwd['id']
repository = repository_util.get_repository_by_name(trans.app, name)
kwd['id'] = trans.security.encode_id(repository.id)
except Exception:
pass
return self.browse_valid_repositories(trans, **kwd)
if 'operation' in kwd:
operation = kwd['operation'].lower()
if operation in ["valid_repositories_by_category", "valid_repositories_by_user"]:
# Eliminate the current filters if any exist.
for k, v in list(kwd.items()):
if k.startswith('f-'):
del kwd[k]
return trans.response.send_redirect(web.url_for(controller='repository',
action='browse_valid_repositories',
**kwd))
title = trans.app.repository_grid_filter_manager.get_grid_title(trans,
trailing_string='by Category',
default='Categories of Valid Repositories')
self.valid_category_grid.title = title
return self.valid_category_grid(trans, **kwd)
@web.expose
def browse_valid_repositories(self, trans, **kwd):
"""Filter repositories to those that are installable into Galaxy."""
galaxy_url = common_util.handle_galaxy_url(trans, **kwd)
if galaxy_url:
kwd['galaxy_url'] = galaxy_url
repository_id = kwd.get('id', None)
if 'f-free-text-search' in kwd:
if 'f-Category.name' in kwd:
# The user browsed to a category and then entered a search string, so get the category associated with its value.
category_name = kwd['f-Category.name']
category = suc.get_category_by_name(trans.app, category_name)
# Set the id value in kwd since it is required by the ValidRepositoryGrid.build_initial_query method.
kwd['id'] = trans.security.encode_id(category.id)
if 'operation' in kwd:
operation = kwd['operation'].lower()
if operation == "preview_tools_in_changeset":
repository = repository_util.get_repository_in_tool_shed(trans.app, repository_id)
repository_metadata = metadata_util.get_latest_repository_metadata(trans.app, repository.id, downloadable=True)
latest_installable_changeset_revision = repository_metadata.changeset_revision
return trans.response.send_redirect(web.url_for(controller='repository',
action='preview_tools_in_changeset',
repository_id=repository_id,
changeset_revision=latest_installable_changeset_revision))
elif operation == "valid_repositories_by_category":
# Eliminate the current filters if any exist.
for k, v in list(kwd.items()):
if k.startswith('f-'):
del kwd[k]
category_id = kwd.get('id', None)
category = suc.get_category(trans.app, category_id)
kwd['f-Category.name'] = category.name
selected_changeset_revision, repository = suc.get_repository_from_refresh_on_change(trans.app, **kwd)
if repository:
return trans.response.send_redirect(web.url_for(controller='repository',
action='preview_tools_in_changeset',
repository_id=trans.security.encode_id(repository.id),
changeset_revision=selected_changeset_revision))
url_args = dict(action='browse_valid_repositories',
operation='preview_tools_in_changeset',
repository_id=repository_id)
self.valid_repository_grid.operations = [grids.GridOperation("Preview and install",
url_args=url_args,
allow_multiple=False,
async_compatible=False)]
title = trans.app.repository_grid_filter_manager.get_grid_title(trans,
trailing_string='',
default='Valid Repositories')
self.valid_repository_grid.title = title
return self.valid_repository_grid(trans, **kwd)
@web.expose
def check_for_updates(self, trans, **kwd):
"""Handle a request from a local Galaxy instance."""
message = escape(kwd.get('message', ''))
# If the request originated with the UpdateRepositoryManager, it will not include a galaxy_url.
galaxy_url = common_util.handle_galaxy_url(trans, **kwd)
name = kwd.get('name', None)
owner = kwd.get('owner', None)
changeset_revision = kwd.get('changeset_revision', None)
repository = repository_util.get_repository_by_name_and_owner(trans.app, name, owner)
repo = hg_util.get_repo_for_repository(trans.app, repository=repository, repo_path=None, create=False)
# Default to the current changeset revision.
update_to_ctx = hg_util.get_changectx_for_changeset(repo, changeset_revision)
latest_changeset_revision = changeset_revision
from_update_manager = kwd.get('from_update_manager', False)
if from_update_manager:
update = 'true'
no_update = 'false'
elif galaxy_url:
# Start building up the url to redirect back to the calling Galaxy instance.
params = dict(tool_shed_url=web.url_for('/', qualified=True),
name=str(repository.name),
owner=str(repository.user.username),
changeset_revision=changeset_revision)
pathspec = ['admin_toolshed', 'update_to_changeset_revision']
else:
message = 'Unable to check for updates due to an invalid Galaxy URL: <b>%s</b>. ' % galaxy_url
message += 'You may need to enable third-party cookies in your browser. '
return trans.show_error_message(message)
if changeset_revision == repository.tip(trans.app):
# If changeset_revision is the repository tip, there are no additional updates.
if from_update_manager:
return no_update
# Return the same value for changeset_revision and latest_changeset_revision.
else:
repository_metadata = metadata_util.get_repository_metadata_by_changeset_revision(trans.app,
trans.security.encode_id(repository.id),
changeset_revision)
if repository_metadata:
# If changeset_revision is in the repository_metadata table for this repository, there are no
# additional updates.
if from_update_manager:
return no_update
# Return the same value for changeset_revision and latest_changeset_revision.
else:
# The changeset_revision column in the repository_metadata table has been updated with a new
# changeset_revision value since the repository was installed. We need to find the changeset_revision
# to which we need to update.
update_to_changeset_hash = None
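                # Walk the changelog from oldest to newest: first locate the installed changeset_revision,
                # then update to the next changeset that has an associated RepositoryMetadata record,
                # falling back to the repository tip if no such record is found.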
for changeset in repo.changelog:
changeset_hash = str(repo.changectx(changeset))
hg_util.get_changectx_for_changeset(repo, changeset_hash)
if update_to_changeset_hash:
if changeset_hash == repository.tip(trans.app):
update_to_ctx = hg_util.get_changectx_for_changeset(repo, changeset_hash)
latest_changeset_revision = changeset_hash
break
else:
repository_metadata = metadata_util.get_repository_metadata_by_changeset_revision(trans.app,
trans.security.encode_id(repository.id),
changeset_hash)
if repository_metadata:
# We found a RepositoryMetadata record.
update_to_ctx = hg_util.get_changectx_for_changeset(repo, changeset_hash)
latest_changeset_revision = changeset_hash
break
else:
update_to_changeset_hash = changeset_hash
else:
if changeset_hash == changeset_revision:
# We've found the changeset in the changelog for which we need to get the next update.
update_to_changeset_hash = changeset_hash
if from_update_manager:
if latest_changeset_revision == changeset_revision:
return no_update
return update
params['latest_changeset_revision'] = str(latest_changeset_revision)
params['latest_ctx_rev'] = str(update_to_ctx.rev())
url = util.build_url(galaxy_url, pathspec=pathspec, params=params)
return trans.response.send_redirect(url)
@web.expose
def contact_owner(self, trans, id, **kwd):
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
repository = repository_util.get_repository_in_tool_shed(trans.app, id)
metadata = metadata_util.get_repository_metadata_by_repository_id_changeset_revision(trans.app,
id,
repository.tip(trans.app),
metadata_only=True)
if trans.user and trans.user.email:
return trans.fill_template("/webapps/tool_shed/repository/contact_owner.mako",
repository=repository,
metadata=metadata,
message=message,
status=status)
else:
# Do all we can to eliminate spam.
return trans.show_error_message("You must be logged in to contact the owner of a repository.")
@web.expose
def create_galaxy_docker_image(self, trans, **kwd):
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
repository_ids = util.listify(kwd.get('id', ''))
if 'operation' in kwd:
if repository_ids:
operation = kwd['operation'].lower()
if operation == "include in docker image":
repository_tups = []
for repository_id in repository_ids:
repository = repository_util.get_repository_by_id(trans.app, repository_id)
repository_tups.append((str(repository.name),
str(repository.user.username),
str(repository.type)))
return trans.fill_template("/webapps/tool_shed/repository/docker_image_repositories.mako",
id=','.join(repository_ids),
repository_tups=repository_tups,
message=message,
status=status)
else:
# This can only occur when there is a multi-select grid with check boxes and an operation,
# and the user clicked the operation button without checking any of the check boxes.
kwd['message'] = "No items were selected."
kwd['status'] = 'error'
elif kwd.get('create_docker_image_button', False):
tmp_image_dir = tempfile.mkdtemp(prefix="tmp-toolshed-cdidir")
docker_file_name = 'Dockerfile'
docker_file_path = os.path.join(tmp_image_dir, docker_file_name)
            tool_shed_url = web.url_for('/', qualified=True)
repository_string = ''
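            # Build the Dockerfile contents in two passes: substitute each selected repository into
            # SELECTED_REPOSITORIES_TEMPLATE, then embed the accumulated block into DOCKER_IMAGE_TEMPLATE.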
for repository_id in repository_ids:
repository = repository_util.get_repository_by_id(trans.app, repository_id)
template = basic_util.SELECTED_REPOSITORIES_TEMPLATE
repository_template = \
string.Template(template).safe_substitute(tool_shed_url=tool_shed_url,
repository_owner=str(repository.user.username),
repository_name=str(repository.name))
repository_string = '%s\n%s' % (repository_string, repository_template)
template = basic_util.DOCKER_IMAGE_TEMPLATE
docker_image_template = \
string.Template(template).safe_substitute(selected_repositories=repository_string)
docker_image_string = docker_image_template
            trans.response.set_content_type('text/plain')
trans.response.headers["Content-Disposition"] = 'attachment; filename="%s"' % docker_file_name
opened_file = open(docker_file_path, "w")
opened_file.write(docker_image_string)
opened_file.close()
opened_file = open(docker_file_path, "r")
# Make sure the file is removed from disk after the contents have been downloaded.
os.unlink(docker_file_path)
docker_file_path, docker_file_name = os.path.split(docker_file_path)
basic_util.remove_dir(docker_file_path)
return opened_file
return self.docker_image_grid(trans, **kwd)
@web.expose
def create_repository(self, trans, **kwd):
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
categories = suc.get_categories(trans)
if not categories:
message = 'No categories have been configured in this instance of the Galaxy Tool Shed. '
message += 'An administrator needs to create some via the Administrator control panel before creating repositories.'
status = 'error'
return trans.response.send_redirect(web.url_for(controller='repository',
action='browse_repositories',
message=message,
status=status))
name = kwd.get('name', '').strip()
remote_repository_url = kwd.get('remote_repository_url', '')
homepage_url = kwd.get('homepage_url', '')
description = kwd.get('description', '')
long_description = kwd.get('long_description', '')
category_ids = util.listify(kwd.get('category_id', ''))
selected_categories = [trans.security.decode_id(id) for id in category_ids]
repository_type = kwd.get('repository_type', rt_util.UNRESTRICTED)
if kwd.get('create_repository_button', False):
error = False
message = repository_util.validate_repository_name(trans.app, name, trans.user)
if message:
error = True
if not description:
message = 'Enter a description.'
error = True
if error:
status = 'error'
else:
repository, message = repository_util.create_repository(trans.app,
name,
repository_type,
description,
long_description,
user_id=trans.user.id,
category_ids=category_ids,
remote_repository_url=remote_repository_url,
homepage_url=homepage_url)
trans.response.send_redirect(web.url_for(controller='repository',
action='manage_repository',
message=message,
id=trans.security.encode_id(repository.id)))
repository_type_select_field = rt_util.build_repository_type_select_field(trans)
return trans.fill_template('/webapps/tool_shed/repository/create_repository.mako',
name=name,
remote_repository_url=remote_repository_url,
homepage_url=homepage_url,
description=description,
long_description=long_description,
selected_categories=selected_categories,
categories=categories,
repository_type_select_field=repository_type_select_field,
message=message,
status=status)
@web.expose
@web.require_login("deprecate repository")
def deprecate(self, trans, **kwd):
"""Mark a repository in the tool shed as deprecated or not deprecated."""
# Marking a repository in the tool shed as deprecated has no effect on any downloadable changeset
        # revisions that may be associated with the repository. Revisions are not marked as not downloadable
# because those that have installed the repository must be allowed to get updates.
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
repository_id = kwd.get('id', None)
repository = repository_util.get_repository_in_tool_shed(trans.app, repository_id)
mark_deprecated = util.string_as_bool(kwd.get('mark_deprecated', False))
repository.deprecated = mark_deprecated
trans.sa_session.add(repository)
trans.sa_session.flush()
if mark_deprecated:
# Update the repository registry.
trans.app.repository_registry.remove_entry(repository)
message = 'The repository <b>%s</b> has been marked as deprecated.' % escape(repository.name)
else:
# Update the repository registry.
trans.app.repository_registry.add_entry(repository)
message = 'The repository <b>%s</b> has been marked as not deprecated.' % escape(repository.name)
trans.response.send_redirect(web.url_for(controller='repository',
action='browse_repositories',
operation='repositories_i_own',
message=message,
status=status))
@web.expose
def display_image_in_repository(self, trans, **kwd):
"""
        Open an image file that is contained in a repository or that is referenced by a URL for display. The image can be defined in
either a README.rst file contained in the repository or the help section of a Galaxy tool config that is contained in the repository.
The following image definitions are all supported. The former $PATH_TO_IMAGES is no longer required, and is now ignored.
.. image:: https://raw.github.com/galaxy/some_image.png
.. image:: $PATH_TO_IMAGES/some_image.png
.. image:: /static/images/some_image.gif
.. image:: some_image.jpg
.. image:: /deep/some_image.png
"""
repository_id = kwd.get('repository_id', None)
relative_path_to_image_file = kwd.get('image_file', None)
if repository_id and relative_path_to_image_file:
repository = repository_util.get_repository_in_tool_shed(trans.app, repository_id)
if repository:
repo_files_dir = repository.repo_path(trans.app)
path_to_file = repository_util.get_absolute_path_to_file_in_repository(repo_files_dir, relative_path_to_image_file)
if os.path.exists(path_to_file):
file_name = os.path.basename(relative_path_to_image_file)
try:
extension = file_name.split('.')[-1]
except Exception:
extension = None
if extension:
mimetype = trans.app.datatypes_registry.get_mimetype_by_extension(extension)
if mimetype:
trans.response.set_content_type(mimetype)
                    return open(path_to_file, 'rb')
return None
@web.expose
def display_tool(self, trans, repository_id, tool_config, changeset_revision, **kwd):
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
render_repository_actions_for = kwd.get('render_repository_actions_for', 'tool_shed')
with ValidationContext.from_app(trans.app) as validation_context:
tv = tool_validator.ToolValidator(validation_context)
repository, tool, message = tv.load_tool_from_changeset_revision(repository_id,
changeset_revision,
tool_config)
if message:
status = 'error'
tool_state = tool_util.new_state(trans, tool, invalid=False)
metadata = metadata_util.get_repository_metadata_by_repository_id_changeset_revision(trans.app,
repository_id,
changeset_revision,
metadata_only=True)
try:
return trans.fill_template("/webapps/tool_shed/repository/tool_form.mako",
repository=repository,
render_repository_actions_for=render_repository_actions_for,
metadata=metadata,
changeset_revision=changeset_revision,
tool=tool,
tool_state=tool_state,
message=message,
status=status)
except Exception as e:
message = "Error displaying tool, probably due to a problem in the tool config. The exception is: %s." % str(e)
if trans.webapp.name == 'galaxy' or render_repository_actions_for == 'galaxy':
return trans.response.send_redirect(web.url_for(controller='repository',
action='preview_tools_in_changeset',
repository_id=repository_id,
changeset_revision=changeset_revision,
message=message,
status='error'))
return trans.response.send_redirect(web.url_for(controller='repository',
action='browse_repositories',
operation='view_or_manage_repository',
id=repository_id,
changeset_revision=changeset_revision,
message=message,
status='error'))
@web.expose
def download(self, trans, repository_id, changeset_revision, file_type, **kwd):
"""Download an archive of the repository files compressed as zip, gz or bz2."""
        # FIXME: this will currently only download the repository tip, no matter which installable changeset_revision is being viewed.
# This should be enhanced to use the export method below, which accounts for the currently viewed changeset_revision.
repository = repository_util.get_repository_in_tool_shed(trans.app, repository_id)
# Allow hgweb to handle the download. This requires the tool shed
# server account's .hgrc file to include the following setting:
# [web]
# allow_archive = bz2, gz, zip
file_type_str = basic_util.get_file_type_str(changeset_revision, file_type)
repository.times_downloaded += 1
trans.sa_session.add(repository)
trans.sa_session.flush()
tool_shed_url = web.url_for('/', qualified=True)
pathspec = ['repos', str(repository.user.username), str(repository.name), 'archive', file_type_str]
download_url = util.build_url(tool_shed_url, pathspec=pathspec)
return trans.response.send_redirect(download_url)
@web.expose
def export(self, trans, repository_id, changeset_revision, **kwd):
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
export_repository_dependencies = kwd.get('export_repository_dependencies', '')
repository = repository_util.get_repository_in_tool_shed(trans.app, repository_id)
if kwd.get('export_repository_button', False):
# We'll currently support only gzip-compressed tar archives.
export_repository_dependencies = CheckboxField.is_checked(export_repository_dependencies)
tool_shed_url = web.url_for('/', qualified=True)
erm = capsule_manager.ExportRepositoryManager(app=trans.app,
user=trans.user,
tool_shed_url=tool_shed_url,
repository=repository,
changeset_revision=changeset_revision,
export_repository_dependencies=export_repository_dependencies,
using_api=False)
repositories_archive, error_message = erm.export_repository()
repositories_archive_filename = os.path.basename(repositories_archive.name)
if error_message:
message = error_message
status = 'error'
else:
trans.response.set_content_type('application/x-gzip')
trans.response.headers["Content-Disposition"] = 'attachment; filename="%s"' % (repositories_archive_filename)
                opened_archive = open(repositories_archive.name, 'rb')
# Make sure the file is removed from disk after the contents have been downloaded.
os.unlink(repositories_archive.name)
repositories_archive_path, file_name = os.path.split(repositories_archive.name)
basic_util.remove_dir(repositories_archive_path)
return opened_archive
repository_metadata = metadata_util.get_repository_metadata_by_changeset_revision(trans.app, repository_id, changeset_revision)
metadata = repository_metadata.metadata
toolshed_base_url = str(web.url_for('/', qualified=True)).rstrip('/')
# Initialize the repository dependency RelationBuilder.
rb = relation_builder.RelationBuilder(trans.app, repository, repository_metadata, toolshed_base_url)
# Work-around to ensure repositories that contain packages needed only for compiling
# a dependent package are included in the capsule.
rb.set_filter_dependencies_needed_for_compiling(False)
# Get a dictionary of all repositories upon which the contents of the current repository_metadata record depend.
repository_dependencies = rb.get_repository_dependencies_for_changeset_revision()
if repository_dependencies:
# Only display repository dependencies if they exist.
exclude = ['datatypes', 'invalid_repository_dependencies', 'invalid_tool_dependencies', 'invalid_tools',
'readme_files', 'tool_dependencies', 'tools', 'workflows', 'data_manager']
tsucm = ToolShedUtilityContainerManager(trans.app)
containers_dict = tsucm.build_repository_containers(repository,
changeset_revision,
repository_dependencies,
repository_metadata,
exclude=exclude)
export_repository_dependencies_check_box = CheckboxField('export_repository_dependencies', value=True)
else:
containers_dict = None
export_repository_dependencies_check_box = None
revision_label = hg_util.get_revision_label(trans.app, repository, changeset_revision, include_date=True)
return trans.fill_template("/webapps/tool_shed/repository/export_repository.mako",
changeset_revision=changeset_revision,
containers_dict=containers_dict,
export_repository_dependencies_check_box=export_repository_dependencies_check_box,
repository=repository,
repository_metadata=repository_metadata,
revision_label=revision_label,
metadata=metadata,
message=message,
status=status)
@web.expose
def export_via_api(self, trans, **kwd):
"""Return an exported gzip compressed repository archive file opened for reading."""
encoded_repositories_archive_name = kwd.get('encoded_repositories_archive_name', None)
if encoded_repositories_archive_name:
repositories_archive_name = encoding_util.tool_shed_decode(encoded_repositories_archive_name)
            opened_archive = open(repositories_archive_name, 'rb')
# Make sure the file is removed from disk after the contents have been downloaded.
os.unlink(repositories_archive_name)
return opened_archive
return ''
@web.expose
def find_tools(self, trans, **kwd):
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
common_util.handle_galaxy_url(trans, **kwd)
if 'operation' in kwd:
item_id = kwd.get('id', '')
if item_id:
operation = kwd['operation'].lower()
is_admin = trans.user_is_admin()
if operation == "view_or_manage_repository":
# The received id is a RepositoryMetadata id, so we have to get the repository id.
repository_metadata = metadata_util.get_repository_metadata_by_id(trans.app, item_id)
repository_id = trans.security.encode_id(repository_metadata.repository.id)
repository = repository_util.get_repository_in_tool_shed(trans.app, repository_id)
kwd['id'] = repository_id
kwd['changeset_revision'] = repository_metadata.changeset_revision
if trans.webapp.name == 'tool_shed' and (is_admin or repository.user == trans.user):
a = 'manage_repository'
else:
a = 'view_repository'
return trans.response.send_redirect(web.url_for(controller='repository',
action=a,
**kwd))
if operation == "install to galaxy":
# We've received a list of RepositoryMetadata ids, so we need to build a list of associated Repository ids.
encoded_repository_ids = []
changeset_revisions = []
for repository_metadata_id in util.listify(item_id):
repository_metadata = metadata_util.get_repository_metadata_by_id(trans.app, repository_metadata_id)
encoded_repository_ids.append(trans.security.encode_id(repository_metadata.repository.id))
changeset_revisions.append(repository_metadata.changeset_revision)
new_kwd = {}
new_kwd['repository_ids'] = encoded_repository_ids
new_kwd['changeset_revisions'] = changeset_revisions
return trans.response.send_redirect(web.url_for(controller='repository',
action='install_repositories_by_revision',
**new_kwd))
else:
# This can only occur when there is a multi-select grid with check boxes and an operation,
# and the user clicked the operation button without checking any of the check boxes.
return trans.show_error_message("No items were selected.")
tool_ids = [item.lower() for item in util.listify(kwd.get('tool_id', ''))]
tool_names = [item.lower() for item in util.listify(kwd.get('tool_name', ''))]
tool_versions = [item.lower() for item in util.listify(kwd.get('tool_version', ''))]
exact_matches = kwd.get('exact_matches', '')
exact_matches_checked = CheckboxField.is_checked(exact_matches)
match_tuples = []
ok = True
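        # If the comma-separated lists of tool ids, names and versions do not contain the same number of
        # items, search_repository_metadata() sets ok to False and the error message below is displayed.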
if tool_ids or tool_names or tool_versions:
ok, match_tuples = search_util.search_repository_metadata(trans.app,
exact_matches_checked,
tool_ids=tool_ids,
tool_names=tool_names,
tool_versions=tool_versions)
if ok:
kwd['match_tuples'] = match_tuples
# Render the list view
if trans.webapp.name == 'galaxy':
# Our initial request originated from a Galaxy instance.
global_actions = [grids.GridAction("Browse valid repositories",
dict(controller='repository', action='browse_valid_categories')),
grids.GridAction("Search for valid tools",
dict(controller='repository', action='find_tools')),
grids.GridAction("Search for workflows",
dict(controller='repository', action='find_workflows'))]
self.install_matched_repository_grid.global_actions = global_actions
install_url_args = dict(controller='repository', action='find_tools')
operations = [grids.GridOperation("Install", url_args=install_url_args, allow_multiple=True, async_compatible=False)]
self.install_matched_repository_grid.operations = operations
return self.install_matched_repository_grid(trans, **kwd)
else:
kwd['message'] = "tool id: <b>%s</b><br/>tool name: <b>%s</b><br/>tool version: <b>%s</b><br/>exact matches only: <b>%s</b>" % \
                    (escape(basic_util.stringify(tool_ids)),
escape(basic_util.stringify(tool_names)),
escape(basic_util.stringify(tool_versions)),
str(exact_matches_checked))
self.matched_repository_grid.title = "Repositories with matching tools"
return self.matched_repository_grid(trans, **kwd)
else:
message = "No search performed - each field must contain the same number of comma-separated items."
status = "error"
exact_matches_check_box = CheckboxField('exact_matches', value=exact_matches_checked)
return trans.fill_template('/webapps/tool_shed/repository/find_tools.mako',
tool_id=basic_util.stringify(tool_ids),
tool_name=basic_util.stringify(tool_names),
tool_version=basic_util.stringify(tool_versions),
exact_matches_check_box=exact_matches_check_box,
message=message,
status=status)
@web.expose
def find_workflows(self, trans, **kwd):
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
common_util.handle_galaxy_url(trans, **kwd)
if 'operation' in kwd:
item_id = kwd.get('id', '')
if item_id:
operation = kwd['operation'].lower()
is_admin = trans.user_is_admin()
if operation == "view_or_manage_repository":
# The received id is a RepositoryMetadata id, so we have to get the repository id.
repository_metadata = metadata_util.get_repository_metadata_by_id(trans.app, item_id)
repository_id = trans.security.encode_id(repository_metadata.repository.id)
repository = repository_util.get_repository_in_tool_shed(trans.app, repository_id)
kwd['id'] = repository_id
kwd['changeset_revision'] = repository_metadata.changeset_revision
if trans.webapp.name == 'tool_shed' and (is_admin or repository.user == trans.user):
a = 'manage_repository'
else:
a = 'view_repository'
return trans.response.send_redirect(web.url_for(controller='repository',
action=a,
**kwd))
if operation == "install to galaxy":
# We've received a list of RepositoryMetadata ids, so we need to build a list of associated Repository ids.
encoded_repository_ids = []
changeset_revisions = []
for repository_metadata_id in util.listify(item_id):
                        repository_metadata = metadata_util.get_repository_metadata_by_id(trans.app, repository_metadata_id)
encoded_repository_ids.append(trans.security.encode_id(repository_metadata.repository.id))
changeset_revisions.append(repository_metadata.changeset_revision)
new_kwd = {}
new_kwd['repository_ids'] = encoded_repository_ids
new_kwd['changeset_revisions'] = changeset_revisions
return trans.response.send_redirect(web.url_for(controller='repository',
action='install_repositories_by_revision',
**new_kwd))
else:
# This can only occur when there is a multi-select grid with check boxes and an operation,
# and the user clicked the operation button without checking any of the check boxes.
return trans.show_error_message("No items were selected.")
if 'find_workflows_button' in kwd:
workflow_names = [item.lower() for item in util.listify(kwd.get('workflow_name', ''))]
exact_matches = kwd.get('exact_matches', '')
exact_matches_checked = CheckboxField.is_checked(exact_matches)
match_tuples = []
ok = True
if workflow_names:
ok, match_tuples = search_util.search_repository_metadata(trans.app,
exact_matches_checked,
workflow_names=workflow_names)
else:
ok, match_tuples = search_util.search_repository_metadata(trans.app,
exact_matches_checked,
workflow_names=[],
all_workflows=True)
if ok:
kwd['match_tuples'] = match_tuples
if trans.webapp.name == 'galaxy':
# Our initial request originated from a Galaxy instance.
global_actions = [grids.GridAction("Browse valid repositories",
dict(controller='repository', action='browse_valid_repositories')),
grids.GridAction("Search for valid tools",
dict(controller='repository', action='find_tools')),
grids.GridAction("Search for workflows",
dict(controller='repository', action='find_workflows'))]
self.install_matched_repository_grid.global_actions = global_actions
install_url_args = dict(controller='repository', action='find_workflows')
operations = [grids.GridOperation("Install", url_args=install_url_args, allow_multiple=True, async_compatible=False)]
self.install_matched_repository_grid.operations = operations
return self.install_matched_repository_grid(trans, **kwd)
else:
kwd['message'] = "workflow name: <b>%s</b><br/>exact matches only: <b>%s</b>" % \
(escape(basic_util.stringify(workflow_names)), str(exact_matches_checked))
self.matched_repository_grid.title = "Repositories with matching workflows"
return self.matched_repository_grid(trans, **kwd)
else:
message = "No search performed - each field must contain the same number of comma-separated items."
status = "error"
else:
exact_matches_checked = False
workflow_names = []
exact_matches_check_box = CheckboxField('exact_matches', value=exact_matches_checked)
return trans.fill_template('/webapps/tool_shed/repository/find_workflows.mako',
workflow_name=basic_util.stringify(workflow_names),
exact_matches_check_box=exact_matches_check_box,
message=message,
status=status)
@web.expose
def generate_workflow_image(self, trans, workflow_name, repository_metadata_id=None):
"""Return an svg image representation of a workflow dictionary created when the workflow was exported."""
return workflow_util.generate_workflow_image(trans, workflow_name, repository_metadata_id=repository_metadata_id, repository_id=None)
@web.expose
def get_changeset_revision_and_ctx_rev(self, trans, **kwd):
"""Handle a request from a local Galaxy instance to retrieve the changeset revision hash to which an installed repository can be updated."""
def has_galaxy_utilities(repository_metadata):
has_galaxy_utilities_dict = dict(includes_data_managers=False,
includes_datatypes=False,
includes_tools=False,
includes_tools_for_display_in_tool_panel=False,
has_repository_dependencies=False,
has_repository_dependencies_only_if_compiling_contained_td=False,
includes_tool_dependencies=False,
includes_workflows=False)
if repository_metadata:
metadata = repository_metadata.metadata
if metadata:
if 'data_manager' in metadata:
has_galaxy_utilities_dict['includes_data_managers'] = True
if 'datatypes' in metadata:
has_galaxy_utilities_dict['includes_datatypes'] = True
if 'tools' in metadata:
has_galaxy_utilities_dict['includes_tools'] = True
if 'tool_dependencies' in metadata:
has_galaxy_utilities_dict['includes_tool_dependencies'] = True
repository_dependencies_dict = metadata.get('repository_dependencies', {})
repository_dependencies = repository_dependencies_dict.get('repository_dependencies', [])
has_repository_dependencies, has_repository_dependencies_only_if_compiling_contained_td = \
repository_util.get_repository_dependency_types(repository_dependencies)
has_galaxy_utilities_dict['has_repository_dependencies'] = has_repository_dependencies
has_galaxy_utilities_dict['has_repository_dependencies_only_if_compiling_contained_td'] = \
has_repository_dependencies_only_if_compiling_contained_td
if 'workflows' in metadata:
has_galaxy_utilities_dict['includes_workflows'] = True
return has_galaxy_utilities_dict
name = kwd.get('name', None)
owner = kwd.get('owner', None)
changeset_revision = kwd.get('changeset_revision', None)
repository = repository_util.get_repository_by_name_and_owner(trans.app, name, owner)
repository_metadata = metadata_util.get_repository_metadata_by_changeset_revision(trans.app,
trans.security.encode_id(repository.id),
changeset_revision)
has_galaxy_utilities_dict = has_galaxy_utilities(repository_metadata)
includes_data_managers = has_galaxy_utilities_dict['includes_data_managers']
includes_datatypes = has_galaxy_utilities_dict['includes_datatypes']
includes_tools = has_galaxy_utilities_dict['includes_tools']
includes_tools_for_display_in_tool_panel = has_galaxy_utilities_dict['includes_tools_for_display_in_tool_panel']
includes_tool_dependencies = has_galaxy_utilities_dict['includes_tool_dependencies']
has_repository_dependencies = has_galaxy_utilities_dict['has_repository_dependencies']
has_repository_dependencies_only_if_compiling_contained_td = \
has_galaxy_utilities_dict['has_repository_dependencies_only_if_compiling_contained_td']
includes_workflows = has_galaxy_utilities_dict['includes_workflows']
repo = hg_util.get_repo_for_repository(trans.app, repository=repository, repo_path=None, create=False)
# Default to the received changeset revision and ctx_rev.
update_to_ctx = hg_util.get_changectx_for_changeset(repo, changeset_revision)
ctx_rev = str(update_to_ctx.rev())
latest_changeset_revision = changeset_revision
update_dict = dict(changeset_revision=changeset_revision,
ctx_rev=ctx_rev,
includes_data_managers=includes_data_managers,
includes_datatypes=includes_datatypes,
includes_tools=includes_tools,
includes_tools_for_display_in_tool_panel=includes_tools_for_display_in_tool_panel,
includes_tool_dependencies=includes_tool_dependencies,
has_repository_dependencies=has_repository_dependencies,
has_repository_dependencies_only_if_compiling_contained_td=has_repository_dependencies_only_if_compiling_contained_td,
includes_workflows=includes_workflows)
if changeset_revision == repository.tip(trans.app):
# If changeset_revision is the repository tip, there are no additional updates.
return encoding_util.tool_shed_encode(update_dict)
else:
if repository_metadata:
# If changeset_revision is in the repository_metadata table for this repository, there are no additional updates.
return encoding_util.tool_shed_encode(update_dict)
else:
# The changeset_revision column in the repository_metadata table has been updated with a new changeset_revision value since the
# repository was installed. We need to find the changeset_revision to which we need to update.
update_to_changeset_hash = None
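                # Walk the changelog from oldest to newest to locate the installed changeset_revision and
                # determine the changeset to which the installed repository should be updated, along with
                # the Galaxy utilities (tools, datatypes, workflows, etc.) contained in that revision.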
for changeset in repo.changelog:
includes_tools = False
has_repository_dependencies = False
has_repository_dependencies_only_if_compiling_contained_td = False
changeset_hash = str(repo.changectx(changeset))
hg_util.get_changectx_for_changeset(repo, changeset_hash)
if update_to_changeset_hash:
update_to_repository_metadata = metadata_util.get_repository_metadata_by_changeset_revision(trans.app,
trans.security.encode_id(repository.id),
changeset_hash)
if update_to_repository_metadata:
                            has_galaxy_utilities_dict = has_galaxy_utilities(update_to_repository_metadata)
includes_data_managers = has_galaxy_utilities_dict['includes_data_managers']
includes_datatypes = has_galaxy_utilities_dict['includes_datatypes']
includes_tools = has_galaxy_utilities_dict['includes_tools']
includes_tools_for_display_in_tool_panel = has_galaxy_utilities_dict['includes_tools_for_display_in_tool_panel']
includes_tool_dependencies = has_galaxy_utilities_dict['includes_tool_dependencies']
has_repository_dependencies = has_galaxy_utilities_dict['has_repository_dependencies']
has_repository_dependencies_only_if_compiling_contained_td = has_galaxy_utilities_dict['has_repository_dependencies_only_if_compiling_contained_td']
includes_workflows = has_galaxy_utilities_dict['includes_workflows']
# We found a RepositoryMetadata record.
if changeset_hash == repository.tip(trans.app):
# The current ctx is the repository tip, so use it.
update_to_ctx = hg_util.get_changectx_for_changeset(repo, changeset_hash)
latest_changeset_revision = changeset_hash
else:
update_to_ctx = hg_util.get_changectx_for_changeset(repo, update_to_changeset_hash)
latest_changeset_revision = update_to_changeset_hash
break
elif not update_to_changeset_hash and changeset_hash == changeset_revision:
# We've found the changeset in the changelog for which we need to get the next update.
update_to_changeset_hash = changeset_hash
update_dict['includes_data_managers'] = includes_data_managers
update_dict['includes_datatypes'] = includes_datatypes
update_dict['includes_tools'] = includes_tools
update_dict['includes_tools_for_display_in_tool_panel'] = includes_tools_for_display_in_tool_panel
update_dict['includes_tool_dependencies'] = includes_tool_dependencies
update_dict['includes_workflows'] = includes_workflows
update_dict['has_repository_dependencies'] = has_repository_dependencies
update_dict['has_repository_dependencies_only_if_compiling_contained_td'] = has_repository_dependencies_only_if_compiling_contained_td
update_dict['changeset_revision'] = str(latest_changeset_revision)
update_dict['ctx_rev'] = str(update_to_ctx.rev())
return encoding_util.tool_shed_encode(update_dict)
@web.expose
def get_ctx_rev(self, trans, **kwd):
"""Given a repository and changeset_revision, return the correct ctx.rev() value."""
repository_name = kwd['name']
repository_owner = kwd['owner']
changeset_revision = kwd['changeset_revision']
repository = repository_util.get_repository_by_name_and_owner(trans.app, repository_name, repository_owner)
repo = hg_util.get_repo_for_repository(trans.app, repository=repository, repo_path=None, create=False)
ctx = hg_util.get_changectx_for_changeset(repo, changeset_revision)
if ctx:
return str(ctx.rev())
return ''
@web.json
def get_file_contents(self, trans, file_path, repository_id):
# Avoid caching
trans.response.headers['Pragma'] = 'no-cache'
trans.response.headers['Expires'] = '0'
is_admin = trans.user_is_admin()
return suc.get_repository_file_contents(trans.app, file_path, repository_id, is_admin)
@web.json
def get_latest_downloadable_changeset_revision(self, trans, **kwd):
"""
Return the latest installable changeset revision for the repository associated with the received
name and owner. This method is called from Galaxy when attempting to install the latest revision
of an installed repository.
"""
repository_name = kwd.get('name', None)
repository_owner = kwd.get('owner', None)
if repository_name is not None and repository_owner is not None:
repository = repository_util.get_repository_by_name_and_owner(trans.app, repository_name, repository_owner)
if repository:
repo = hg_util.get_repo_for_repository(trans.app, repository=repository, repo_path=None, create=False)
return metadata_util.get_latest_downloadable_changeset_revision(trans.app, repository, repo)
return hg_util.INITIAL_CHANGELOG_HASH
@web.json
def get_readme_files(self, trans, **kwd):
"""
This method is called when installing or re-installing a single repository into a Galaxy instance.
If the received changeset_revision includes one or more readme files, return them in a dictionary.
"""
repository_name = kwd.get('name', None)
repository_owner = kwd.get('owner', None)
changeset_revision = kwd.get('changeset_revision', None)
if repository_name is not None and repository_owner is not None and changeset_revision is not None:
repository = repository_util.get_repository_by_name_and_owner(trans.app, repository_name, repository_owner)
if repository:
repository_metadata = \
metadata_util.get_repository_metadata_by_changeset_revision(trans.app,
trans.security.encode_id(repository.id),
changeset_revision)
if repository_metadata:
metadata = repository_metadata.metadata
if metadata:
return readme_util.build_readme_files_dict(trans.app,
repository,
changeset_revision,
repository_metadata.metadata)
return {}
@web.json
def get_repository_dependencies(self, trans, **kwd):
"""
Return an encoded dictionary of all repositories upon which the contents of the received repository
        depend.
"""
name = kwd.get('name', None)
owner = kwd.get('owner', None)
changeset_revision = kwd.get('changeset_revision', None)
repository = repository_util.get_repository_by_name_and_owner(trans.app, name, owner)
# get_repository_dependencies( self, app, changeset, toolshed_url )
dependencies = repository.get_repository_dependencies(trans.app, changeset_revision, web.url_for('/', qualified=True))
if dependencies:
return encoding_util.tool_shed_encode(dependencies)
return ''
@web.expose
def get_repository_id(self, trans, **kwd):
"""Given a repository name and owner, return the encoded repository id."""
repository_name = kwd['name']
repository_owner = kwd['owner']
repository = repository_util.get_repository_by_name_and_owner(trans.app, repository_name, repository_owner)
if repository:
return trans.security.encode_id(repository.id)
return ''
@web.json
def get_repository_information(self, trans, repository_ids, changeset_revisions, **kwd):
"""
Generate a list of dictionaries, each of which contains the information about a repository that will
be necessary for installing it into a local Galaxy instance.
"""
includes_tools = False
includes_tools_for_display_in_tool_panel = False
has_repository_dependencies = False
has_repository_dependencies_only_if_compiling_contained_td = False
includes_tool_dependencies = False
repo_info_dicts = []
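        # Aggregate the capability flags across all requested repositories; each flag is True if any
        # of the repositories sets it.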
for tup in zip(util.listify(repository_ids), util.listify(changeset_revisions)):
repository_id, changeset_revision = tup
repo_info_dict, cur_includes_tools, cur_includes_tool_dependencies, cur_includes_tools_for_display_in_tool_panel, \
cur_has_repository_dependencies, cur_has_repository_dependencies_only_if_compiling_contained_td = \
repository_util.get_repo_info_dict(trans.app, trans.user, repository_id, changeset_revision)
if cur_has_repository_dependencies and not has_repository_dependencies:
has_repository_dependencies = True
if cur_has_repository_dependencies_only_if_compiling_contained_td and not has_repository_dependencies_only_if_compiling_contained_td:
has_repository_dependencies_only_if_compiling_contained_td = True
if cur_includes_tools and not includes_tools:
includes_tools = True
if cur_includes_tool_dependencies and not includes_tool_dependencies:
includes_tool_dependencies = True
if cur_includes_tools_for_display_in_tool_panel and not includes_tools_for_display_in_tool_panel:
includes_tools_for_display_in_tool_panel = True
repo_info_dicts.append(encoding_util.tool_shed_encode(repo_info_dict))
return dict(includes_tools=includes_tools,
includes_tools_for_display_in_tool_panel=includes_tools_for_display_in_tool_panel,
has_repository_dependencies=has_repository_dependencies,
has_repository_dependencies_only_if_compiling_contained_td=has_repository_dependencies_only_if_compiling_contained_td,
includes_tool_dependencies=includes_tool_dependencies,
repo_info_dicts=repo_info_dicts)
@web.expose
def get_repository_type(self, trans, **kwd):
"""Given a repository name and owner, return the type."""
repository_name = kwd['name']
repository_owner = kwd['owner']
repository = repository_util.get_repository_by_name_and_owner(trans.app, repository_name, repository_owner)
return str(repository.type)
@web.json
def get_required_repo_info_dict(self, trans, encoded_str=None):
"""
Retrieve and return a dictionary that includes a list of dictionaries that each contain all of the
information needed to install the list of repositories defined by the received encoded_str.
"""
repo_info_dict = {}
if encoded_str:
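            # The encoded string is a list of repository dependency tuples joined by encoding_util.encoding_sep2;
            # each tuple's fields (tool_shed, name, owner, changeset_revision, prior_installation_required,
            # only_if_compiling_contained_td) are joined by encoding_util.encoding_sep.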
encoded_required_repository_str = encoding_util.tool_shed_decode(encoded_str)
encoded_required_repository_tups = encoded_required_repository_str.split(encoding_util.encoding_sep2)
decoded_required_repository_tups = []
for encoded_required_repository_tup in encoded_required_repository_tups:
decoded_required_repository_tups.append(encoded_required_repository_tup.split(encoding_util.encoding_sep))
encoded_repository_ids = []
changeset_revisions = []
for required_repository_tup in decoded_required_repository_tups:
tool_shed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td = \
common_util.parse_repository_dependency_tuple(required_repository_tup)
repository = repository_util.get_repository_by_name_and_owner(trans.app, name, owner)
encoded_repository_ids.append(trans.security.encode_id(repository.id))
changeset_revisions.append(changeset_revision)
if encoded_repository_ids and changeset_revisions:
repo_info_dict = json.loads(self.get_repository_information(trans, encoded_repository_ids, changeset_revisions))
return repo_info_dict
@web.expose
def get_tool_dependencies(self, trans, **kwd):
"""
Handle a request from a Galaxy instance to get the tool_dependencies entry from the metadata
for a specified changeset revision.
"""
name = kwd.get('name', None)
owner = kwd.get('owner', None)
changeset_revision = kwd.get('changeset_revision', None)
repository = repository_util.get_repository_by_name_and_owner(trans.app, name, owner)
dependencies = repository.get_tool_dependencies(trans.app, changeset_revision)
if len(dependencies) > 0:
return encoding_util.tool_shed_encode(dependencies)
return ''
@web.expose
def get_tool_dependencies_config_contents(self, trans, **kwd):
"""
Handle a request from a Galaxy instance to get the tool_dependencies.xml file contents for a
specified changeset revision.
"""
name = kwd.get('name', None)
owner = kwd.get('owner', None)
repository = repository_util.get_repository_by_name_and_owner(trans.app, name, owner)
        # TODO: We're currently returning the tool_dependencies.xml file that is available on disk. We need
        # to enhance this process to retrieve older versions of the tool_dependencies.xml file from the repository
        # manifest.
repo_dir = repository.repo_path(trans.app)
# Get the tool_dependencies.xml file from disk.
tool_dependencies_config = hg_util.get_config_from_disk(rt_util.TOOL_DEPENDENCY_DEFINITION_FILENAME, repo_dir)
# Return the encoded contents of the tool_dependencies.xml file.
if tool_dependencies_config:
tool_dependencies_config_file = open(tool_dependencies_config, 'rb')
contents = tool_dependencies_config_file.read()
tool_dependencies_config_file.close()
return contents
return ''
@web.json
def get_tool_dependency_definition_metadata(self, trans, **kwd):
"""
        Given the name and owner of a repository whose type is
        tool_dependency_definition, return the current metadata.
"""
repository_name = kwd['name']
repository_owner = kwd['owner']
repository = repository_util.get_repository_by_name_and_owner(trans.app, repository_name, repository_owner)
encoded_id = trans.app.security.encode_id(repository.id)
repository_tip = repository.tip(trans.app)
repository_metadata = metadata_util.get_repository_metadata_by_changeset_revision(trans.app,
encoded_id,
repository_tip)
return repository_metadata.metadata
@web.expose
def get_tool_versions(self, trans, **kwd):
"""
        For each valid/downloadable changeset (up to the received changeset_revision) in the repository's
        changelog, append the changeset tool_versions dictionary to the list that will be returned.
"""
name = kwd['name']
owner = kwd['owner']
changeset_revision = kwd['changeset_revision']
repository = repository_util.get_repository_by_name_and_owner(trans.app, name, owner)
repo = hg_util.get_repo_for_repository(trans.app, repository=repository, repo_path=None, create=False)
tool_version_dicts = []
for changeset in repo.changelog:
current_changeset_revision = str(repo.changectx(changeset))
repository_metadata = metadata_util.get_repository_metadata_by_changeset_revision(trans.app,
trans.security.encode_id(repository.id),
current_changeset_revision)
if repository_metadata and repository_metadata.tool_versions:
tool_version_dicts.append(repository_metadata.tool_versions)
if current_changeset_revision == changeset_revision:
break
if tool_version_dicts:
return json.dumps(tool_version_dicts)
return ''
@web.json
def get_updated_repository_information(self, trans, name, owner, changeset_revision, **kwd):
"""
Generate a dictionary that contains the information about a repository that is necessary for installing
it into a local Galaxy instance.
"""
repository = repository_util.get_repository_by_name_and_owner(trans.app, name, owner)
repository_id = trans.security.encode_id(repository.id)
repository_clone_url = common_util.generate_clone_url_for_repository_in_tool_shed(trans.user, repository)
repo = hg_util.get_repo_for_repository(trans.app, repository=repository, repo_path=None, create=False)
repository_metadata = metadata_util.get_repository_metadata_by_changeset_revision(trans.app, repository_id, changeset_revision)
if not repository_metadata:
# The received changeset_revision is no longer associated with metadata, so get the next changeset_revision in the repository
# changelog that is associated with metadata.
changeset_revision = metadata_util.get_next_downloadable_changeset_revision(repository,
repo,
after_changeset_revision=changeset_revision)
repository_metadata = metadata_util.get_repository_metadata_by_changeset_revision(trans.app, repository_id, changeset_revision)
ctx = hg_util.get_changectx_for_changeset(repo, changeset_revision)
repo_info_dict = repository_util.create_repo_info_dict(app=trans.app,
repository_clone_url=repository_clone_url,
changeset_revision=changeset_revision,
ctx_rev=str(ctx.rev()),
repository_owner=repository.user.username,
repository_name=repository.name,
repository=repository,
repository_metadata=repository_metadata,
tool_dependencies=None,
repository_dependencies=None)
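        # Inspect the metadata to flag what the repository contains (data managers, datatypes, tools,
        # workflows) so the requesting Galaxy instance knows how to handle the installation.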
includes_data_managers = False
includes_datatypes = False
includes_tools = False
includes_tools_for_display_in_tool_panel = False
includes_workflows = False
readme_files_dict = None
metadata = repository_metadata.metadata
if metadata:
if 'data_manager' in metadata:
includes_data_managers = True
if 'datatypes' in metadata:
includes_datatypes = True
if 'tools' in metadata:
includes_tools = True
# Handle includes_tools_for_display_in_tool_panel.
tool_dicts = metadata['tools']
for tool_dict in tool_dicts:
if tool_dict.get('includes_tools_for_display_in_tool_panel', False):
includes_tools_for_display_in_tool_panel = True
break
if 'workflows' in metadata:
includes_workflows = True
readme_files_dict = readme_util.build_readme_files_dict(trans.app, repository, changeset_revision, metadata)
# See if the repo_info_dict was populated with repository_dependencies or tool_dependencies.
has_repository_dependencies = False
has_repository_dependencies_only_if_compiling_contained_td = False
includes_tool_dependencies = False
for name, repo_info_tuple in repo_info_dict.items():
if not has_repository_dependencies or not has_repository_dependencies_only_if_compiling_contained_td or not includes_tool_dependencies:
                description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, repository_dependencies, tool_dependencies = \
repository_util.get_repo_info_tuple_contents(repo_info_tuple)
for rd_key, rd_tups in repository_dependencies.items():
if rd_key in ['root_key', 'description']:
continue
curr_has_repository_dependencies, curr_has_repository_dependencies_only_if_compiling_contained_td = \
repository_util.get_repository_dependency_types(rd_tups)
if curr_has_repository_dependencies and not has_repository_dependencies:
has_repository_dependencies = True
if curr_has_repository_dependencies_only_if_compiling_contained_td and not has_repository_dependencies_only_if_compiling_contained_td:
has_repository_dependencies_only_if_compiling_contained_td = True
if tool_dependencies and not includes_tool_dependencies:
includes_tool_dependencies = True
return dict(includes_data_managers=includes_data_managers,
includes_datatypes=includes_datatypes,
includes_tools=includes_tools,
includes_tools_for_display_in_tool_panel=includes_tools_for_display_in_tool_panel,
has_repository_dependencies=has_repository_dependencies,
has_repository_dependencies_only_if_compiling_contained_td=has_repository_dependencies_only_if_compiling_contained_td,
includes_tool_dependencies=includes_tool_dependencies,
includes_workflows=includes_workflows,
readme_files_dict=readme_files_dict,
repo_info_dict=repo_info_dict)
@web.expose
def help(self, trans, **kwd):
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
return trans.fill_template('/webapps/tool_shed/repository/help.mako', message=message, status=status, **kwd)
@web.expose
def import_capsule(self, trans, **kwd):
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
capsule_file_name = kwd.get('capsule_file_name', None)
encoded_file_path = kwd.get('encoded_file_path', None)
file_path = encoding_util.tool_shed_decode(encoded_file_path)
export_info_file_path = os.path.join(file_path, 'export_info.xml')
irm = capsule_manager.ImportRepositoryManager(trans.app,
trans.request.host,
trans.user,
trans.user_is_admin())
export_info_dict = irm.get_export_info_dict(export_info_file_path)
manifest_file_path = os.path.join(file_path, 'manifest.xml')
# The manifest.xml file has already been validated, so no error_message should be returned here.
repository_info_dicts, error_message = irm.get_repository_info_from_manifest(manifest_file_path)
# Determine the status for each exported repository archive contained within the capsule.
repository_status_info_dicts = irm.get_repository_status_from_tool_shed(repository_info_dicts)
if 'import_capsule_button' in kwd:
# Generate a list of repository name / import results message tuples for display after the capsule is imported.
import_results_tups = []
# Only create repositories that do not yet exist and that the current user is authorized to create. The
# status will be None for repositories that fall into the intersection of these 2 categories.
for repository_status_info_dict in repository_status_info_dicts:
# Add the capsule_file_name and encoded_file_path to the repository_status_info_dict.
repository_status_info_dict['capsule_file_name'] = capsule_file_name
repository_status_info_dict['encoded_file_path'] = encoded_file_path
import_results_tups = irm.create_repository_and_import_archive(repository_status_info_dict,
import_results_tups)
irm.check_status_and_reset_downloadable(import_results_tups)
basic_util.remove_dir(file_path)
return trans.fill_template('/webapps/tool_shed/repository/import_capsule_results.mako',
export_info_dict=export_info_dict,
import_results_tups=import_results_tups,
message=message,
status=status)
return trans.fill_template('/webapps/tool_shed/repository/import_capsule.mako',
encoded_file_path=encoded_file_path,
export_info_dict=export_info_dict,
repository_status_info_dicts=repository_status_info_dicts,
message=message,
status=status)
@web.expose
def index(self, trans, **kwd):
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
# See if there are any RepositoryMetadata records since menu items require them.
repository_metadata = trans.sa_session.query(trans.model.RepositoryMetadata).first()
current_user = trans.user
# TODO: move the following to some in-memory register so these queries can be done once
# at startup. The in-memory register can then be managed during the current session.
can_administer_repositories = False
has_reviewed_repositories = False
has_deprecated_repositories = False
if current_user:
# See if the current user owns any repositories that have been reviewed.
for repository in current_user.active_repositories:
if repository.reviews:
has_reviewed_repositories = True
break
# See if the current user has any repositories that have been marked as deprecated.
for repository in current_user.active_repositories:
if repository.deprecated:
has_deprecated_repositories = True
break
# See if the current user can administer any repositories, but only if not an admin user.
if not trans.user_is_admin():
if current_user.active_repositories:
can_administer_repositories = True
else:
for repository in trans.sa_session.query(trans.model.Repository) \
.filter(trans.model.Repository.table.c.deleted == false()):
if trans.app.security_agent.user_can_administer_repository(current_user, repository):
can_administer_repositories = True
break
        # The route in may have been from a sharable URL, in which case we'll have a user_id and possibly a repository_id.
# The received user_id will be the id of the repository owner.
user_id = kwd.get('user_id', None)
repository_id = kwd.get('repository_id', None)
changeset_revision = kwd.get('changeset_revision', None)
return trans.fill_template('/webapps/tool_shed/index.mako',
repository_metadata=repository_metadata,
can_administer_repositories=can_administer_repositories,
has_reviewed_repositories=has_reviewed_repositories,
has_deprecated_repositories=has_deprecated_repositories,
user_id=user_id,
repository_id=repository_id,
changeset_revision=changeset_revision,
message=message,
status=status)
@web.expose
def install_repositories_by_revision(self, trans, **kwd):
"""
Send the list of repository_ids and changeset_revisions to Galaxy so it can begin the installation
process. If the value of repository_ids is not received, then the name and owner of a single repository
must be received to install a single repository.
"""
repository_ids = kwd.get('repository_ids', None)
changeset_revisions = kwd.get('changeset_revisions', None)
name = kwd.get('name', None)
owner = kwd.get('owner', None)
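        # If explicit repository ids were not received, resolve a single repository by name and owner.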
if not repository_ids:
repository = repository_util.get_repository_by_name_and_owner(trans.app, name, owner)
repository_ids = trans.security.encode_id(repository.id)
galaxy_url = common_util.handle_galaxy_url(trans, **kwd)
if galaxy_url:
# Redirect back to local Galaxy to perform install.
params = dict(tool_shed_url=web.url_for('/', qualified=True),
repository_ids=','.join(util.listify(repository_ids)),
changeset_revisions=','.join(util.listify(changeset_revisions)))
pathspec = ['admin_toolshed', 'prepare_for_install']
url = util.build_url(galaxy_url, pathspec=pathspec, params=params)
return trans.response.send_redirect(url)
else:
message = 'Repository installation is not possible due to an invalid Galaxy URL: <b>%s</b>. ' % galaxy_url
message += 'You may need to enable third-party cookies in your browser. '
status = 'error'
return trans.response.send_redirect(web.url_for(controller='repository',
action='browse_valid_categories',
message=message,
status=status))
@web.expose
def load_invalid_tool(self, trans, repository_id, tool_config, changeset_revision, **kwd):
message = escape(kwd.get('message', ''))
render_repository_actions_for = kwd.get('render_repository_actions_for', 'tool_shed')
with ValidationContext.from_app(trans.app) as validation_context:
tv = tool_validator.ToolValidator(validation_context)
repository, tool, error_message = tv.load_tool_from_changeset_revision(repository_id,
changeset_revision,
tool_config)
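            # Build a fresh state for the (invalid) tool and collect any problems with its input
            # parameters so an informative message can be rendered with the broken tool form.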
tool_state = tool_util.new_state(trans, tool, invalid=True)
invalid_file_tups = []
if tool:
invalid_file_tups = tv.check_tool_input_params(repository.repo_path(trans.app),
tool_config,
tool,
[])
if invalid_file_tups:
message = tool_util.generate_message_for_invalid_tools(trans.app,
invalid_file_tups,
repository,
{},
as_html=True,
displaying_invalid_tool=True)
elif error_message:
message = error_message
try:
return trans.fill_template("/webapps/tool_shed/repository/tool_form.mako",
repository=repository,
render_repository_actions_for=render_repository_actions_for,
changeset_revision=changeset_revision,
tool=tool,
tool_state=tool_state,
message=message,
status='error')
except Exception as e:
message = "Exception thrown attempting to display tool: %s." % str(e)
if trans.webapp.name == 'galaxy':
return trans.response.send_redirect(web.url_for(controller='repository',
action='preview_tools_in_changeset',
repository_id=repository_id,
changeset_revision=changeset_revision,
message=message,
status='error'))
return trans.response.send_redirect(web.url_for(controller='repository',
action='browse_repositories',
operation='view_or_manage_repository',
id=repository_id,
changeset_revision=changeset_revision,
message=message,
status='error'))
@web.expose
@web.require_login("manage email alerts")
def manage_email_alerts(self, trans, **kwd):
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
new_repo_alert = kwd.get('new_repo_alert', '')
new_repo_alert_checked = CheckboxField.is_checked(new_repo_alert)
user = trans.user
if kwd.get('new_repo_alert_button', False):
user.new_repo_alert = new_repo_alert_checked
trans.sa_session.add(user)
trans.sa_session.flush()
if new_repo_alert_checked:
message = 'You will receive email alerts for all new valid tool shed repositories.'
else:
message = 'You will not receive any email alerts for new valid tool shed repositories.'
checked = new_repo_alert_checked or (user and user.new_repo_alert)
new_repo_alert_check_box = CheckboxField('new_repo_alert', value=checked)
email_alert_repositories = []
for repository in trans.sa_session.query(trans.model.Repository) \
.filter(and_(trans.model.Repository.table.c.deleted == false(),
trans.model.Repository.table.c.email_alerts != null())) \
.order_by(trans.model.Repository.table.c.name):
if user.email in repository.email_alerts:
email_alert_repositories.append(repository)
return trans.fill_template("/webapps/tool_shed/user/manage_email_alerts.mako",
new_repo_alert_check_box=new_repo_alert_check_box,
email_alert_repositories=email_alert_repositories,
message=message,
status=status)
@web.expose
@web.require_login("manage repository")
def manage_repository(self, trans, id, **kwd):
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
repository = repository_util.get_repository_in_tool_shed(trans.app, id)
repository_type = kwd.get('repository_type', str(repository.type))
repo_dir = repository.repo_path(trans.app)
repo = hg_util.get_repo_for_repository(trans.app, repository=None, repo_path=repo_dir, create=False)
repo_name = kwd.get('repo_name', repository.name)
changeset_revision = kwd.get('changeset_revision', repository.tip(trans.app))
repository.share_url = repository_util.generate_sharable_link_for_repository_in_tool_shed(repository, changeset_revision=changeset_revision)
repository.clone_url = common_util.generate_clone_url_for_repository_in_tool_shed(trans.user, repository)
remote_repository_url = kwd.get('remote_repository_url', repository.remote_repository_url)
homepage_url = kwd.get('homepage_url', repository.homepage_url)
description = kwd.get('description', repository.description)
long_description = kwd.get('long_description', repository.long_description)
avg_rating, num_ratings = self.get_ave_item_rating_data(trans.sa_session, repository, webapp_model=trans.model)
display_reviews = util.string_as_bool(kwd.get('display_reviews', False))
alerts = kwd.get('alerts', '')
alerts_checked = CheckboxField.is_checked(alerts)
category_ids = util.listify(kwd.get('category_id', ''))
if repository.email_alerts:
email_alerts = json.loads(repository.email_alerts)
else:
email_alerts = []
allow_push = kwd.get('allow_push', '')
error = False
user = trans.user
if kwd.get('edit_repository_button', False):
update_kwds = dict(
name=repo_name,
description=description,
long_description=long_description,
remote_repository_url=remote_repository_url,
homepage_url=homepage_url,
type=repository_type,
)
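            # Apply the edits; update_repository returns (None, message) when the update cannot be
            # applied, in which case redirect back to the repository view with the error message.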
repository, message = repository_util.update_repository(app=trans.app, trans=trans, id=id, **update_kwds)
if repository is None:
return trans.response.send_redirect(web.url_for(controller='repository',
action='view_repository',
id=id,
message=message,
status='error'))
elif kwd.get('manage_categories_button', False):
flush_needed = False
# Delete all currently existing categories.
for rca in repository.categories:
trans.sa_session.delete(rca)
trans.sa_session.flush()
if category_ids:
# Create category associations
for category_id in category_ids:
category = trans.sa_session.query(trans.model.Category).get(trans.security.decode_id(category_id))
rca = trans.app.model.RepositoryCategoryAssociation(repository, category)
trans.sa_session.add(rca)
trans.sa_session.flush()
message = "The repository information has been updated."
elif kwd.get('user_access_button', False):
if allow_push not in ['none']:
remove_auth = kwd.get('remove_auth', '')
if remove_auth:
usernames = ''
else:
user_ids = util.listify(allow_push)
usernames = []
for user_id in user_ids:
user = trans.sa_session.query(trans.model.User).get(trans.security.decode_id(user_id))
usernames.append(user.username)
usernames = ','.join(usernames)
repository.set_allow_push(trans.app, usernames, remove_auth=remove_auth)
message = "The repository information has been updated."
elif kwd.get('receive_email_alerts_button', False):
flush_needed = False
if alerts_checked:
if user.email not in email_alerts:
email_alerts.append(user.email)
repository.email_alerts = json.dumps(email_alerts)
flush_needed = True
else:
if user.email in email_alerts:
email_alerts.remove(user.email)
repository.email_alerts = json.dumps(email_alerts)
flush_needed = True
if flush_needed:
trans.sa_session.add(repository)
trans.sa_session.flush()
message = "The repository information has been updated."
if error:
status = 'error'
allow_push_select_field = SelectField(name='allow_push',
multiple=True)
current_allow_push = repository.allow_push(trans.app)
if current_allow_push:
current_allow_push_list = current_allow_push.split(',')
else:
current_allow_push_list = []
options = []
for user in trans.sa_session.query(trans.model.User):
if user.username not in current_allow_push_list:
options.append(user)
for obj in options:
label = getattr(obj, 'username')
allow_push_select_field.add_option(label, trans.security.encode_id(obj.id))
checked = alerts_checked or user.email in email_alerts
alerts_check_box = CheckboxField('alerts', value=checked)
changeset_revision_select_field = grids_util.build_changeset_revision_select_field(trans,
repository,
selected_value=changeset_revision,
add_id_to_name=False,
downloadable=False)
revision_label = hg_util.get_revision_label(trans.app, repository, repository.tip(trans.app), include_date=False)
repository_metadata = None
metadata = None
is_malicious = False
repository_dependencies = None
if changeset_revision != hg_util.INITIAL_CHANGELOG_HASH:
repository_metadata = metadata_util.get_repository_metadata_by_changeset_revision(trans.app, id, changeset_revision)
if repository_metadata:
revision_label = hg_util.get_revision_label(trans.app, repository, changeset_revision, include_date=False)
metadata = repository_metadata.metadata
is_malicious = repository_metadata.malicious
else:
# There is no repository_metadata defined for the changeset_revision, so see if it was defined in a previous
# changeset in the changelog.
previous_changeset_revision = \
metadata_util.get_previous_metadata_changeset_revision(repository, repo, changeset_revision, downloadable=False)
if previous_changeset_revision != hg_util.INITIAL_CHANGELOG_HASH:
repository_metadata = metadata_util.get_repository_metadata_by_changeset_revision(trans.app, id, previous_changeset_revision)
if repository_metadata:
revision_label = hg_util.get_revision_label(trans.app, repository, previous_changeset_revision, include_date=False)
metadata = repository_metadata.metadata
is_malicious = repository_metadata.malicious
changeset_revision = previous_changeset_revision
if repository_metadata:
metadata = repository_metadata.metadata
# Get a dictionary of all repositories upon which the contents of the current repository_metadata record depend.
toolshed_base_url = str(web.url_for('/', qualified=True)).rstrip('/')
rb = relation_builder.RelationBuilder(trans.app, repository, repository_metadata, toolshed_base_url)
repository_dependencies = rb.get_repository_dependencies_for_changeset_revision()
if str(repository.type) != rt_util.REPOSITORY_SUITE_DEFINITION:
# Handle messaging for resetting repository type to the optimal value.
change_repository_type_message = rt_util.generate_message_for_repository_type_change(trans.app,
repository)
if change_repository_type_message:
message += change_repository_type_message
status = 'warning'
elif str(repository.type) != rt_util.TOOL_DEPENDENCY_DEFINITION:
# Handle messaging for resetting repository type to the optimal value.
change_repository_type_message = rt_util.generate_message_for_repository_type_change(trans.app,
repository)
if change_repository_type_message:
message += change_repository_type_message
status = 'warning'
else:
# Handle messaging for orphan tool dependency definitions.
dd = dependency_display.DependencyDisplayer(trans.app)
orphan_message = dd.generate_message_for_orphan_tool_dependencies(repository, metadata)
if orphan_message:
message += orphan_message
status = 'warning'
if is_malicious:
if trans.app.security_agent.can_push(trans.app, trans.user, repository):
message += malicious_error_can_push
else:
message += malicious_error
status = 'error'
repository_type_select_field = rt_util.build_repository_type_select_field(trans, repository=repository)
malicious_check_box = CheckboxField('malicious', value=is_malicious)
categories = suc.get_categories(trans.app)
selected_categories = [_rca.category_id for _rca in repository.categories]
tsucm = ToolShedUtilityContainerManager(trans.app)
containers_dict = tsucm.build_repository_containers(repository,
changeset_revision,
repository_dependencies,
repository_metadata)
heads = hg_util.get_repository_heads(repo)
deprecated_repository_dependency_tups = \
metadata_util.get_repository_dependency_tups_from_repository_metadata(trans.app,
repository_metadata,
deprecated_only=True)
return trans.fill_template('/webapps/tool_shed/repository/manage_repository.mako',
repo_name=repo_name,
remote_repository_url=remote_repository_url,
homepage_url=homepage_url,
description=description,
long_description=long_description,
current_allow_push_list=current_allow_push_list,
allow_push_select_field=allow_push_select_field,
deprecated_repository_dependency_tups=deprecated_repository_dependency_tups,
repo=repo,
heads=heads,
repository=repository,
containers_dict=containers_dict,
repository_metadata=repository_metadata,
changeset_revision=changeset_revision,
changeset_revision_select_field=changeset_revision_select_field,
revision_label=revision_label,
selected_categories=selected_categories,
categories=categories,
metadata=metadata,
avg_rating=avg_rating,
display_reviews=display_reviews,
num_ratings=num_ratings,
alerts_check_box=alerts_check_box,
malicious_check_box=malicious_check_box,
repository_type_select_field=repository_type_select_field,
message=message,
status=status)
@web.expose
@web.require_login("manage repository administrators")
def manage_repository_admins(self, trans, id, **kwd):
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
repository = repository_util.get_repository_in_tool_shed(trans.app, id)
changeset_revision = kwd.get('changeset_revision', repository.tip(trans.app))
metadata = None
if changeset_revision != hg_util.INITIAL_CHANGELOG_HASH:
repository_metadata = metadata_util.get_repository_metadata_by_changeset_revision(trans.app, id, changeset_revision)
if repository_metadata:
metadata = repository_metadata.metadata
else:
# There is no repository_metadata defined for the changeset_revision, so see if it was defined
# in a previous changeset in the changelog.
repo = hg_util.get_repo_for_repository(trans.app, repository=repository, repo_path=None, create=False)
previous_changeset_revision = \
metadata_util.get_previous_metadata_changeset_revision(repository,
repo,
changeset_revision,
downloadable=False)
if previous_changeset_revision != hg_util.INITIAL_CHANGELOG_HASH:
repository_metadata = metadata_util.get_repository_metadata_by_changeset_revision(trans.app,
id,
previous_changeset_revision)
if repository_metadata:
metadata = repository_metadata.metadata
role = repository.admin_role
associations_dict = repository_util.handle_role_associations(trans.app,
role,
repository,
**kwd)
in_users = associations_dict.get('in_users', [])
out_users = associations_dict.get('out_users', [])
in_groups = associations_dict.get('in_groups', [])
out_groups = associations_dict.get('out_groups', [])
message = associations_dict.get('message', '')
status = associations_dict.get('status', 'done')
return trans.fill_template('/webapps/tool_shed/role/role.mako',
in_admin_controller=False,
repository=repository,
metadata=metadata,
changeset_revision=changeset_revision,
role=role,
in_users=in_users,
out_users=out_users,
in_groups=in_groups,
out_groups=out_groups,
message=message,
status=status)
@web.expose
@web.require_login("review repository revision")
def manage_repository_reviews_of_revision(self, trans, **kwd):
return trans.response.send_redirect(web.url_for(controller='repository_review',
action='manage_repository_reviews_of_revision',
**kwd))
@web.expose
@web.require_login("multi select email alerts")
def multi_select_email_alerts(self, trans, **kwd):
if 'operation' in kwd:
operation = kwd['operation'].lower()
if operation == "receive email alerts":
if trans.user:
if kwd['id']:
kwd['caller'] = 'multi_select_email_alerts'
return trans.response.send_redirect(web.url_for(controller='repository',
action='set_email_alerts',
**kwd))
else:
kwd['message'] = 'You must be logged in to set email alerts.'
kwd['status'] = 'error'
del kwd['operation']
elif operation == "view_or_manage_repository":
return trans.response.send_redirect(web.url_for(controller='repository',
action='view_or_manage_repository',
**kwd))
self.email_alerts_repository_grid.title = "Set email alerts for repository changes"
return self.email_alerts_repository_grid(trans, **kwd)
@web.expose
def next_installable_changeset_revision(self, trans, **kwd):
"""
Handle a request from a Galaxy instance where the changeset_revision defined for a repository
in a dependency definition file is older than the changeset_revision associated with the installed
repository.
"""
name = kwd.get('name', None)
owner = kwd.get('owner', None)
changeset_revision = kwd.get('changeset_revision', None)
repository = repository_util.get_repository_by_name_and_owner(trans.app, name, owner)
repo = hg_util.get_repo_for_repository(trans.app, repository=repository, repo_path=None, create=False)
# Get the next installable changeset_revision beyond the received changeset_revision.
next_changeset_revision = metadata_util.get_next_downloadable_changeset_revision(repository, repo, changeset_revision)
if next_changeset_revision and next_changeset_revision != changeset_revision:
return next_changeset_revision
return ''
@web.json
def open_folder(self, trans, folder_path, repository_id):
# Avoid caching
trans.response.headers['Pragma'] = 'no-cache'
trans.response.headers['Expires'] = '0'
is_admin = trans.user_is_admin()
return suc.open_repository_files_folder(trans.app, folder_path, repository_id, is_admin)
@web.expose
def preview_tools_in_changeset(self, trans, repository_id, **kwd):
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
repository = repository_util.get_repository_in_tool_shed(trans.app, repository_id)
changeset_revision = kwd.get('changeset_revision', repository.tip(trans.app))
repository_metadata = metadata_util.get_repository_metadata_by_changeset_revision(trans.app, repository_id, changeset_revision)
if repository_metadata:
            repository_metadata_id = trans.security.encode_id(repository_metadata.id)
metadata = repository_metadata.metadata
# Get a dictionary of all repositories upon which the contents of the current repository_metadata record depend.
toolshed_base_url = str(web.url_for('/', qualified=True)).rstrip('/')
rb = relation_builder.RelationBuilder(trans.app, repository, repository_metadata, toolshed_base_url)
repository_dependencies = rb.get_repository_dependencies_for_changeset_revision()
if metadata:
if 'repository_dependencies' in metadata and not repository_dependencies:
# See if we have an invalid repository dependency definition or if the repository dependency is required
# only for compiling the repository's tool dependency.
invalid = False
repository_dependencies_dict = metadata['repository_dependencies']
rd_tups = repository_dependencies_dict.get('repository_dependencies', [])
for rd_tup in rd_tups:
                        rd_tool_shed, \
rd_name, \
rd_owner, \
rd_changeset_revision, \
rd_prior_installation_required, \
rd_only_if_compiling_contained_td = \
common_util.parse_repository_dependency_tuple(rd_tup)
if not util.asbool(rd_only_if_compiling_contained_td):
invalid = True
break
if invalid:
dd = dependency_display.DependencyDisplayer(trans.app)
message = dd.generate_message_for_invalid_repository_dependencies(metadata,
error_from_tuple=False)
status = 'error'
else:
repository_metadata_id = None
metadata = None
repository_dependencies = None
revision_label = hg_util.get_revision_label(trans.app, repository, changeset_revision, include_date=True)
changeset_revision_select_field = grids_util.build_changeset_revision_select_field(trans,
repository,
selected_value=changeset_revision,
add_id_to_name=False,
downloadable=False)
tsucm = ToolShedUtilityContainerManager(trans.app)
containers_dict = tsucm.build_repository_containers(repository,
changeset_revision,
repository_dependencies,
repository_metadata)
return trans.fill_template('/webapps/tool_shed/repository/preview_tools_in_changeset.mako',
repository=repository,
containers_dict=containers_dict,
repository_metadata_id=repository_metadata_id,
changeset_revision=changeset_revision,
revision_label=revision_label,
changeset_revision_select_field=changeset_revision_select_field,
metadata=metadata,
message=message,
status=status)
@web.expose
def previous_changeset_revisions(self, trans, from_tip=False, **kwd):
"""
Handle a request from a local Galaxy instance. This method will handle two scenarios: (1) the
        repository was previously installed using an older changeset_revision, but later the repository
was updated in the tool shed and the Galaxy admin is trying to install the latest changeset
revision of the same repository instead of updating the one that was previously installed. (2)
the admin is attempting to get updates for an installed repository that has a repository dependency
and both the repository and its dependency have available updates. In this case, the from_tip
parameter will be True because the repository dependency definition may define a changeset hash
for the dependency that is newer than the installed changeset revision of the dependency (this is
due to the behavior of "Tool dependency definition" repositories, whose metadata is always the tip),
        so the complete list of changeset hashes in the changelog must be returned.
"""
name = kwd.get('name', None)
owner = kwd.get('owner', None)
if name is not None and owner is not None:
repository = repository_util.get_repository_by_name_and_owner(trans.app, name, owner)
from_tip = util.string_as_bool(from_tip)
if from_tip:
changeset_revision = repository.tip(trans.app)
else:
changeset_revision = kwd.get('changeset_revision', None)
if changeset_revision is not None:
repo = hg_util.get_repo_for_repository(trans.app, repository=repository, repo_path=None, create=False)
# Get the lower bound changeset revision.
lower_bound_changeset_revision = metadata_util.get_previous_metadata_changeset_revision(repository,
repo,
changeset_revision,
downloadable=True)
# Build the list of changeset revision hashes.
changeset_hashes = []
for changeset in hg_util.reversed_lower_upper_bounded_changelog(repo,
lower_bound_changeset_revision,
changeset_revision):
changeset_hashes.append(str(repo.changectx(changeset)))
if changeset_hashes:
changeset_hashes_str = ','.join(changeset_hashes)
return changeset_hashes_str
return ''
@web.expose
@web.require_login("rate repositories")
def rate_repository(self, trans, **kwd):
""" Rate a repository and return updated rating data. """
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
id = kwd.get('id', None)
if not id:
return trans.response.send_redirect(web.url_for(controller='repository',
action='browse_repositories',
message='Select a repository to rate',
status='error'))
repository = repository_util.get_repository_in_tool_shed(trans.app, id)
changeset_revision = repository.tip(trans.app)
if repository.user == trans.user:
return trans.response.send_redirect(web.url_for(controller='repository',
action='browse_repositories',
message="You are not allowed to rate your own repository",
status='error'))
if kwd.get('rate_button', False):
rating = int(kwd.get('rating', '0'))
comment = kwd.get('comment', '')
rating = self.rate_item(trans, trans.user, repository, rating, comment)
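        # Recompute the aggregate rating data and the current user's own rating so the rendered page
        # reflects any rating that was just submitted.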
avg_rating, num_ratings = self.get_ave_item_rating_data(trans.sa_session, repository, webapp_model=trans.model)
display_reviews = util.string_as_bool(kwd.get('display_reviews', False))
rra = self.get_user_item_rating(trans.sa_session, trans.user, repository, webapp_model=trans.model)
metadata = metadata_util.get_repository_metadata_by_repository_id_changeset_revision(trans.app,
id,
changeset_revision,
metadata_only=True)
repository_type_select_field = rt_util.build_repository_type_select_field(trans, repository=repository)
revision_label = hg_util.get_revision_label(trans.app, repository, changeset_revision, include_date=True)
return trans.fill_template('/webapps/tool_shed/repository/rate_repository.mako',
repository=repository,
metadata=metadata,
revision_label=revision_label,
avg_rating=avg_rating,
display_reviews=display_reviews,
num_ratings=num_ratings,
rra=rra,
repository_type_select_field=repository_type_select_field,
message=message,
status=status)
@web.expose
def reset_all_metadata(self, trans, id, **kwd):
"""Reset all metadata on the complete changelog for a single repository in the tool shed."""
# This method is called only from the ~/templates/webapps/tool_shed/repository/manage_repository.mako template.
repository = repository_util.get_repository_in_tool_shed(trans.app, id)
rmm = repository_metadata_manager.RepositoryMetadataManager(app=trans.app,
user=trans.user,
repository=repository,
resetting_all_metadata_on_repository=True)
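        # Reset metadata across the repository's entire changelog, then report any tools that could not be validated.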
rmm.reset_all_metadata_on_repository_in_tool_shed()
rmm_metadata_dict = rmm.get_metadata_dict()
rmm_invalid_file_tups = rmm.get_invalid_file_tups()
if rmm_invalid_file_tups:
message = tool_util.generate_message_for_invalid_tools(trans.app,
rmm_invalid_file_tups,
repository,
rmm_metadata_dict)
status = 'error'
else:
message = "All repository metadata has been reset. "
status = 'done'
return trans.response.send_redirect(web.url_for(controller='repository',
action='manage_repository',
id=id,
message=message,
status=status))
@web.expose
def reset_metadata_on_my_writable_repositories_in_tool_shed(self, trans, **kwd):
rmm = repository_metadata_manager.RepositoryMetadataManager(trans.app, trans.user, resetting_all_metadata_on_repository=True)
if 'reset_metadata_on_selected_repositories_button' in kwd:
message, status = rmm.reset_metadata_on_selected_repositories(**kwd)
else:
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
repositories_select_field = rmm.build_repository_ids_select_field(name='repository_ids',
multiple=True,
display='checkboxes',
my_writable=True)
return trans.fill_template('/webapps/tool_shed/common/reset_metadata_on_selected_repositories.mako',
repositories_select_field=repositories_select_field,
message=message,
status=status)
@web.expose
def select_files_to_delete(self, trans, id, **kwd):
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
commit_message = escape(kwd.get('commit_message', 'Deleted selected files'))
repository = repository_util.get_repository_in_tool_shed(trans.app, id)
repo_dir = repository.repo_path(trans.app)
repo = hg_util.get_repo_for_repository(trans.app, repository=None, repo_path=repo_dir, create=False)
selected_files_to_delete = kwd.get('selected_files_to_delete', '')
if kwd.get('select_files_to_delete_button', False):
if selected_files_to_delete:
selected_files_to_delete = selected_files_to_delete.split(',')
# Get the current repository tip.
tip = repository.tip(trans.app)
for selected_file in selected_files_to_delete:
try:
hg_util.remove_file(repo.ui, repo, selected_file, force=True)
except Exception as e:
log.debug("Error removing the following file using the mercurial API:\n %s" % str(selected_file))
log.debug("The error was: %s" % str(e))
log.debug("Attempting to remove the file using a different approach.")
relative_selected_file = selected_file.split('repo_%d' % repository.id)[1].lstrip('/')
repo.dirstate.remove(relative_selected_file)
repo.dirstate.write()
absolute_selected_file = os.path.abspath(selected_file)
if os.path.isdir(absolute_selected_file):
try:
os.rmdir(absolute_selected_file)
except OSError as e:
# The directory is not empty
pass
elif os.path.isfile(absolute_selected_file):
os.remove(absolute_selected_file)
dir = os.path.split(absolute_selected_file)[0]
try:
os.rmdir(dir)
except OSError as e:
# The directory is not empty
pass
# Commit the change set.
if not commit_message:
commit_message = 'Deleted selected files'
hg_util.commit_changeset(repo.ui,
repo,
full_path_to_changeset=repo_dir,
username=trans.user.username,
message=commit_message)
suc.handle_email_alerts(trans.app, trans.request.host, repository)
# Update the repository files for browsing.
hg_util.update_repository(repo)
# Get the new repository tip.
if tip == repository.tip(trans.app):
message += 'No changes to repository. '
else:
rmm = repository_metadata_manager.RepositoryMetadataManager(app=trans.app,
user=trans.user,
repository=repository)
status, error_message = rmm.set_repository_metadata_due_to_new_tip(trans.request.host, **kwd)
if error_message:
message = error_message
else:
message += 'The selected files were deleted from the repository. '
else:
message = "Select at least 1 file to delete from the repository before clicking <b>Delete selected files</b>."
status = "error"
repository_type_select_field = rt_util.build_repository_type_select_field(trans, repository=repository)
changeset_revision = repository.tip(trans.app)
metadata = metadata_util.get_repository_metadata_by_repository_id_changeset_revision(trans.app,
id,
changeset_revision,
metadata_only=True)
return trans.fill_template('/webapps/tool_shed/repository/browse_repository.mako',
repo=repo,
repository=repository,
changeset_revision=changeset_revision,
metadata=metadata,
commit_message=commit_message,
repository_type_select_field=repository_type_select_field,
message=message,
status=status)
@web.expose
def send_to_owner(self, trans, id, message=''):
repository = repository_util.get_repository_in_tool_shed(trans.app, id)
if not message:
message = 'Enter a message'
status = 'error'
elif trans.user and trans.user.email:
smtp_server = trans.app.config.smtp_server
from_address = trans.app.config.email_from
if smtp_server is None or from_address is None:
return trans.show_error_message("Mail is not configured for this Galaxy tool shed instance")
to_address = repository.user.email
# Get the name of the server hosting the tool shed instance.
host = trans.request.host
# Build the email message
body = string.Template(suc.contact_owner_template) \
.safe_substitute(username=trans.user.username,
repository_name=repository.name,
email=trans.user.email,
message=message,
host=host)
subject = "Regarding your tool shed repository named %s" % repository.name
# Send it
try:
util.send_mail(from_address, to_address, subject, body, trans.app.config)
message = "Your message has been sent"
status = "done"
except Exception as e:
message = "An error occurred sending your message by email: %s" % str(e)
status = "error"
else:
# Do all we can to eliminate spam.
return trans.show_error_message("You must be logged in to contact the owner of a repository.")
return trans.response.send_redirect(web.url_for(controller='repository',
action='contact_owner',
id=id,
message=message,
status=status))
@web.expose
@web.require_login("set email alerts")
def set_email_alerts(self, trans, **kwd):
"""Set email alerts for selected repositories."""
# This method is called from multiple grids, so the caller must be passed.
caller = kwd['caller']
user = trans.user
if user:
repository_ids = util.listify(kwd.get('id', ''))
total_alerts_added = 0
total_alerts_removed = 0
flush_needed = False
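            # Toggle the current user's email address in each selected repository's alert list:
            # remove it if it is already present, otherwise add it.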
for repository_id in repository_ids:
repository = repository_util.get_repository_in_tool_shed(trans.app, repository_id)
if repository.email_alerts:
email_alerts = json.loads(repository.email_alerts)
else:
email_alerts = []
if user.email in email_alerts:
email_alerts.remove(user.email)
repository.email_alerts = json.dumps(email_alerts)
trans.sa_session.add(repository)
flush_needed = True
total_alerts_removed += 1
else:
email_alerts.append(user.email)
repository.email_alerts = json.dumps(email_alerts)
trans.sa_session.add(repository)
flush_needed = True
total_alerts_added += 1
if flush_needed:
trans.sa_session.flush()
message = 'Total alerts added: %d, total alerts removed: %d' % (total_alerts_added, total_alerts_removed)
kwd['message'] = message
kwd['status'] = 'done'
del kwd['operation']
return trans.response.send_redirect(web.url_for(controller='repository',
action=caller,
**kwd))
@web.expose
@web.require_login("set repository as malicious")
def set_malicious(self, trans, id, ctx_str, **kwd):
malicious = kwd.get('malicious', '')
if kwd.get('malicious_button', False):
repository_metadata = metadata_util.get_repository_metadata_by_changeset_revision(trans.app, id, ctx_str)
malicious_checked = CheckboxField.is_checked(malicious)
repository_metadata.malicious = malicious_checked
trans.sa_session.add(repository_metadata)
trans.sa_session.flush()
if malicious_checked:
message = "The repository tip has been defined as malicious."
else:
message = "The repository tip has been defined as <b>not</b> malicious."
status = 'done'
return trans.response.send_redirect(web.url_for(controller='repository',
action='manage_repository',
id=id,
changeset_revision=ctx_str,
malicious=malicious,
message=message,
status=status))
@web.expose
def sharable_owner(self, trans, owner):
"""Support for sharable URL for each repository owner's tools, e.g. http://example.org/view/owner."""
try:
user = common_util.get_user_by_username(trans, owner)
except Exception:
user = None
if user:
user_id = trans.security.encode_id(user.id)
return trans.response.send_redirect(web.url_for(controller='repository',
action='index',
user_id=user_id))
else:
return trans.show_error_message("The tool shed <b>%s</b> contains no repositories owned by <b>%s</b>." %
(web.url_for('/', qualified=True).rstrip('/'), str(owner)))
@web.expose
def sharable_repository(self, trans, owner, name):
"""Support for sharable URL for a specified repository, e.g. http://example.org/view/owner/name."""
try:
repository = repository_util.get_repository_by_name_and_owner(trans.app, name, owner)
except Exception:
repository = None
if repository:
repository_id = trans.security.encode_id(repository.id)
return trans.response.send_redirect(web.url_for(controller='repository',
action='index',
repository_id=repository_id))
else:
# If the owner is valid, then show all of their repositories.
try:
user = common_util.get_user_by_username(trans, owner)
except Exception:
user = None
if user:
user_id = trans.security.encode_id(user.id)
message = "This list of repositories owned by <b>%s</b>, does not include one named <b>%s</b>." % (str(owner), str(name))
return trans.response.send_redirect(web.url_for(controller='repository',
action='index',
user_id=user_id,
message=message,
status='error'))
else:
return trans.show_error_message("The tool shed <b>%s</b> contains no repositories named <b>%s</b> with owner <b>%s</b>." %
(web.url_for('/', qualified=True).rstrip('/'), str(name), str(owner)))
@web.expose
def sharable_repository_revision(self, trans, owner, name, changeset_revision):
"""Support for sharable URL for a specified repository revision, e.g. http://example.org/view/owner/name/changeset_revision."""
try:
repository = repository_util.get_repository_by_name_and_owner(trans.app, name, owner)
except Exception:
repository = None
if repository:
repository_id = trans.security.encode_id(repository.id)
repository_metadata = metadata_util.get_repository_metadata_by_repository_id_changeset_revision(trans.app,
repository_id,
changeset_revision)
if not repository_metadata:
# Get updates to the received changeset_revision if any exist.
repo = hg_util.get_repo_for_repository(trans.app, repository=repository, repo_path=None, create=False)
upper_bound_changeset_revision = metadata_util.get_next_downloadable_changeset_revision(repository, repo, changeset_revision)
if upper_bound_changeset_revision and upper_bound_changeset_revision != changeset_revision:
changeset_revision = upper_bound_changeset_revision
repository_metadata = metadata_util.get_repository_metadata_by_repository_id_changeset_revision(trans.app,
repository_id,
changeset_revision)
if repository_metadata:
return trans.response.send_redirect(web.url_for(controller='repository',
action='index',
repository_id=repository_id,
changeset_revision=changeset_revision))
else:
message = "The change log for the repository named <b>%s</b> owned by <b>%s</b> does not include revision <b>%s</b>." % \
(escape(str(name)), escape(str(owner)), escape(str(changeset_revision)))
return trans.response.send_redirect(web.url_for(controller='repository',
action='index',
repository_id=repository_id,
message=message,
status='error'))
else:
# See if the owner is valid.
return trans.response.send_redirect(web.url_for(controller='repository',
action='sharable_owner',
owner=owner))
@web.expose
def updated_changeset_revisions(self, trans, **kwd):
"""
Handle a request from a local Galaxy instance to retrieve the list of changeset revisions to which an
installed repository can be updated. This method will return a string of comma-separated changeset revision
        hashes for all available updates to the received changeset revision. Among other things, this method
handles the scenario where an installed tool shed repository's tool_dependency definition file defines a
changeset revision for a complex repository dependency that is outdated. In other words, a defined changeset
revision is older than the current changeset revision for the required repository, making it impossible to
discover the repository without knowledge of revisions to which it could have been updated.
"""
name = kwd.get('name', None)
owner = kwd.get('owner', None)
changeset_revision = kwd.get('changeset_revision', None)
if name and owner and changeset_revision:
return metadata_util.get_updated_changeset_revisions(trans.app, name, owner, changeset_revision)
return ''
@web.expose
def upload_capsule(self, trans, **kwd):
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
url = kwd.get('url', '')
if 'upload_capsule_button' in kwd:
irm = capsule_manager.ImportRepositoryManager(trans.app,
trans.request.host,
trans.user,
trans.user_is_admin())
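            # Upload the capsule archive, then extract and validate its contents; only a capsule that
            # passes validation is handed off to the import_capsule view.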
capsule_dict = irm.upload_capsule(**kwd)
status = capsule_dict.get('status', 'error')
if status == 'error':
message = capsule_dict.get('error_message', '')
else:
capsule_dict = irm.extract_capsule_files(**capsule_dict)
capsule_dict = irm.validate_capsule(**capsule_dict)
status = capsule_dict.get('status', 'error')
if status == 'ok':
return trans.response.send_redirect(web.url_for(controller='repository',
action='import_capsule',
**capsule_dict))
else:
message = 'The capsule contents are invalid and cannot be imported:<br/>%s' % \
str(capsule_dict.get('error_message', ''))
return trans.fill_template('/webapps/tool_shed/repository/upload_capsule.mako',
url=url,
message=message,
status=status)
@web.expose
def view_changelog(self, trans, id, **kwd):
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
repository = repository_util.get_repository_in_tool_shed(trans.app, id)
repo = hg_util.get_repo_for_repository(trans.app, repository=repository, repo_path=None, create=False)
changesets = []
for changeset in repo.changelog:
ctx = repo.changectx(changeset)
if metadata_util.get_repository_metadata_by_changeset_revision(trans.app, id, str(ctx)):
has_metadata = True
else:
has_metadata = False
change_dict = {'ctx': ctx,
'rev': str(ctx.rev()),
                           'date': ctx.date(),  # mercurial's raw (unix timestamp, timezone offset) tuple
'display_date': hg_util.get_readable_ctx_date(ctx),
'description': ctx.description(),
'files': ctx.files(),
'user': ctx.user(),
'parent': ctx.parents()[0],
'has_metadata': has_metadata}
# Make sure we'll view latest changeset first.
changesets.insert(0, change_dict)
metadata = metadata_util.get_repository_metadata_by_repository_id_changeset_revision(trans.app,
id,
repository.tip(trans.app),
metadata_only=True)
return trans.fill_template('/webapps/tool_shed/repository/view_changelog.mako',
repository=repository,
metadata=metadata,
changesets=changesets,
message=message,
status=status)
@web.expose
def view_changeset(self, trans, id, ctx_str, **kwd):
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
repository = repository_util.get_repository_in_tool_shed(trans.app, id)
repo = hg_util.get_repo_for_repository(trans.app, repository=repository, repo_path=None, create=False)
ctx = hg_util.get_changectx_for_changeset(repo, ctx_str)
if ctx is None:
message = "Repository does not include changeset revision '%s'." % str(ctx_str)
status = 'error'
return trans.response.send_redirect(web.url_for(controller='repository',
action='view_changelog',
id=id,
message=message,
status=status))
ctx_parent = ctx.parents()[0]
if ctx.children():
ctx_child = ctx.children()[0]
else:
ctx_child = None
diffs = []
options_dict = hg_util.get_mercurial_default_options_dict('diff')
# Not quite sure if the following settings make any difference, but with a combination of them and the size check on each
# diff, we don't run out of memory when viewing the changelog of the cisortho2 repository on the test tool shed.
options_dict['maxfile'] = basic_util.MAXDIFFSIZE
options_dict['maxtotal'] = basic_util.MAXDIFFSIZE
diffopts = mdiff.diffopts(**options_dict)
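        # Render each file diff as escaped HTML, truncating anything larger than MAXDIFFSIZE so very
        # large changesets do not exhaust memory.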
for diff in patch.diff(repo, node1=ctx_parent.node(), node2=ctx.node(), opts=diffopts):
if len(diff) > basic_util.MAXDIFFSIZE:
diff = util.shrink_string_by_size(diff, basic_util.MAXDIFFSIZE)
diffs.append(basic_util.to_html_string(diff))
modified, added, removed, deleted, unknown, ignored, clean = repo.status(node1=ctx_parent.node(), node2=ctx.node())
anchors = modified + added + removed + deleted + unknown + ignored + clean
metadata = metadata_util.get_repository_metadata_by_repository_id_changeset_revision(trans.app,
id,
ctx_str,
metadata_only=True)
# For rendering the prev button.
if ctx_parent:
ctx_parent_date = hg_util.get_readable_ctx_date(ctx_parent)
ctx_parent_rev = ctx_parent.rev()
if ctx_parent_rev < 0:
prev = None
else:
prev = "<b>%s:%s</b> <i>(%s)</i>" % (ctx_parent_rev, ctx_parent, ctx_parent_date)
else:
prev = None
if ctx_child:
ctx_child_date = hg_util.get_readable_ctx_date(ctx_child)
ctx_child_rev = ctx_child.rev()
next = "<b>%s:%s</b> <i>(%s)</i>" % (ctx_child_rev, ctx_child, ctx_child_date)
else:
next = None
return trans.fill_template('/webapps/tool_shed/repository/view_changeset.mako',
repository=repository,
metadata=metadata,
prev=prev,
next=next,
ctx=ctx,
ctx_parent=ctx_parent,
ctx_child=ctx_child,
anchors=anchors,
modified=modified,
added=added,
removed=removed,
deleted=deleted,
unknown=unknown,
ignored=ignored,
clean=clean,
diffs=diffs,
message=message,
status=status)
@web.expose
def view_or_manage_repository(self, trans, **kwd):
repository_id = kwd.get('id', None)
if repository_id:
repository = repository_util.get_repository_in_tool_shed(trans.app, repository_id)
user = trans.user
if repository:
if user is not None and (trans.user_is_admin() or
trans.app.security_agent.user_can_administer_repository(user, repository)):
return trans.response.send_redirect(web.url_for(controller='repository',
action='manage_repository',
**kwd))
else:
return trans.response.send_redirect(web.url_for(controller='repository',
action='view_repository',
**kwd))
return trans.show_error_message("Invalid repository id '%s' received." % repository_id)
return trans.show_error_message("The repository id was not received.")
@web.expose
def view_repository(self, trans, id, **kwd):
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
repository = repository_util.get_repository_in_tool_shed(trans.app, id)
repo = hg_util.get_repo_for_repository(trans.app, repository=repository, repo_path=None, create=False)
avg_rating, num_ratings = self.get_ave_item_rating_data(trans.sa_session, repository, webapp_model=trans.model)
changeset_revision = kwd.get('changeset_revision', repository.tip(trans.app))
repository.share_url = repository_util.generate_sharable_link_for_repository_in_tool_shed(repository, changeset_revision=changeset_revision)
repository.clone_url = common_util.generate_clone_url_for_repository_in_tool_shed(trans.user, repository)
display_reviews = kwd.get('display_reviews', False)
alerts = kwd.get('alerts', '')
alerts_checked = CheckboxField.is_checked(alerts)
if repository.email_alerts:
email_alerts = json.loads(repository.email_alerts)
else:
email_alerts = []
repository_dependencies = None
user = trans.user
if user and kwd.get('receive_email_alerts_button', False):
flush_needed = False
if alerts_checked:
if user.email not in email_alerts:
email_alerts.append(user.email)
repository.email_alerts = json.dumps(email_alerts)
flush_needed = True
else:
if user.email in email_alerts:
email_alerts.remove(user.email)
repository.email_alerts = json.dumps(email_alerts)
flush_needed = True
if flush_needed:
trans.sa_session.add(repository)
trans.sa_session.flush()
checked = alerts_checked or (user and user.email in email_alerts)
alerts_check_box = CheckboxField('alerts', value=checked)
changeset_revision_select_field = grids_util.build_changeset_revision_select_field(trans,
repository,
selected_value=changeset_revision,
add_id_to_name=False,
downloadable=False)
revision_label = hg_util.get_revision_label(trans.app, repository, changeset_revision, include_date=False)
repository_metadata = metadata_util.get_repository_metadata_by_changeset_revision(trans.app, id, changeset_revision)
if repository_metadata:
metadata = repository_metadata.metadata
# Get a dictionary of all repositories upon which the contents of the current repository_metadata record depend.
toolshed_base_url = str(web.url_for('/', qualified=True)).rstrip('/')
rb = relation_builder.RelationBuilder(trans.app, repository, repository_metadata, toolshed_base_url)
repository_dependencies = rb.get_repository_dependencies_for_changeset_revision()
if str(repository.type) != rt_util.TOOL_DEPENDENCY_DEFINITION:
# Handle messaging for orphan tool dependency definitions.
dd = dependency_display.DependencyDisplayer(trans.app)
orphan_message = dd.generate_message_for_orphan_tool_dependencies(repository, metadata)
if orphan_message:
message += orphan_message
status = 'warning'
else:
metadata = None
is_malicious = metadata_util.is_malicious(trans.app, id, repository.tip(trans.app))
if is_malicious:
if trans.app.security_agent.can_push(trans.app, trans.user, repository):
message += malicious_error_can_push
else:
message += malicious_error
status = 'error'
tsucm = ToolShedUtilityContainerManager(trans.app)
containers_dict = tsucm.build_repository_containers(repository,
changeset_revision,
repository_dependencies,
repository_metadata)
repository_type_select_field = rt_util.build_repository_type_select_field(trans, repository=repository)
heads = hg_util.get_repository_heads(repo)
return trans.fill_template('/webapps/tool_shed/repository/view_repository.mako',
repo=repo,
heads=heads,
repository=repository,
repository_metadata=repository_metadata,
metadata=metadata,
containers_dict=containers_dict,
avg_rating=avg_rating,
display_reviews=display_reviews,
num_ratings=num_ratings,
alerts_check_box=alerts_check_box,
changeset_revision=changeset_revision,
changeset_revision_select_field=changeset_revision_select_field,
revision_label=revision_label,
repository_type_select_field=repository_type_select_field,
message=message,
status=status)
@web.expose
def view_tool_metadata(self, trans, repository_id, changeset_revision, tool_id, **kwd):
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
render_repository_actions_for = kwd.get('render_repository_actions_for', 'tool_shed')
repository = repository_util.get_repository_in_tool_shed(trans.app, repository_id)
repo_files_dir = repository.repo_path(trans.app)
repo = hg_util.get_repo_for_repository(trans.app, repository=None, repo_path=repo_files_dir, create=False)
tool_metadata_dict = {}
tool_lineage = []
tool = None
guid = None
revision_label = hg_util.get_revision_label(trans.app, repository, changeset_revision, include_date=False)
repository_metadata = metadata_util.get_repository_metadata_by_changeset_revision(trans.app, repository_id, changeset_revision)
if repository_metadata:
repository_metadata_id = trans.security.encode_id(repository_metadata.id)
metadata = repository_metadata.metadata
if metadata:
if 'tools' in metadata:
with ValidationContext.from_app(trans.app) as validation_context:
tv = tool_validator.ToolValidator(validation_context)
for tool_metadata_dict in metadata['tools']:
if tool_metadata_dict['id'] == tool_id:
work_dir = tempfile.mkdtemp()
relative_path_to_tool_config = tool_metadata_dict['tool_config']
guid = tool_metadata_dict['guid']
full_path_to_tool_config = os.path.abspath(relative_path_to_tool_config)
full_path_to_dir, tool_config_filename = os.path.split(full_path_to_tool_config)
can_use_disk_file = tv.can_use_tool_config_disk_file(repository,
repo,
full_path_to_tool_config,
changeset_revision)
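                                # Load the tool config straight from disk when the copy on disk is usable
                                # for this changeset revision; otherwise load it from a temporary config
                                # generated from the repository changeset.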
if can_use_disk_file:
tool, valid, message, sample_files = \
tv.handle_sample_files_and_load_tool_from_disk(repo_files_dir,
repository_id,
full_path_to_tool_config,
work_dir)
if message:
status = 'error'
else:
tool, message, sample_files = \
tv.handle_sample_files_and_load_tool_from_tmp_config(repo,
repository_id,
changeset_revision,
tool_config_filename,
work_dir)
if message:
status = 'error'
basic_util.remove_dir(work_dir)
break
if guid:
tvm = tool_version_manager.ToolVersionManager(trans.app)
tool_lineage = tvm.get_version_lineage_for_tool(repository_id,
repository_metadata,
guid)
else:
repository_metadata_id = None
metadata = None
changeset_revision_select_field = grids_util.build_changeset_revision_select_field(trans,
repository,
selected_value=changeset_revision,
add_id_to_name=False,
downloadable=False)
return trans.fill_template("/webapps/tool_shed/repository/view_tool_metadata.mako",
render_repository_actions_for=render_repository_actions_for,
repository=repository,
repository_metadata_id=repository_metadata_id,
metadata=metadata,
tool=tool,
tool_metadata_dict=tool_metadata_dict,
tool_lineage=tool_lineage,
changeset_revision=changeset_revision,
revision_label=revision_label,
changeset_revision_select_field=changeset_revision_select_field,
message=message,
status=status)
@web.expose
def view_workflow(self, trans, workflow_name, repository_metadata_id, **kwd):
"""Retrieve necessary information about a workflow from the database so that it can be displayed in an svg image."""
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
render_repository_actions_for = kwd.get('render_repository_actions_for', 'tool_shed')
if workflow_name:
workflow_name = encoding_util.tool_shed_decode(workflow_name)
repository_metadata = metadata_util.get_repository_metadata_by_id(trans.app, repository_metadata_id)
repository = repository_util.get_repository_in_tool_shed(trans.app, trans.security.encode_id(repository_metadata.repository_id))
changeset_revision = repository_metadata.changeset_revision
metadata = repository_metadata.metadata
return trans.fill_template("/webapps/tool_shed/repository/view_workflow.mako",
repository=repository,
render_repository_actions_for=render_repository_actions_for,
changeset_revision=changeset_revision,
repository_metadata_id=repository_metadata_id,
workflow_name=workflow_name,
metadata=metadata,
message=message,
status=status)
|
the-stack_106_13239
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.vision_v1p4beta1.types import product_search_service
from google.longrunning import operations_pb2 as operations # type: ignore
from google.protobuf import empty_pb2 as empty # type: ignore
from .base import ProductSearchTransport, DEFAULT_CLIENT_INFO
from .grpc import ProductSearchGrpcTransport
class ProductSearchGrpcAsyncIOTransport(ProductSearchTransport):
"""gRPC AsyncIO backend transport for ProductSearch.
Manages Products and ProductSets of reference images for use in
product search. It uses the following resource model:
- The API has a collection of
[ProductSet][google.cloud.vision.v1p4beta1.ProductSet] resources,
named ``projects/*/locations/*/productSets/*``, which acts as a
way to put different products into groups to limit
identification.
In parallel,
- The API has a collection of
[Product][google.cloud.vision.v1p4beta1.Product] resources, named
``projects/*/locations/*/products/*``
- Each [Product][google.cloud.vision.v1p4beta1.Product] has a
collection of
[ReferenceImage][google.cloud.vision.v1p4beta1.ReferenceImage]
resources, named
``projects/*/locations/*/products/*/referenceImages/*``
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "vision.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
scopes = scopes or cls.AUTH_SCOPES
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
**kwargs,
)
def __init__(
self,
*,
host: str = "vision.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def create_product_set(
self,
) -> Callable[
[product_search_service.CreateProductSetRequest],
Awaitable[product_search_service.ProductSet],
]:
r"""Return a callable for the create product set method over gRPC.
Creates and returns a new ProductSet resource.
Possible errors:
- Returns INVALID_ARGUMENT if display_name is missing, or is
longer than 4096 characters.
Returns:
Callable[[~.CreateProductSetRequest],
Awaitable[~.ProductSet]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_product_set" not in self._stubs:
self._stubs["create_product_set"] = self.grpc_channel.unary_unary(
"/google.cloud.vision.v1p4beta1.ProductSearch/CreateProductSet",
request_serializer=product_search_service.CreateProductSetRequest.serialize,
response_deserializer=product_search_service.ProductSet.deserialize,
)
return self._stubs["create_product_set"]
@property
def list_product_sets(
self,
) -> Callable[
[product_search_service.ListProductSetsRequest],
Awaitable[product_search_service.ListProductSetsResponse],
]:
r"""Return a callable for the list product sets method over gRPC.
Lists ProductSets in an unspecified order.
Possible errors:
- Returns INVALID_ARGUMENT if page_size is greater than 100, or
less than 1.
Returns:
Callable[[~.ListProductSetsRequest],
Awaitable[~.ListProductSetsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_product_sets" not in self._stubs:
self._stubs["list_product_sets"] = self.grpc_channel.unary_unary(
"/google.cloud.vision.v1p4beta1.ProductSearch/ListProductSets",
request_serializer=product_search_service.ListProductSetsRequest.serialize,
response_deserializer=product_search_service.ListProductSetsResponse.deserialize,
)
return self._stubs["list_product_sets"]
@property
def get_product_set(
self,
) -> Callable[
[product_search_service.GetProductSetRequest],
Awaitable[product_search_service.ProductSet],
]:
r"""Return a callable for the get product set method over gRPC.
Gets information associated with a ProductSet.
Possible errors:
- Returns NOT_FOUND if the ProductSet does not exist.
Returns:
Callable[[~.GetProductSetRequest],
Awaitable[~.ProductSet]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_product_set" not in self._stubs:
self._stubs["get_product_set"] = self.grpc_channel.unary_unary(
"/google.cloud.vision.v1p4beta1.ProductSearch/GetProductSet",
request_serializer=product_search_service.GetProductSetRequest.serialize,
response_deserializer=product_search_service.ProductSet.deserialize,
)
return self._stubs["get_product_set"]
@property
def update_product_set(
self,
) -> Callable[
[product_search_service.UpdateProductSetRequest],
Awaitable[product_search_service.ProductSet],
]:
r"""Return a callable for the update product set method over gRPC.
Makes changes to a ProductSet resource. Only display_name can be
updated currently.
Possible errors:
- Returns NOT_FOUND if the ProductSet does not exist.
- Returns INVALID_ARGUMENT if display_name is present in
update_mask but missing from the request or longer than 4096
characters.
Returns:
Callable[[~.UpdateProductSetRequest],
Awaitable[~.ProductSet]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_product_set" not in self._stubs:
self._stubs["update_product_set"] = self.grpc_channel.unary_unary(
"/google.cloud.vision.v1p4beta1.ProductSearch/UpdateProductSet",
request_serializer=product_search_service.UpdateProductSetRequest.serialize,
response_deserializer=product_search_service.ProductSet.deserialize,
)
return self._stubs["update_product_set"]
@property
def delete_product_set(
self,
) -> Callable[
[product_search_service.DeleteProductSetRequest], Awaitable[empty.Empty]
]:
r"""Return a callable for the delete product set method over gRPC.
Permanently deletes a ProductSet. Products and
ReferenceImages in the ProductSet are not deleted.
The actual image files are not deleted from Google Cloud
Storage.
Returns:
Callable[[~.DeleteProductSetRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_product_set" not in self._stubs:
self._stubs["delete_product_set"] = self.grpc_channel.unary_unary(
"/google.cloud.vision.v1p4beta1.ProductSearch/DeleteProductSet",
request_serializer=product_search_service.DeleteProductSetRequest.serialize,
response_deserializer=empty.Empty.FromString,
)
return self._stubs["delete_product_set"]
@property
def create_product(
self,
) -> Callable[
[product_search_service.CreateProductRequest],
Awaitable[product_search_service.Product],
]:
r"""Return a callable for the create product method over gRPC.
Creates and returns a new product resource.
Possible errors:
- Returns INVALID_ARGUMENT if display_name is missing or longer
than 4096 characters.
- Returns INVALID_ARGUMENT if description is longer than 4096
characters.
- Returns INVALID_ARGUMENT if product_category is missing or
invalid.
Returns:
Callable[[~.CreateProductRequest],
Awaitable[~.Product]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_product" not in self._stubs:
self._stubs["create_product"] = self.grpc_channel.unary_unary(
"/google.cloud.vision.v1p4beta1.ProductSearch/CreateProduct",
request_serializer=product_search_service.CreateProductRequest.serialize,
response_deserializer=product_search_service.Product.deserialize,
)
return self._stubs["create_product"]
@property
def list_products(
self,
) -> Callable[
[product_search_service.ListProductsRequest],
Awaitable[product_search_service.ListProductsResponse],
]:
r"""Return a callable for the list products method over gRPC.
Lists products in an unspecified order.
Possible errors:
- Returns INVALID_ARGUMENT if page_size is greater than 100 or
less than 1.
Returns:
Callable[[~.ListProductsRequest],
Awaitable[~.ListProductsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_products" not in self._stubs:
self._stubs["list_products"] = self.grpc_channel.unary_unary(
"/google.cloud.vision.v1p4beta1.ProductSearch/ListProducts",
request_serializer=product_search_service.ListProductsRequest.serialize,
response_deserializer=product_search_service.ListProductsResponse.deserialize,
)
return self._stubs["list_products"]
@property
def get_product(
self,
) -> Callable[
[product_search_service.GetProductRequest],
Awaitable[product_search_service.Product],
]:
r"""Return a callable for the get product method over gRPC.
Gets information associated with a Product.
Possible errors:
- Returns NOT_FOUND if the Product does not exist.
Returns:
Callable[[~.GetProductRequest],
Awaitable[~.Product]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_product" not in self._stubs:
self._stubs["get_product"] = self.grpc_channel.unary_unary(
"/google.cloud.vision.v1p4beta1.ProductSearch/GetProduct",
request_serializer=product_search_service.GetProductRequest.serialize,
response_deserializer=product_search_service.Product.deserialize,
)
return self._stubs["get_product"]
@property
def update_product(
self,
) -> Callable[
[product_search_service.UpdateProductRequest],
Awaitable[product_search_service.Product],
]:
r"""Return a callable for the update product method over gRPC.
Makes changes to a Product resource. Only the ``display_name``,
``description``, and ``labels`` fields can be updated right now.
If labels are updated, the change will not be reflected in
queries until the next index time.
Possible errors:
- Returns NOT_FOUND if the Product does not exist.
- Returns INVALID_ARGUMENT if display_name is present in
update_mask but is missing from the request or longer than
4096 characters.
- Returns INVALID_ARGUMENT if description is present in
update_mask but is longer than 4096 characters.
- Returns INVALID_ARGUMENT if product_category is present in
update_mask.
Returns:
Callable[[~.UpdateProductRequest],
Awaitable[~.Product]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_product" not in self._stubs:
self._stubs["update_product"] = self.grpc_channel.unary_unary(
"/google.cloud.vision.v1p4beta1.ProductSearch/UpdateProduct",
request_serializer=product_search_service.UpdateProductRequest.serialize,
response_deserializer=product_search_service.Product.deserialize,
)
return self._stubs["update_product"]
@property
def delete_product(
self,
) -> Callable[
[product_search_service.DeleteProductRequest], Awaitable[empty.Empty]
]:
r"""Return a callable for the delete product method over gRPC.
Permanently deletes a product and its reference
images.
Metadata of the product and all its images will be
deleted right away, but search queries against
ProductSets containing the product may still work until
all related caches are refreshed.
Returns:
Callable[[~.DeleteProductRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_product" not in self._stubs:
self._stubs["delete_product"] = self.grpc_channel.unary_unary(
"/google.cloud.vision.v1p4beta1.ProductSearch/DeleteProduct",
request_serializer=product_search_service.DeleteProductRequest.serialize,
response_deserializer=empty.Empty.FromString,
)
return self._stubs["delete_product"]
@property
def create_reference_image(
self,
) -> Callable[
[product_search_service.CreateReferenceImageRequest],
Awaitable[product_search_service.ReferenceImage],
]:
r"""Return a callable for the create reference image method over gRPC.
Creates and returns a new ReferenceImage resource.
The ``bounding_poly`` field is optional. If ``bounding_poly`` is
not specified, the system will try to detect regions of interest
in the image that are compatible with the product_category on
the parent product. If it is specified, detection is ALWAYS
skipped. The system converts polygons into non-rotated
rectangles.
Note that the pipeline will resize the image if the image
resolution is too large to process (above 50MP).
Possible errors:
- Returns INVALID_ARGUMENT if the image_uri is missing or
longer than 4096 characters.
- Returns INVALID_ARGUMENT if the product does not exist.
- Returns INVALID_ARGUMENT if bounding_poly is not provided,
and nothing compatible with the parent product's
product_category is detected.
- Returns INVALID_ARGUMENT if bounding_poly contains more than
10 polygons.
Returns:
Callable[[~.CreateReferenceImageRequest],
Awaitable[~.ReferenceImage]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_reference_image" not in self._stubs:
self._stubs["create_reference_image"] = self.grpc_channel.unary_unary(
"/google.cloud.vision.v1p4beta1.ProductSearch/CreateReferenceImage",
request_serializer=product_search_service.CreateReferenceImageRequest.serialize,
response_deserializer=product_search_service.ReferenceImage.deserialize,
)
return self._stubs["create_reference_image"]
@property
def delete_reference_image(
self,
) -> Callable[
[product_search_service.DeleteReferenceImageRequest], Awaitable[empty.Empty]
]:
r"""Return a callable for the delete reference image method over gRPC.
Permanently deletes a reference image.
The image metadata will be deleted right away, but
search queries against ProductSets containing the image
may still work until all related caches are refreshed.
The actual image files are not deleted from Google Cloud
Storage.
Returns:
Callable[[~.DeleteReferenceImageRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_reference_image" not in self._stubs:
self._stubs["delete_reference_image"] = self.grpc_channel.unary_unary(
"/google.cloud.vision.v1p4beta1.ProductSearch/DeleteReferenceImage",
request_serializer=product_search_service.DeleteReferenceImageRequest.serialize,
response_deserializer=empty.Empty.FromString,
)
return self._stubs["delete_reference_image"]
@property
def list_reference_images(
self,
) -> Callable[
[product_search_service.ListReferenceImagesRequest],
Awaitable[product_search_service.ListReferenceImagesResponse],
]:
r"""Return a callable for the list reference images method over gRPC.
Lists reference images.
Possible errors:
- Returns NOT_FOUND if the parent product does not exist.
- Returns INVALID_ARGUMENT if the page_size is greater than
100, or less than 1.
Returns:
Callable[[~.ListReferenceImagesRequest],
Awaitable[~.ListReferenceImagesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_reference_images" not in self._stubs:
self._stubs["list_reference_images"] = self.grpc_channel.unary_unary(
"/google.cloud.vision.v1p4beta1.ProductSearch/ListReferenceImages",
request_serializer=product_search_service.ListReferenceImagesRequest.serialize,
response_deserializer=product_search_service.ListReferenceImagesResponse.deserialize,
)
return self._stubs["list_reference_images"]
@property
def get_reference_image(
self,
) -> Callable[
[product_search_service.GetReferenceImageRequest],
Awaitable[product_search_service.ReferenceImage],
]:
r"""Return a callable for the get reference image method over gRPC.
Gets information associated with a ReferenceImage.
Possible errors:
- Returns NOT_FOUND if the specified image does not exist.
Returns:
Callable[[~.GetReferenceImageRequest],
Awaitable[~.ReferenceImage]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_reference_image" not in self._stubs:
self._stubs["get_reference_image"] = self.grpc_channel.unary_unary(
"/google.cloud.vision.v1p4beta1.ProductSearch/GetReferenceImage",
request_serializer=product_search_service.GetReferenceImageRequest.serialize,
response_deserializer=product_search_service.ReferenceImage.deserialize,
)
return self._stubs["get_reference_image"]
@property
def add_product_to_product_set(
self,
) -> Callable[
[product_search_service.AddProductToProductSetRequest], Awaitable[empty.Empty]
]:
r"""Return a callable for the add product to product set method over gRPC.
Adds a Product to the specified ProductSet. If the Product is
already present, no change is made.
One Product can be added to at most 100 ProductSets.
Possible errors:
- Returns NOT_FOUND if the Product or the ProductSet doesn't
exist.
Returns:
Callable[[~.AddProductToProductSetRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "add_product_to_product_set" not in self._stubs:
self._stubs["add_product_to_product_set"] = self.grpc_channel.unary_unary(
"/google.cloud.vision.v1p4beta1.ProductSearch/AddProductToProductSet",
request_serializer=product_search_service.AddProductToProductSetRequest.serialize,
response_deserializer=empty.Empty.FromString,
)
return self._stubs["add_product_to_product_set"]
@property
def remove_product_from_product_set(
self,
) -> Callable[
[product_search_service.RemoveProductFromProductSetRequest],
Awaitable[empty.Empty],
]:
r"""Return a callable for the remove product from product
set method over gRPC.
Removes a Product from the specified ProductSet.
Returns:
Callable[[~.RemoveProductFromProductSetRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "remove_product_from_product_set" not in self._stubs:
self._stubs[
"remove_product_from_product_set"
] = self.grpc_channel.unary_unary(
"/google.cloud.vision.v1p4beta1.ProductSearch/RemoveProductFromProductSet",
request_serializer=product_search_service.RemoveProductFromProductSetRequest.serialize,
response_deserializer=empty.Empty.FromString,
)
return self._stubs["remove_product_from_product_set"]
@property
def list_products_in_product_set(
self,
) -> Callable[
[product_search_service.ListProductsInProductSetRequest],
Awaitable[product_search_service.ListProductsInProductSetResponse],
]:
r"""Return a callable for the list products in product set method over gRPC.
Lists the Products in a ProductSet, in an unspecified order. If
the ProductSet does not exist, the products field of the
response will be empty.
Possible errors:
- Returns INVALID_ARGUMENT if page_size is greater than 100 or
less than 1.
Returns:
Callable[[~.ListProductsInProductSetRequest],
Awaitable[~.ListProductsInProductSetResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_products_in_product_set" not in self._stubs:
self._stubs["list_products_in_product_set"] = self.grpc_channel.unary_unary(
"/google.cloud.vision.v1p4beta1.ProductSearch/ListProductsInProductSet",
request_serializer=product_search_service.ListProductsInProductSetRequest.serialize,
response_deserializer=product_search_service.ListProductsInProductSetResponse.deserialize,
)
return self._stubs["list_products_in_product_set"]
@property
def import_product_sets(
self,
) -> Callable[
[product_search_service.ImportProductSetsRequest],
Awaitable[operations.Operation],
]:
r"""Return a callable for the import product sets method over gRPC.
Asynchronous API that imports a list of reference images to
specified product sets based on a list of image information.
The [google.longrunning.Operation][google.longrunning.Operation]
API can be used to keep track of the progress and results of the
request. ``Operation.metadata`` contains
``BatchOperationMetadata``. (progress) ``Operation.response``
contains ``ImportProductSetsResponse``. (results)
The input source of this method is a csv file on Google Cloud
Storage. For the format of the csv file please see
[ImportProductSetsGcsSource.csv_file_uri][google.cloud.vision.v1p4beta1.ImportProductSetsGcsSource.csv_file_uri].
Returns:
Callable[[~.ImportProductSetsRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "import_product_sets" not in self._stubs:
self._stubs["import_product_sets"] = self.grpc_channel.unary_unary(
"/google.cloud.vision.v1p4beta1.ProductSearch/ImportProductSets",
request_serializer=product_search_service.ImportProductSetsRequest.serialize,
response_deserializer=operations.Operation.FromString,
)
return self._stubs["import_product_sets"]
@property
def purge_products(
self,
) -> Callable[
[product_search_service.PurgeProductsRequest], Awaitable[operations.Operation]
]:
r"""Return a callable for the purge products method over gRPC.
Asynchronous API to delete all Products in a ProductSet or all
Products that are in no ProductSet.
If a Product is a member of the specified ProductSet in addition
to other ProductSets, the Product will still be deleted.
It is recommended to not delete the specified ProductSet until
after this operation has completed. It is also recommended to
not add any of the Products involved in the batch delete to a
new ProductSet while this operation is running because those
Products may still end up deleted.
It's not possible to undo the PurgeProducts operation.
Therefore, it is recommended to keep the csv files used in
ImportProductSets (if that was how you originally built the
Product Set) before starting PurgeProducts, in case you need to
re-import the data after deletion.
If the plan is to purge all of the Products from a ProductSet
and then re-use the empty ProductSet to re-import new Products
into the empty ProductSet, you must wait until the PurgeProducts
operation has finished for that ProductSet.
The [google.longrunning.Operation][google.longrunning.Operation]
API can be used to keep track of the progress and results of the
request. ``Operation.metadata`` contains
``BatchOperationMetadata``. (progress)
Returns:
Callable[[~.PurgeProductsRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "purge_products" not in self._stubs:
self._stubs["purge_products"] = self.grpc_channel.unary_unary(
"/google.cloud.vision.v1p4beta1.ProductSearch/PurgeProducts",
request_serializer=product_search_service.PurgeProductsRequest.serialize,
response_deserializer=operations.Operation.FromString,
)
return self._stubs["purge_products"]
__all__ = ("ProductSearchGrpcAsyncIOTransport",)
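if __name__ == "__main__":
    # Hedged usage sketch, not part of the generated module: this transport is
    # normally constructed and owned by the generated ProductSearch client, but
    # it can be exercised directly as below. It assumes application default
    # credentials are configured; the project and location in the parent
    # resource name are placeholders.
    import asyncio
    async def _list_product_sets_demo():
        transport = ProductSearchGrpcAsyncIOTransport(host="vision.googleapis.com")
        request = product_search_service.ListProductSetsRequest(
            parent="projects/my-project/locations/us-west1"
        )
        response = await transport.list_product_sets(request)
        for product_set in response.product_sets:
            print(product_set.name)
    asyncio.run(_list_product_sets_demo())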
|
the-stack_106_13240
|
import logging
import numpy as np
import pandas as pd
from collections import abc
from sklearn import utils
from typing import Dict, Iterable
from fairness_benchmark.gan_metadata import GANMetadata
from fairness_benchmark.generation_interval import GenerationInterval
from fairness_benchmark.model import load_gan
from fairness_benchmark.prevalence import feature_prevalence, Prevalence
from fairness_benchmark.time import TemporalDistribution, TimeInterval
from fairness_benchmark import verifications
class SampleGenerator:
"""
Class to generate the sample of data, using pre-trained GANs
"""
def __init__(self, *gans_metadata: GANMetadata):
"""
Initialize the generator object with information regarding the GANs that are
going to be used when generating synthetic data
Parameters
----------
gans_metadata: Iterable[GANMetadata]
metadata about all of the GANs that the generator may use when called
"""
# Copy the gans_metadata content to a list to ensure it is not exhausted if
# it can only be iterated once
gans_metadata = list(gans_metadata)
verifications.verify_iterable_parameter_type(gans_metadata, 'gans_metadata', object, GANMetadata)
self.__gans_metadata = {m.save_path: m for m in gans_metadata}
def __call__(self, *generation_intervals: GenerationInterval) -> pd.DataFrame:
"""
Method to generate a given sample of data, according to the received specifications
Parameters
----------
generation_intervals: Iterable[GenerationInterval]
Intervals to specify the characteristics of the generated data. A set of records is
generated according to each interval. Then, the records are all concatenated into
a single DataFrame.
Returns
-------
DataFrame with the data generated from the interval.
"""
generation_intervals = list(generation_intervals)
verifications.verify_iterable_parameter_type(
generation_intervals, 'generation_intervals',
abc.Iterable,
GenerationInterval)
synthetic_dataframes = (self.__generate_data_for_interval(interval) for interval in generation_intervals)
return pd.concat(synthetic_dataframes)
def __generate_data_for_interval(self, generation_interval: GenerationInterval) -> pd.DataFrame:
gans_paths = generation_interval.gans_paths
sample_size = generation_interval.sample_size
target_feature = generation_interval.target_feature
target_prevalence = generation_interval.prevalence
time_interval = generation_interval.time_interval
filter_obj = generation_interval.filter
transform_obj = generation_interval.transform
if len(gans_paths) == 0:
raise ValueError('Empty set of paths')
if len(gans_paths) > 1:
raise ValueError('Multiple gans_paths not yet supported. Please specify only a single gan.')
gan_path = next(iter(gans_paths))
metadata = self.__gans_metadata.get(gan_path)
if not metadata:
raise ValueError(f'Metadata not found for model saved at \'{gan_path}\'')
model = load_gan(gan_path)
target_group_sizes, target_prevalence = self.__compute_target_group_sizes(target_prevalence, sample_size)
data = model.sample(sample_size)
data = filter_obj.apply(data)
data_prevalence = feature_prevalence(data, target_feature)
while not (len(data) == sample_size and data_prevalence.is_close(target_prevalence)):
new_sample = model.sample(sample_size)
new_sample = filter_obj.apply(new_sample)
data = data.append(new_sample)
data = self.__subsample_higher_count_groups(data, target_feature, target_group_sizes)
data_prevalence = feature_prevalence(data, target_feature)
data = utils.shuffle(data)
# Reset index and drop old index to lose order before shuffle
data.reset_index(inplace=True, drop=True)
data['timestamp'] = self.__generate_synthetic_timestamp_column(metadata, time_interval, sample_size)
data['timestamp'] = data['timestamp'].dt.round('1s')
data = transform_obj.apply(data)
return data
@classmethod
def __compute_target_group_sizes(cls, target_prevalence, sample_size):
target_group_sizes = {value: int(target_prevalence[value] * sample_size) for value in target_prevalence}
group_sizes_total = sum(target_group_sizes.values())
# This can happen due to rounding errors, when we have a group size that is a fraction
# due to the multiplication group prevalence and sample size
#
# Example: Prevalence(0.5, 0.5) and group size 9 yield 4.5, 4.5 as the group sizes
# which needs to be corrected
if group_sizes_total != sample_size:
logging.warning(
'Rounding errors while splitting sample size by group. Correcting by distributing missing elements.'
)
number_of_missing_elements = sample_size - group_sizes_total
groups_to_increment = np.random.choice(
list(target_group_sizes.keys()),
number_of_missing_elements,
replace=False
)
for group in groups_to_increment:
target_group_sizes[group] += 1
target_prevalence = Prevalence({
k: v / sample_size for k, v in target_group_sizes.items()
})
logging.warning('Updated target prevalence to %s', target_prevalence)
return target_group_sizes, target_prevalence
@classmethod
def __subsample_higher_count_groups(
cls,
data: pd.DataFrame,
target_feature: str,
target_group_sizes: dict) -> pd.DataFrame:
data_group_sizes = cls.__compute_df_group_sizes(data, target_feature)
for feature_value in data_group_sizes:
if feature_value not in target_group_sizes:
raise ValueError(
f'Generated feature {target_feature} has value \'{feature_value}\' '
f'that is not in target_prevalence.'
)
data_feature_size = data_group_sizes[feature_value]
target_feature_size = target_group_sizes[feature_value]
if data_feature_size > target_feature_size:
data_with_feature = data[data[target_feature] == feature_value]
data_without_feature = data[data[target_feature] != feature_value]
subsample_data = data_with_feature.sample(target_feature_size)
data = pd.concat((data_without_feature, subsample_data))
return data
@classmethod
def __compute_df_group_sizes(cls, data: pd.DataFrame, feature: str) -> Dict:
data_group_sizes = data[feature].value_counts()
data_group_sizes = {k: v for k, v in data_group_sizes.iteritems()}
return data_group_sizes
@classmethod
def __generate_synthetic_timestamp_column(
cls,
gan_metadata: GANMetadata,
time_interval: TimeInterval,
size):
train_data = pd.read_csv(gan_metadata.train_dataset_path, index_col=0)
train_timestamps = train_data[gan_metadata.timestamp_column]
temporal_distribution = TemporalDistribution()
temporal_distribution.fit(train_timestamps)
start = time_interval.start.astype(int) // 1000
end = time_interval.end.astype(int) // 1000
synthetic_timestamps = temporal_distribution.generate_sample(
size,
start_date=start,
end_date=end)
return pd.to_datetime(synthetic_timestamps, unit='ms')
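if __name__ == "__main__":
    # Hedged, self-contained illustration of the rounding correction done in
    # __compute_target_group_sizes above: a 0.5/0.5 prevalence over 9 rows gives
    # fractional group sizes, and the missing element is handed to a randomly
    # chosen group. Plain dicts and numpy only; the GANMetadata and
    # GenerationInterval classes are not involved.
    sample_size = 9
    target_prevalence = {"A": 0.5, "B": 0.5}
    group_sizes = {k: int(v * sample_size) for k, v in target_prevalence.items()}
    missing = sample_size - sum(group_sizes.values())
    for group in np.random.choice(list(group_sizes), missing, replace=False):
        group_sizes[group] += 1
    print(group_sizes)  # either {'A': 5, 'B': 4} or {'A': 4, 'B': 5}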
|
the-stack_106_13241
|
import serial.rs485
# Open the RS-485 port; RS485Settings(False, True) drives RTS low while
# transmitting and high while receiving for direction control.
ser = serial.rs485.RS485(port='/dev/ttyAMA0', baudrate=2400)
ser.rs485_mode = serial.rs485.RS485Settings(False, True)
# Send a short test message, then echo every received byte back onto the bus
# while also printing it locally.
ser.write('a test'.encode('utf-8'))
while True:
    c = ser.read(1)
    ser.write(c)
    print(c, end='')
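# Hedged usage note: this is an echo test. Run it on a host (e.g. a Raspberry
# Pi) whose /dev/ttyAMA0 UART is wired to an RS-485 transceiver; every byte
# received at 2400 baud is echoed back onto the bus and printed locally.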
|
the-stack_106_13245
|
END = "#"
class Trie:
def __init__(self):
self._trie = {}
def insert_word(self, text):
trie = self._trie
for char in text:
if char not in trie:
trie[char] = {}
trie = trie[char]
trie[END] = True
def find_word(self, prefix):
trie = self._trie
for char in prefix:
if char in trie:
trie = trie[char]
else:
return []
return self._elements(trie)
def _elements(self, d):
result = []
for c, v in d.items():
sub_result = [" "] if c == END else [c + s for s in self._elements(v)]
result.extend(sub_result)
return tuple(result)
trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def autocomplete_using_trie(s):
"""
>>> trie = Trie()
>>> for word in words:
... trie.insert_word(word)
...
>>> matches = autocomplete_using_trie("de")
"detergent " in matches
True
"dog " in matches
False
"""
suffixes = trie.find_word(s)
return tuple(s + w for w in suffixes)
def main():
print(autocomplete_using_trie("de"))
if __name__ == "__main__":
main()
|
the-stack_106_13246
|
''' Core transformer logic. '''
from pliers import config
from pliers.stimuli.base import Stim, _log_transformation, load_stims
from pliers.stimuli.compound import CompoundStim
from pliers.utils import (progress_bar_wrapper, isiterable,
isgenerator, listify, batch_iterable,
attempt_to_import, set_iterable_type)
import pliers
from six import with_metaclass, string_types
from abc import ABCMeta, abstractmethod, abstractproperty
import importlib
import logging
from functools import wraps
multiprocessing = attempt_to_import('pathos.multiprocessing',
'multiprocessing', ['ProcessingPool'])
_cache = {}
class Transformer(with_metaclass(ABCMeta)):
''' Base class for all pliers Transformers.
Args:
name (str): Optional name of Transformer instance. If None (default),
the class name is used.
'''
_log_attributes = ()
_loggable = True
VERSION = '0.1'
# Stim types that *can* be passed as input, but aren't mandatory. This
# allows for disjunctive specification; e.g., if _input_type is empty
# and _optional_input_type is (AudioStim, TextStim), then _at least_ one
# of the two must be passed. If both are specified in _input_type, then
# the input would have to be a CompoundStim with both audio and text slots.
_optional_input_type = ()
def __init__(self, name=None, **kwargs):
if name is None:
name = self.__class__.__name__
self.name = name
super(Transformer, self).__init__(**kwargs)
def _memoize(transform):
@wraps(transform)
def wrapper(self, stim, *args, **kwargs):
use_cache = config.get_option('cache_transformers') \
and isinstance(stim, (Stim, string_types))
if use_cache:
key = hash((hash(self), hash(stim)))
if key in _cache:
return _cache[key]
result = transform(self, stim, *args, **kwargs)
if use_cache:
if isgenerator(result):
result = list(result)
_cache[key] = result
return result
return wrapper
@_memoize
def transform(self, stims, validation='strict', *args, **kwargs):
''' Executes the transformation on the passed stim(s).
Args:
stims (str, Stim, list): One or more stimuli to process. Must be
one of:
- A string giving the path to a file that can be read in
as a Stim (e.g., a .txt file, .jpg image, etc.)
- A Stim instance of any type.
- An iterable of stims, where each element is either a
string or a Stim.
validation (str): String specifying how validation errors should
be handled. Must be one of:
- 'strict': Raise an exception on any validation error
- 'warn': Issue a warning for all validation errors
- 'loose': Silently ignore all validation errors
args: Optional positional arguments to pass onto the internal
_transform call.
kwargs: Optional positional arguments to pass onto the internal
_transform call.
'''
if isinstance(stims, string_types):
stims = load_stims(stims)
# If stims is a CompoundStim and the Transformer is expecting a single
# input type, extract all matching stims
if isinstance(stims, CompoundStim) and not isinstance(self._input_type, tuple):
stims = stims.get_stim(self._input_type, return_all=True)
if not stims:
raise ValueError("No stims of class %s found in the provided"
"CompoundStim instance." % self._input_type)
# If stims is an iterable, naively loop over elements, removing
# invalid results if needed
if isiterable(stims):
iters = self._iterate(stims, validation=validation, *args,
**kwargs)
if config.get_option('drop_bad_extractor_results'):
iters = (i for i in iters if i is not None)
iters = progress_bar_wrapper(iters, desc='Stim')
return set_iterable_type(iters)
# Validate stim, and then either pass it directly to the Transformer
# or, if a conversion occurred, recurse.
else:
try:
validated_stim = self._validate(stims)
except TypeError as err:
if validation == 'strict':
raise err
elif validation == 'warn':
                    logging.warning(str(err))
return
elif validation == 'loose':
return
# If a conversion occurred during validation, we recurse
if stims is not validated_stim:
return self.transform(validated_stim, *args, **kwargs)
else:
result = self._transform(validated_stim, *args, **kwargs)
result = _log_transformation(validated_stim, result, self)
if isgenerator(result):
result = list(result)
self._propagate_context(validated_stim, result)
return result
def _validate(self, stim):
# Checks whether the current Transformer can handle the passed Stim.
# If not, attempts a dynamic conversion before failing.
if not self._stim_matches_input_types(stim):
from pliers.converters.base import get_converter
in_type = self._input_type if self._input_type \
else self._optional_input_type
converter = get_converter(type(stim), in_type)
if converter:
_old_stim = stim
stim = converter.transform(stim)
stim = _log_transformation(_old_stim, stim, converter, True)
else:
msg = ("Transformers of type %s can only be applied to stimuli"
" of type(s) %s (not type %s), and no applicable "
"Converter was found.")
msg = msg % (self.__class__.__name__, in_type,
stim.__class__.__name__)
raise TypeError(msg)
return stim
def _stim_matches_input_types(self, stim):
# Checks if passed Stim meets all _input_type and _optional_input_type
# specifications.
mandatory = tuple(listify(self._input_type))
optional = tuple(listify(self._optional_input_type))
if isinstance(stim, CompoundStim):
return stim.has_types(mandatory) or \
(not mandatory and stim.has_types(optional, False))
if len(mandatory) > 1:
msg = ("Transformer of class %s requires multiple mandatory "
"inputs, so the passed input Stim must be a CompoundStim"
"--which it isn't." % self.__class__.__name__)
logging.warning(msg)
return False
return isinstance(stim, mandatory) or (not mandatory and
isinstance(stim, optional))
def _iterate(self, stims, *args, **kwargs):
if config.get_option('parallelize') and multiprocessing is not None:
def _transform(s):
return self.transform(s, *args, **kwargs)
n_jobs = config.get_option('n_jobs')
return multiprocessing.ProcessingPool(n_jobs) \
.map(_transform, stims)
return (t for t in (self.transform(s, *args, **kwargs)
for s in stims) if t)
def _propagate_context(self, stim, result):
if isiterable(result):
for r in result:
self._propagate_context(stim, r)
else:
if result.onset is None:
result.onset = stim.onset
if result.duration is None:
result.duration = stim.duration
if result.order is None:
result.order = stim.order
@abstractmethod
def _transform(self, stim):
pass
@abstractproperty
def _input_type(self):
pass
def __hash__(self):
tr_attrs = [getattr(self, attr) for attr in self._log_attributes]
return hash(self.name + str(dict(zip(self._log_attributes, tr_attrs))))
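# Hedged illustration, not part of the module: a concrete Transformer only has
# to provide the `_input_type` property and the `_transform` method consumed by
# `transform()` above. The subclass below is purely hypothetical.
#
#     class IdentityTransformer(Transformer):
#         _input_type = Stim
#         def _transform(self, stim):
#             return stim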
class BatchTransformerMixin(Transformer):
''' A mixin that overrides the default implicit iteration behavior. Use
whenever batch processing of multiple stimuli should be handled within the
_transform method rather than applying a naive loop--e.g., for API
Extractors that can handle list inputs.
Args:
batch_size (int): Number of Stims to process in each batch.
args, kwargs: Optional positional and keyword arguments to pass onto
the base Transformer initializer.
'''
def __init__(self, batch_size=None, *args, **kwargs):
if batch_size:
self._batch_size = batch_size
super(BatchTransformerMixin, self).__init__(*args, **kwargs)
def _iterate(self, stims, validation='strict', *args, **kwargs):
batches = batch_iterable(stims, self._batch_size)
results = []
for batch in progress_bar_wrapper(batches):
use_cache = config.get_option('cache_transformers')
target_inds = {}
non_cached = []
for stim in batch:
key = hash((hash(self), hash(stim)))
# If using the cache, only transform stims that aren't in the
# cache and haven't already appeared in the batch
if not (use_cache and (key in _cache or key in target_inds)):
target_inds[key] = len(non_cached)
non_cached.append(stim)
# _transform will likely fail if given an empty list
if len(non_cached) > 0:
batch_results = self._transform(non_cached, *args, **kwargs)
else:
batch_results = []
for i, stim in enumerate(batch):
key = hash((hash(self), hash(stim)))
# Use the target index to get the result from batch_results
if key in target_inds:
result = batch_results[target_inds[key]]
result = _log_transformation(stim, result, self)
self._propagate_context(stim, result)
if use_cache:
if isgenerator(result):
result = list(result)
_cache[key] = result
results.append(result)
# Otherwise, the result should be in the cache
else:
results.append(_cache[key])
return results
def _transform(self, stim, *args, **kwargs):
stims = listify(stim)
if all(self._stim_matches_input_types(s) for s in stims):
result = super(BatchTransformerMixin, self) \
._transform(stims, *args, **kwargs)
if isiterable(stim):
return result
else:
return result[0]
else:
return list(super(BatchTransformerMixin, self)
._iterate(stims, *args, **kwargs))
def get_transformer(name, base=None, *args, **kwargs):
''' Scans list of currently available Transformer classes and returns an
instantiation of the first one whose name perfectly matches
(case-insensitive).
Args:
name (str): The name of the transformer to retrieve. Case-insensitive;
e.g., 'stftextractor' or 'CornerDetectionExtractor'.
base (str, list): Optional name of transformer modules to search.
Valid values are 'converters', 'extractors', and 'filters'.
args, kwargs: Optional positional or keyword arguments to pass onto
the Transformer.
'''
name = name.lower()
# Default to searching all kinds of Transformers
if base is None:
base = ['extractors', 'converters', 'filters']
base = listify(base)
for b in base:
importlib.import_module('pliers.%s' % b)
mod = getattr(pliers, b)
classes = getattr(mod, '__all__')
for cls_name in classes:
if cls_name.lower() == name.lower():
cls = getattr(mod, cls_name)
return cls(*args, **kwargs)
raise KeyError("No transformer named '%s' found." % name)
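if __name__ == "__main__":
    # Hedged usage sketch, not part of the module itself: resolve a Transformer
    # by case-insensitive name as described in the get_transformer docstring and
    # apply it to a stimulus path. The extractor name is taken from the
    # docstring's own example and the image path is a placeholder, so whether
    # this runs depends on the installed pliers version and on local files.
    try:
        extractor = get_transformer('CornerDetectionExtractor')
        print(extractor.transform('some_image.jpg'))
    except Exception as err:  # deliberately broad; this is only a sketch
        print('Could not run the sketch in this environment: %s' % err)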
|
the-stack_106_13247
|
import api
import json
from flask import Flask
from flask import request
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
@app.route('/')
def hello_world():
return json.dumps(api.get_containers())
@app.route('/<id>', methods=['PUT'])
def start(id):
if request.method == 'PUT':
print('start')
print(request.data)
# api.start_container(id)
return json.dumps({"test": "tre"})
# return json.dumps(api.stop_container(id))
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
app.run()
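# Hedged usage sketch: with the app running locally on Flask's default port
# 5000, the routes above can be exercised roughly like this (the container id
# is a placeholder, not a real Docker id):
#
#   curl http://127.0.0.1:5000/
#   curl -X PUT http://127.0.0.1:5000/abc123 -d '{"action": "start"}'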
|
the-stack_106_13249
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
from setuptools import find_packages, setup
# Package meta-data.
NAME = 'sqlflow'
DESCRIPTION = 'SQLFlow client library for Python.'
URL = 'https://github.com/sql-machine-learning/sqlflow'
EMAIL = '[email protected]'
AUTHOR = 'Kuisong Tong'
REQUIRES_PYTHON = '>=3.5.0'
VERSION = None
# What packages are required for this module to be executed?
REQUIRED = [
'protobuf >=3.6, <4',
'grpcio >=1.17, <2',
'ipython>=1.0',
'prettytable',
]
SETUP_REQUIRED = [
'pytest-runner'
]
TEST_REQUIRED = [
'pytest',
]
# What packages are optional?
EXTRAS = {
}
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Load the package's _version.py module as a dictionary.
about = {}
if not VERSION:
with open(os.path.join(here, NAME, '_version.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
# Where the magic happens:
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=('tests',)),
package_data={'sqlflow': ['proto/*.py']},
entry_points={
'console_scripts': ['sqlflow = sqlflow.__main__:main'],
},
install_requires=REQUIRED,
setup_requires=SETUP_REQUIRED,
tests_require=TEST_REQUIRED,
extras_require=EXTRAS,
license='Apache License 2.0',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
)
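# Hedged usage note: with this setup module, a local install is typically done
# with `pip install .` from the repository root, after which the `sqlflow`
# console script declared in entry_points above should be on the PATH. The
# exact release workflow of the upstream project may differ.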
|
the-stack_106_13251
|
import os
import bpy
import colorsys
from mathutils import Matrix
from mathutils import Vector
from photogrammetry_importer.blender_utility.object_utility import (
add_collection,
add_obj,
)
from photogrammetry_importer.opengl.utility import draw_coords
from photogrammetry_importer.utility.timing_utility import StopWatch
from photogrammetry_importer.utility.type_utility import is_int
from photogrammetry_importer.blender_utility.logging_utility import log_report
def compute_principal_point_shift(camera, relativ_to_largest_extend):
"""Return the shift of the principal point in the 3D view port."""
# https://blender.stackexchange.com/questions/58235/what-are-the-units-for-camera-shift
width = camera.width
height = camera.height
p_x, p_y = camera.get_principal_point()
if relativ_to_largest_extend:
width_denominator = max(width, height)
height_denominator = max(width, height)
else:
width_denominator = width
height_denominator = height
    # Note that the direction of the y coordinate is inverted, reflecting the
    # difference between the computer vision and computer graphics coordinate
    # systems.
shift_x = float((width / 2.0 - p_x) / float(width_denominator))
shift_y = -float((height / 2.0 - p_y) / float(height_denominator))
# log_report('INFO', 'shift_x: ' + str(shift_x), op)
# log_report('INFO', 'shift_y: ' + str(shift_y), op)
return shift_x, shift_y
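# Quick worked example with hypothetical numbers: for a 2000x1000 image whose
# principal point is at (980, 510) and relativ_to_largest_extend=True, both
# denominators are max(2000, 1000) = 2000, so
# shift_x = (1000 - 980) / 2000 = 0.01 and shift_y = -(500 - 510) / 2000 = 0.005.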
def adjust_render_settings_if_possible(cameras, op=None):
"""Adjust the render settings according to the camera parameters."""
if len(cameras) == 0:
return
possible = True
width = cameras[0].width
height = cameras[0].height
# Check if the cameras have same resolution
for cam in cameras:
if cam.width != width or cam.height != height:
possible = False
break
if possible:
bpy.context.scene.render.resolution_x = width
bpy.context.scene.render.resolution_y = height
else:
log_report(
"WARNING",
"Adjustment of render settings not possible, "
+ "since the reconstructed cameras show different resolutions.",
op,
)
def _add_camera_data(camera, camera_name):
"""Add a camera as Blender data entity."""
bcamera = bpy.data.cameras.new(camera_name)
if camera.is_panoramic():
bcamera.type = "PANO"
bcamera.cycles.panorama_type = camera.get_panoramic_type()
# Adjust field of view
bcamera.angle = camera.get_field_of_view()
bcamera.shift_x, bcamera.shift_y = compute_principal_point_shift(
camera, relativ_to_largest_extend=True
)
return bcamera
def add_camera_object(
camera, camera_name, camera_collection, copy_matrix_world=True
):
"""Add a camera as Blender object."""
bcamera = _add_camera_data(camera, camera_name)
camera_object = add_obj(bcamera, camera_name, camera_collection)
if copy_matrix_world:
camera_object.matrix_world = compute_camera_matrix_world(camera)
return camera_object
def _color_from_value(val, min_val, max_val):
# source: http://stackoverflow.com/questions/10901085/range-values-to-pseudocolor
# convert val in range minval..maxval to the range 0..120 degrees which
# correspond to the colors red..green in the HSV colorspace
h = (float(val - min_val) / (max_val - min_val)) * 120
# convert hsv color (h, 1, 1) to its rgb equivalent
# note: the hsv_to_rgb() function expects h to be in the range 0..1 and not
# in 0..360
r, g, b = colorsys.hsv_to_rgb(h / 360, 1.0, 1.0)
return r, g, b, 1
def _get_camera_obj_gui_str(camera):
"""Get a string suitable for Blender's GUI describing the camera."""
# Replace special characters
# image_fp_clean = image_fp.replace("/", "_").replace("\\", "_").replace(":", "_")
image_fp_stem = os.path.splitext(camera.get_relative_fp())[0]
# Blender supports only object names with length 63
# However, we need also space for additional suffixes
image_fp_suffix = image_fp_stem[-40:]
return image_fp_suffix
def invert_y_and_z_axis(input_matrix_or_vector):
"""Invert the y and z axis of a given matrix or vector.
Many SfM / MVS libraries use coordinate systems that differ from Blender's
coordinate system in the y and the z coordinate. This function inverts the
y and the z coordinates in the corresponding matrix / vector entries, which
is equivalent to a rotation by 180 degree around the x axis.
"""
output_matrix_or_vector = input_matrix_or_vector.copy()
output_matrix_or_vector[1] = -output_matrix_or_vector[1]
output_matrix_or_vector[2] = -output_matrix_or_vector[2]
return output_matrix_or_vector
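# A minimal illustration, assuming a plain mathutils.Vector: inverting the y and
# z components of (1, 2, 3) yields (1, -2, -3), which is the same vector rotated
# by 180 degrees around the x axis.
#
#   from mathutils import Vector
#   tuple(invert_y_and_z_axis(Vector((1, 2, 3))))   # -> (1.0, -2.0, -3.0)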
def _get_world_matrix_from_translation_vec(translation_vec, rotation):
t = Vector(translation_vec).to_4d()
camera_rotation = Matrix()
for row in range(3):
camera_rotation[row][0:3] = rotation[row]
camera_rotation.transpose() # = Inverse rotation
# Camera position in world coordinates
camera_center = -(camera_rotation @ t)
camera_center[3] = 1.0
camera_rotation = camera_rotation.copy()
camera_rotation.col[
3
] = camera_center # Set translation to camera position
return camera_rotation
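# Sketch of the math above: for an extrinsic matrix [R | t] mapping world to
# camera coordinates, the camera center in world coordinates is C = -R^T @ t.
# The returned 4x4 matrix is R^T with C written into its last column, which is
# the transform Blender expects as matrix_world (before any axis convention fix).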
def compute_camera_matrix_world(camera, convert_coordinate_system=True):
"""Compute Blender's :code:`matrix_world` for a given camera."""
translation_vec = camera.get_translation_vec()
rotation_mat = camera.get_rotation_as_rotation_mat()
if convert_coordinate_system:
        # Transform the camera coordinate system from the computer vision
        # camera coordinate frame to the computer graphics (Blender) camera
        # coordinate frame. That is, rotate the camera matrix around the
        # x axis by 180 degrees, i.e. invert the y and z axis.
rotation_mat = invert_y_and_z_axis(rotation_mat)
translation_vec = invert_y_and_z_axis(translation_vec)
return _get_world_matrix_from_translation_vec(
translation_vec, rotation_mat
)
def add_cameras(
cameras,
parent_collection,
add_background_images=False,
add_image_planes=False,
add_depth_maps_as_point_cloud=True,
convert_camera_coordinate_system=True,
camera_collection_name="Cameras",
image_plane_collection_name="Image Planes",
depth_map_collection_name="Depth Maps",
camera_scale=1.0,
image_plane_transparency=0.5,
add_image_plane_emission=True,
depth_map_point_size=1,
use_default_depth_map_color=False,
depth_map_default_color=(1.0, 0.0, 0.0),
depth_map_display_sparsity=10,
depth_map_id_or_name_str="",
op=None,
):
"""Add a set of reconstructed cameras to Blender's 3D view port."""
log_report("INFO", "Adding Cameras: ...", op)
stop_watch = StopWatch()
camera_collection = add_collection(
camera_collection_name, parent_collection
)
if add_image_planes:
log_report("INFO", "Adding image planes: True", op)
image_planes_collection = add_collection(
image_plane_collection_name, parent_collection
)
camera_image_plane_pair_collection = add_collection(
"Camera Image Plane Pair Collection", parent_collection
)
else:
log_report("INFO", "Adding image planes: False", op)
if add_depth_maps_as_point_cloud:
log_report("INFO", "Adding depth maps as point cloud: True", op)
depth_map_collection = add_collection(
depth_map_collection_name, parent_collection
)
camera_depth_map_pair_collection = add_collection(
"Camera Depth Map Pair Collection", parent_collection
)
else:
log_report("INFO", "Adding depth maps as point cloud: False", op)
depth_map_id_or_name_str = depth_map_id_or_name_str.rstrip()
if depth_map_id_or_name_str == "":
depth_map_indices = None
else:
depth_map_indices = []
cam_rel_fp_to_idx = {}
for idx, camera in enumerate(cameras):
rel_fp = camera.get_relative_fp()
cam_rel_fp_to_idx[rel_fp] = idx
for id_or_name in depth_map_id_or_name_str.split(" "):
if is_int(id_or_name):
depth_map_indices.append(int(id_or_name))
else:
if id_or_name in cam_rel_fp_to_idx:
depth_map_indices.append(cam_rel_fp_to_idx[id_or_name])
else:
log_report(
"WARNING",
"Could not find depth map name "
+ id_or_name
+ ". "
+ "Possible values are: "
+ str(cam_rel_fp_to_idx.keys()),
op,
)
# Adding cameras and image planes:
for index, camera in enumerate(cameras):
# camera_name = "Camera %d" % index # original code
# Replace the camera name so it matches the image name (without extension)
blender_image_name_stem = _get_camera_obj_gui_str(camera)
camera_name = blender_image_name_stem + "_cam"
camera_object = add_camera_object(
camera, camera_name, camera_collection
)
camera_object.scale *= camera_scale
if not add_image_planes and not add_background_images:
continue
if camera.has_undistorted_absolute_fp():
image_path = camera.get_undistorted_absolute_fp()
else:
image_path = camera.get_absolute_fp()
if not os.path.isfile(image_path):
log_report(
"WARNING", "Could not find image at " + str(image_path), op
)
continue
else:
log_report("INFO", "Found image at " + str(image_path), op)
blender_image = bpy.data.images.load(image_path)
if add_background_images:
camera_data = bpy.data.objects[camera_name].data
camera_data.show_background_images = True
background_image = camera_data.background_images.new()
background_image.image = blender_image
if add_image_planes and not camera.is_panoramic():
# Group image plane and camera:
camera_image_plane_pair_collection_current = add_collection(
"Camera Image Plane Pair Collection %s"
% blender_image_name_stem,
camera_image_plane_pair_collection,
)
image_plane_name = blender_image_name_stem + "_image_plane"
image_plane_obj = add_camera_image_plane(
camera_object.matrix_world,
blender_image,
camera=camera,
name=image_plane_name,
transparency=image_plane_transparency,
add_image_plane_emission=add_image_plane_emission,
image_planes_collection=image_planes_collection,
op=op,
)
camera_image_plane_pair_collection_current.objects.link(
camera_object
)
camera_image_plane_pair_collection_current.objects.link(
image_plane_obj
)
if not add_depth_maps_as_point_cloud:
continue
if camera.get_depth_map_fp() is None:
continue
if depth_map_indices is not None:
if index not in depth_map_indices:
continue
# Group image plane and camera:
camera_depth_map_pair_collection_current = add_collection(
"Camera Depth Map Pair Collection %s"
% os.path.basename(camera.get_depth_map_fp()),
camera_depth_map_pair_collection,
)
depth_map_world_coords = camera.convert_depth_map_to_world_coords(
depth_map_display_sparsity=depth_map_display_sparsity
)
depth_map_world_coords = depth_map_world_coords.tolist()
if use_default_depth_map_color:
color = depth_map_default_color
else:
color = _color_from_value(
val=index, min_val=0, max_val=len(cameras)
)
depth_map_anchor_handle = draw_coords(
depth_map_world_coords,
color=color,
point_size=depth_map_point_size,
add_points_to_point_cloud_handle=True,
reconstruction_collection=depth_map_collection,
object_anchor_handle_name=_get_camera_obj_gui_str(camera)
+ "_depth_point_cloud",
op=op,
)
camera_depth_map_pair_collection_current.objects.link(camera_object)
camera_depth_map_pair_collection_current.objects.link(
depth_map_anchor_handle
)
log_report("INFO", "Duration: " + str(stop_watch.get_elapsed_time()), op)
log_report("INFO", "Adding Cameras: Done", op)
def add_camera_image_plane(
matrix_world,
blender_image,
camera,
name,
transparency,
add_image_plane_emission,
image_planes_collection,
op=None,
):
"""Add an image plane corresponding to a reconstructed camera."""
# log_report('INFO', 'add_camera_image_plane: ...', op)
# log_report('INFO', 'name: ' + str(name), op)
width = camera.width
height = camera.height
focal_length = camera.get_focal_length()
assert width is not None and height is not None
bpy.context.scene.render.engine = "CYCLES"
mesh = bpy.data.meshes.new(name)
mesh.update()
mesh.validate()
plane_distance = 1.0 # Distance from camera position
# Right vector in view frustum at plane_distance:
right = Vector((1, 0, 0)) * (width / focal_length) * plane_distance
# Up vector in view frustum at plane_distance:
up = Vector((0, 1, 0)) * (height / focal_length) * plane_distance
# Camera view direction:
view_dir = -Vector((0, 0, 1)) * plane_distance
plane_center = view_dir
shift_x, shift_y = compute_principal_point_shift(
camera, relativ_to_largest_extend=False
)
corners = ((-0.5, -0.5), (+0.5, -0.5), (+0.5, +0.5), (-0.5, +0.5))
points = [
(plane_center + (c[0] + shift_x) * right + (c[1] + shift_y) * up)[0:3]
for c in corners
]
mesh.from_pydata(points, [], [[0, 1, 2, 3]])
mesh.uv_layers.new()
# Add mesh to new image plane object:
mesh_obj = add_obj(mesh, name, image_planes_collection)
image_plane_material = bpy.data.materials.new(name="image_plane_material")
# Adds "Principled BSDF" and a "Material Output" node
image_plane_material.use_nodes = True
nodes = image_plane_material.node_tree.nodes
links = image_plane_material.node_tree.links
shader_node_tex_image = nodes.new(type="ShaderNodeTexImage")
shader_node_principled_bsdf = nodes.get("Principled BSDF")
shader_node_principled_bsdf.inputs["Alpha"].default_value = transparency
links.new(
shader_node_tex_image.outputs["Color"],
shader_node_principled_bsdf.inputs["Base Color"],
)
if add_image_plane_emission:
links.new(
shader_node_tex_image.outputs["Color"],
shader_node_principled_bsdf.inputs["Emission"],
)
shader_node_tex_image.image = blender_image
# Assign it to object
if mesh_obj.data.materials:
# assign to 1st material slot
mesh_obj.data.materials[0] = image_plane_material
else:
# no slots
mesh_obj.data.materials.append(image_plane_material)
mesh_obj.matrix_world = matrix_world
mesh.update()
mesh.validate()
# log_report('INFO', 'add_camera_image_plane: Done', op)
return mesh_obj
|
the-stack_106_13252
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: oesteban
# @Date: 2015-11-19 16:44:27
""" smriprep setup script """
def main():
""" Install entry-point """
from pathlib import Path
from inspect import getfile, currentframe
from setuptools import setup, find_packages
from smriprep.__about__ import (
__package__,
__version__,
__author__,
__email__,
__maintainer__,
__license__,
__description__,
__longdesc__,
__url__,
DOWNLOAD_URL,
CLASSIFIERS,
REQUIRES,
SETUP_REQUIRES,
LINKS_REQUIRES,
TESTS_REQUIRES,
EXTRA_REQUIRES,
)
pkg_data = {
__package__: [
'data/*.json',
'data/*.nii.gz',
'data/*.mat',
'data/boilerplate.bib',
'data/itkIdentityTransform.txt',
'data/reports/config.json',
'data/reports/report.tpl',
]
}
version = None
cmdclass = {}
root_dir = Path(getfile(currentframe())).resolve().parent
verfile = root_dir / __package__ / 'VERSION'
if verfile.is_file():
version = verfile.read_text().splitlines()[0].strip()
pkg_data[__package__].insert(0, 'VERSION')
if version is None:
import versioneer
version = versioneer.get_version()
cmdclass = versioneer.get_cmdclass()
setup(
name=__package__,
        version=version,
description=__description__,
long_description=__longdesc__,
author=__author__,
author_email=__email__,
maintainer=__maintainer__,
maintainer_email=__email__,
url=__url__,
license=__license__,
classifiers=CLASSIFIERS,
download_url=DOWNLOAD_URL,
# Dependencies handling
setup_requires=SETUP_REQUIRES,
install_requires=REQUIRES,
tests_require=TESTS_REQUIRES,
extras_require=EXTRA_REQUIRES,
dependency_links=LINKS_REQUIRES,
package_data=pkg_data,
entry_points={'console_scripts': [
'smriprep=smriprep.cli.run:main',
]},
packages=find_packages(exclude=("tests",)),
zip_safe=False,
cmdclass=cmdclass,
)
if __name__ == '__main__':
main()
|
the-stack_106_13253
|
import unittest
from cloudsplaining.shared.utils import remove_wildcard_only_actions, remove_read_level_actions
class TestUtils(unittest.TestCase):
def test_remove_wildcard_only_actions(self):
actions = [
# 3 wildcard only actions
"secretsmanager:createsecret",
"secretsmanager:getrandompassword",
"secretsmanager:listsecrets",
# This one is wildcard OR "secret"
"secretsmanager:putsecretvalue",
]
results = remove_wildcard_only_actions(actions)
# print(results)
self.assertListEqual(results, ["secretsmanager:PutSecretValue"])
def test_remove_read_level_actions(self):
actions = [
"ssm:GetParameters",
"ecr:PutImage"
]
result = remove_read_level_actions(actions)
expected_result = ['ecr:PutImage']
self.assertListEqual(result, expected_result)
|
the-stack_106_13255
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import concurrent.futures
import logging
import random
import time
from itertools import repeat
from typing import Any, Dict, List, Optional, cast
from botocore.exceptions import ClientError
from aws_orbit.utils import boto3_client, boto3_resource, chunkify
_logger: logging.Logger = logging.getLogger(__name__)
def list_keys(bucket: str) -> List[Dict[str, str]]:
client_s3 = boto3_client("s3")
paginator = client_s3.get_paginator("list_object_versions")
response_iterator = paginator.paginate(Bucket=bucket, PaginationConfig={"PageSize": 1000})
keys: List[Dict[str, str]] = []
for page in response_iterator:
if "DeleteMarkers" in page:
for delete_marker in page["DeleteMarkers"]:
keys.append(
{
"Key": delete_marker["Key"],
"VersionId": delete_marker["VersionId"],
}
)
if "Versions" in page:
for version in page["Versions"]:
keys.append({"Key": version["Key"], "VersionId": version["VersionId"]})
return keys
def _delete_objects(bucket: str, chunk: List[Dict[str, str]]) -> None:
client_s3 = boto3_client("s3")
try:
client_s3.delete_objects(Bucket=bucket, Delete={"Objects": chunk})
except client_s3.exceptions.ClientError as ex:
if "SlowDown" in str(ex):
time.sleep(random.randint(3, 10))
client_s3.delete_objects(Bucket=bucket, Delete={"Objects": chunk})
def delete_objects(bucket: str, keys: Optional[List[str]] = None) -> None:
if keys is None:
keys_pairs: List[Dict[str, str]] = list_keys(bucket=bucket)
else:
keys_pairs = [{"Key": k} for k in keys]
if keys_pairs:
chunks: List[List[Dict[str, str]]] = chunkify(lst=keys_pairs, max_length=1_000)
with concurrent.futures.ThreadPoolExecutor(max_workers=len(chunks)) as executor:
list(executor.map(_delete_objects, repeat(bucket), chunks))
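# Usage sketch (the bucket name is a placeholder): delete every object version
# in a bucket, chunked into batches of 1000 keys per DeleteObjects call and
# issued in parallel threads.
#
#   delete_objects(bucket="my-example-bucket")
#
# Passing an explicit key list skips the ListObjectVersions pagination:
#
#   delete_objects(bucket="my-example-bucket", keys=["a.txt", "b.txt"])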
def delete_bucket(bucket: str) -> None:
client_s3 = boto3_client("s3")
try:
_logger.debug("Cleaning up bucket: %s", bucket)
delete_objects(bucket=bucket)
_logger.debug("Deleting bucket: %s", bucket)
client_s3.delete_bucket(Bucket=bucket)
except Exception as ex:
if "NoSuchBucket" in str(ex):
_logger.debug(f"Bucket ({bucket}) does not exist, skipping")
return
else:
raise ex
def upload_file(src: str, bucket: str, key: str) -> None:
client_s3 = boto3_client("s3")
client_s3.upload_file(Filename=src, Bucket=bucket, Key=key)
def list_s3_objects(bucket: str, key: str) -> Dict[str, Any]:
client_s3 = boto3_client("s3")
response = client_s3.list_objects_v2(Bucket=bucket, Prefix=key)
return cast(Dict[str, Any], response)
def delete_bucket_by_prefix(prefix: str) -> None:
client_s3 = boto3_client("s3")
for bucket in client_s3.list_buckets()["Buckets"]:
if bucket["Name"].startswith(prefix):
delete_bucket(bucket=bucket["Name"])
def object_exists(bucket: str, key: str) -> bool:
try:
boto3_resource("s3").Object(bucket, key).load()
except ClientError as e:
if e.response["Error"]["Code"] == "404":
return False
else:
raise
else:
return True
|
the-stack_106_13258
|
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import html
import http.client
import logging
import types
import urllib
from io import BytesIO
from canonicaljson import encode_canonical_json, encode_pretty_printed_json, json
from twisted.internet import defer
from twisted.python import failure
from twisted.web import resource
from twisted.web.server import NOT_DONE_YET
from twisted.web.static import NoRangeStaticProducer
from twisted.web.util import redirectTo
import synapse.events
import synapse.metrics
from synapse.api.errors import (
CodeMessageException,
Codes,
RedirectException,
SynapseError,
UnrecognizedRequestError,
)
from synapse.logging.context import preserve_fn
from synapse.logging.opentracing import trace_servlet
from synapse.util.caches import intern_dict
logger = logging.getLogger(__name__)
HTML_ERROR_TEMPLATE = """<!DOCTYPE html>
<html lang=en>
<head>
<meta charset="utf-8">
<title>Error {code}</title>
</head>
<body>
<p>{msg}</p>
</body>
</html>
"""
def wrap_json_request_handler(h):
"""Wraps a request handler method with exception handling.
Also does the wrapping with request.processing as per wrap_async_request_handler.
The handler method must have a signature of "handle_foo(self, request)",
where "request" must be a SynapseRequest.
The handler must return a deferred or a coroutine. If the deferred succeeds
we assume that a response has been sent. If the deferred fails with a SynapseError we use
    it to send a JSON response with the appropriate HTTP response code. If the
    deferred fails with any other type of error we send a 500 response.
"""
async def wrapped_request_handler(self, request):
try:
await h(self, request)
except SynapseError as e:
code = e.code
logger.info("%s SynapseError: %s - %s", request, code, e.msg)
# Only respond with an error response if we haven't already started
# writing, otherwise lets just kill the connection
if request.startedWriting:
if request.transport:
try:
request.transport.abortConnection()
except Exception:
# abortConnection throws if the connection is already closed
pass
else:
respond_with_json(
request,
code,
e.error_dict(),
send_cors=True,
pretty_print=_request_user_agent_is_curl(request),
)
except Exception:
# failure.Failure() fishes the original Failure out
# of our stack, and thus gives us a sensible stack
# trace.
f = failure.Failure()
logger.error(
"Failed handle request via %r: %r",
request.request_metrics.name,
request,
exc_info=(f.type, f.value, f.getTracebackObject()),
)
# Only respond with an error response if we haven't already started
# writing, otherwise lets just kill the connection
if request.startedWriting:
if request.transport:
try:
request.transport.abortConnection()
except Exception:
# abortConnection throws if the connection is already closed
pass
else:
respond_with_json(
request,
500,
{"error": "Internal server error", "errcode": Codes.UNKNOWN},
send_cors=True,
pretty_print=_request_user_agent_is_curl(request),
)
return wrap_async_request_handler(wrapped_request_handler)
def wrap_html_request_handler(h):
"""Wraps a request handler method with exception handling.
Also does the wrapping with request.processing as per wrap_async_request_handler.
The handler method must have a signature of "handle_foo(self, request)",
where "request" must be a SynapseRequest.
"""
async def wrapped_request_handler(self, request):
try:
return await h(self, request)
except Exception:
f = failure.Failure()
return _return_html_error(f, request)
return wrap_async_request_handler(wrapped_request_handler)
def _return_html_error(f, request):
"""Sends an HTML error page corresponding to the given failure
Args:
f (twisted.python.failure.Failure):
request (twisted.web.server.Request):
"""
if f.check(CodeMessageException):
cme = f.value
code = cme.code
msg = cme.msg
if isinstance(cme, RedirectException):
logger.info("%s redirect to %s", request, cme.location)
request.setHeader(b"location", cme.location)
request.cookies.extend(cme.cookies)
elif isinstance(cme, SynapseError):
logger.info("%s SynapseError: %s - %s", request, code, msg)
else:
logger.error(
"Failed handle request %r",
request,
exc_info=(f.type, f.value, f.getTracebackObject()),
)
else:
code = http.client.INTERNAL_SERVER_ERROR
msg = "Internal server error"
logger.error(
"Failed handle request %r",
request,
exc_info=(f.type, f.value, f.getTracebackObject()),
)
body = HTML_ERROR_TEMPLATE.format(code=code, msg=html.escape(msg)).encode("utf-8")
request.setResponseCode(code)
request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
request.setHeader(b"Content-Length", b"%i" % (len(body),))
request.write(body)
finish_request(request)
def wrap_async_request_handler(h):
"""Wraps an async request handler so that it calls request.processing.
This helps ensure that work done by the request handler after the request is completed
is correctly recorded against the request metrics/logs.
The handler method must have a signature of "handle_foo(self, request)",
where "request" must be a SynapseRequest.
The handler may return a deferred, in which case the completion of the request isn't
logged until the deferred completes.
"""
async def wrapped_async_request_handler(self, request):
with request.processing():
await h(self, request)
# we need to preserve_fn here, because the synchronous render method won't yield for
# us (obviously)
return preserve_fn(wrapped_async_request_handler)
class HttpServer(object):
""" Interface for registering callbacks on a HTTP server
"""
def register_paths(self, method, path_patterns, callback):
""" Register a callback that gets fired if we receive a http request
with the given method for a path that matches the given regex.
        If the regex contains groups, these get passed to the callback via
        an unpacked tuple.
Args:
method (str): The method to listen to.
path_patterns (list<SRE_Pattern>): The regex used to match requests.
callback (function): The function to fire if we receive a matched
request. The first argument will be the request object and
subsequent arguments will be any matched groups from the regex.
This should return a tuple of (code, response).
"""
pass
class JsonResource(HttpServer, resource.Resource):
""" This implements the HttpServer interface and provides JSON support for
Resources.
Register callbacks via register_paths()
    Callbacks can return a tuple of status code and a dict, in which case
    the dict will automatically be sent to the client as a JSON object.
The JsonResource is primarily intended for returning JSON, but callbacks
may send something other than JSON, they may do so by using the methods
on the request object and instead returning None.
"""
isLeaf = True
_PathEntry = collections.namedtuple(
"_PathEntry", ["pattern", "callback", "servlet_classname"]
)
def __init__(self, hs, canonical_json=True):
resource.Resource.__init__(self)
self.canonical_json = canonical_json
self.clock = hs.get_clock()
self.path_regexs = {}
self.hs = hs
def register_paths(
self, method, path_patterns, callback, servlet_classname, trace=True
):
"""
Registers a request handler against a regular expression. Later request URLs are
checked against these regular expressions in order to identify an appropriate
handler for that request.
Args:
method (str): GET, POST etc
path_patterns (Iterable[str]): A list of regular expressions to which
the request URLs are compared.
callback (function): The handler for the request. Usually a Servlet
servlet_classname (str): The name of the handler to be used in prometheus
and opentracing logs.
trace (bool): Whether we should start a span to trace the servlet.
"""
method = method.encode("utf-8") # method is bytes on py3
if trace:
# We don't extract the context from the servlet because we can't
# trust the sender
callback = trace_servlet(servlet_classname)(callback)
for path_pattern in path_patterns:
logger.debug("Registering for %s %s", method, path_pattern.pattern)
self.path_regexs.setdefault(method, []).append(
self._PathEntry(path_pattern, callback, servlet_classname)
)
def render(self, request):
""" This gets called by twisted every time someone sends us a request.
"""
defer.ensureDeferred(self._async_render(request))
return NOT_DONE_YET
@wrap_json_request_handler
async def _async_render(self, request):
""" This gets called from render() every time someone sends us a request.
This checks if anyone has registered a callback for that method and
path.
"""
callback, servlet_classname, group_dict = self._get_handler_for_request(request)
# Make sure we have a name for this handler in prometheus.
request.request_metrics.name = servlet_classname
# Now trigger the callback. If it returns a response, we send it
# here. If it throws an exception, that is handled by the wrapper
# installed by @request_handler.
kwargs = intern_dict(
{
name: urllib.parse.unquote(value) if value else value
for name, value in group_dict.items()
}
)
callback_return = callback(request, **kwargs)
# Is it synchronous? We'll allow this for now.
if isinstance(callback_return, (defer.Deferred, types.CoroutineType)):
callback_return = await callback_return
if callback_return is not None:
code, response = callback_return
self._send_response(request, code, response)
def _get_handler_for_request(self, request):
"""Finds a callback method to handle the given request
Args:
request (twisted.web.http.Request):
Returns:
Tuple[Callable, str, dict[unicode, unicode]]: callback method, the
label to use for that method in prometheus metrics, and the
dict mapping keys to path components as specified in the
handler's path match regexp.
The callback will normally be a method registered via
register_paths, so will return (possibly via Deferred) either
None, or a tuple of (http code, response body).
"""
if request.method == b"OPTIONS":
return _options_handler, "options_request_handler", {}
# Loop through all the registered callbacks to check if the method
# and path regex match
for path_entry in self.path_regexs.get(request.method, []):
m = path_entry.pattern.match(request.path.decode("ascii"))
if m:
# We found a match!
return path_entry.callback, path_entry.servlet_classname, m.groupdict()
# Huh. No one wanted to handle that? Fiiiiiine. Send 400.
return _unrecognised_request_handler, "unrecognised_request_handler", {}
def _send_response(
self, request, code, response_json_object, response_code_message=None
):
# TODO: Only enable CORS for the requests that need it.
respond_with_json(
request,
code,
response_json_object,
send_cors=True,
response_code_message=response_code_message,
pretty_print=_request_user_agent_is_curl(request),
canonical_json=self.canonical_json,
)
class DirectServeResource(resource.Resource):
def render(self, request):
"""
Render the request, using an asynchronous render handler if it exists.
"""
async_render_callback_name = "_async_render_" + request.method.decode("ascii")
# Try and get the async renderer
callback = getattr(self, async_render_callback_name, None)
# No async renderer for this request method.
if not callback:
return super().render(request)
resp = trace_servlet(self.__class__.__name__)(callback)(request)
# If it's a coroutine, turn it into a Deferred
if isinstance(resp, types.CoroutineType):
defer.ensureDeferred(resp)
return NOT_DONE_YET
def _options_handler(request):
"""Request handler for OPTIONS requests
This is a request handler suitable for return from
_get_handler_for_request. It returns a 200 and an empty body.
Args:
request (twisted.web.http.Request):
Returns:
Tuple[int, dict]: http code, response body.
"""
return 200, {}
def _unrecognised_request_handler(request):
"""Request handler for unrecognised requests
This is a request handler suitable for return from
_get_handler_for_request. It actually just raises an
UnrecognizedRequestError.
Args:
request (twisted.web.http.Request):
"""
raise UnrecognizedRequestError()
class RootRedirect(resource.Resource):
"""Redirects the root '/' path to another path."""
def __init__(self, path):
resource.Resource.__init__(self)
self.url = path
def render_GET(self, request):
return redirectTo(self.url.encode("ascii"), request)
def getChild(self, name, request):
if len(name) == 0:
return self # select ourselves as the child to render
return resource.Resource.getChild(self, name, request)
def respond_with_json(
request,
code,
json_object,
send_cors=False,
response_code_message=None,
pretty_print=False,
canonical_json=True,
):
# could alternatively use request.notifyFinish() and flip a flag when
# the Deferred fires, but since the flag is RIGHT THERE it seems like
# a waste.
if request._disconnected:
logger.warning(
"Not sending response to request %s, already disconnected.", request
)
return
if pretty_print:
json_bytes = encode_pretty_printed_json(json_object) + b"\n"
else:
if canonical_json or synapse.events.USE_FROZEN_DICTS:
# canonicaljson already encodes to bytes
json_bytes = encode_canonical_json(json_object)
else:
json_bytes = json.dumps(json_object).encode("utf-8")
return respond_with_json_bytes(
request,
code,
json_bytes,
send_cors=send_cors,
response_code_message=response_code_message,
)
def respond_with_json_bytes(
request, code, json_bytes, send_cors=False, response_code_message=None
):
"""Sends encoded JSON in response to the given request.
Args:
request (twisted.web.http.Request): The http request to respond to.
code (int): The HTTP response code.
json_bytes (bytes): The json bytes to use as the response body.
send_cors (bool): Whether to send Cross-Origin Resource Sharing headers
http://www.w3.org/TR/cors/
Returns:
twisted.web.server.NOT_DONE_YET"""
request.setResponseCode(code, message=response_code_message)
request.setHeader(b"Content-Type", b"application/json")
request.setHeader(b"Content-Length", b"%d" % (len(json_bytes),))
request.setHeader(b"Cache-Control", b"no-cache, no-store, must-revalidate")
if send_cors:
set_cors_headers(request)
# todo: we can almost certainly avoid this copy and encode the json straight into
# the bytesIO, but it would involve faffing around with string->bytes wrappers.
bytes_io = BytesIO(json_bytes)
producer = NoRangeStaticProducer(request, bytes_io)
producer.start()
return NOT_DONE_YET
def set_cors_headers(request):
"""Set the CORs headers so that javascript running in a web browsers can
use this API
Args:
request (twisted.web.http.Request): The http request to add CORs to.
"""
request.setHeader(b"Access-Control-Allow-Origin", b"*")
request.setHeader(
b"Access-Control-Allow-Methods", b"GET, POST, PUT, DELETE, OPTIONS"
)
request.setHeader(
b"Access-Control-Allow-Headers",
b"Origin, X-Requested-With, Content-Type, Accept, Authorization",
)
def finish_request(request):
""" Finish writing the response to the request.
    Twisted raises a RuntimeError if the connection closed before the
    response was written but doesn't provide a convenient or reliable way to
    determine if the connection was closed. So we catch and log the RuntimeError.
You might think that ``request.notifyFinish`` could be used to tell if the
request was finished. However the deferred it returns won't fire if the
connection was already closed, meaning we'd have to have called the method
right at the start of the request. By the time we want to write the response
it will already be too late.
"""
try:
request.finish()
except RuntimeError as e:
logger.info("Connection disconnected before response was written: %r", e)
def _request_user_agent_is_curl(request):
user_agents = request.requestHeaders.getRawHeaders(b"User-Agent", default=[])
for user_agent in user_agents:
if b"curl" in user_agent:
return True
return False
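# A rough usage sketch; the class and path names below are made up for
# illustration. A servlet typically registers itself against a JsonResource
# and returns a (code, dict) tuple from its handler, which _send_response
# then serializes to JSON.
#
#   class PingRestServlet(object):
#       PATTERNS = [re.compile("^/_example/ping$")]
#
#       def register(self, http_server):
#           http_server.register_paths(
#               "GET", self.PATTERNS, self.on_GET, self.__class__.__name__
#           )
#
#       async def on_GET(self, request):
#           return 200, {"pong": True}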
|
the-stack_106_13260
|
# -*- coding: utf-8 -*-
import logging
import numbers
import os
from decimal import *
from logging.handlers import RotatingFileHandler
import pandas as pd
from zvt import LOG_PATH
getcontext().prec = 16
logger = logging.getLogger(__name__)
none_values = ['不变', '--', '-', '新进']
zero_values = ['不变', '--', '-', '新进']
def first_item_to_float(the_list):
return to_float(the_list[0])
def second_item_to_float(the_list):
return to_float(the_list[1])
def add_func_to_value(the_map, the_func):
for k, v in the_map.items():
the_map[k] = (v, the_func)
return the_map
def to_float(the_str, default=None):
if not the_str:
return default
if the_str in none_values:
return None
if '%' in the_str:
return pct_to_float(the_str)
try:
scale = 1.0
        if the_str[-2:] == '万亿':  # '万亿' = trillion (1e12)
            the_str = the_str[0:-2]
            scale = 1000000000000
        elif the_str[-1] == '亿':  # '亿' = one hundred million (1e8)
            the_str = the_str[0:-1]
            scale = 100000000
        elif the_str[-1] == '万':  # '万' = ten thousand (1e4)
            the_str = the_str[0:-1]
            scale = 10000
if not the_str:
return default
return float(Decimal(the_str.replace(',', '')) * Decimal(scale))
except Exception as e:
logger.error('the_str:{}'.format(the_str))
logger.exception(e)
return default
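# A few illustrative conversions (example values, traced through the code above):
#
#   to_float('1,234')  -> 1234.0
#   to_float('2.5万')  -> 25000.0        # '万' scales by 10,000
#   to_float('3亿')    -> 300000000.0    # '亿' scales by 100,000,000
#   to_float('12.5%')  -> 0.125          # delegated to pct_to_float
#   to_float('--')     -> None           # placeholder values map to None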
def pct_to_float(the_str, default=None):
if the_str in none_values:
return None
try:
return float(Decimal(the_str.replace('%', '')) / Decimal(100))
except Exception as e:
logger.exception(e)
return default
def json_callback_param(the_str):
return eval(the_str[the_str.index("(") + 1:the_str.index(")")])
def fill_domain_from_dict(the_domain, the_dict: dict, the_map: dict, default_func=lambda x: x):
if not the_map:
the_map = {}
for k in the_dict:
the_map[k] = (k, default_func)
for k, v in the_map.items():
if isinstance(v, tuple):
field_in_dict = v[0]
the_func = v[1]
else:
field_in_dict = v
the_func = default_func
the_value = the_dict.get(field_in_dict)
if the_value is not None:
to_value = the_value
if to_value in none_values:
setattr(the_domain, k, None)
else:
result_value = the_func(to_value)
setattr(the_domain, k, result_value)
exec('the_domain.{}=result_value'.format(k))
def init_process_log(file_name, log_dir=LOG_PATH):
root_logger = logging.getLogger()
# reset the handlers
root_logger.handlers = []
root_logger.setLevel(logging.INFO)
if log_dir:
file_name = os.path.join(log_dir, file_name)
fh = RotatingFileHandler(file_name, maxBytes=524288000, backupCount=10)
fh.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = logging.Formatter(
"%(levelname)s %(threadName)s %(asctime)s %(name)s:%(lineno)s %(funcName)s %(message)s")
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
root_logger.addHandler(fh)
root_logger.addHandler(ch)
SUPPORT_ENCODINGS = ['GB2312', 'GBK', 'GB18030', 'UTF-8']
def read_csv(f, encoding, sep=None, na_values=None):
encodings = [encoding] + SUPPORT_ENCODINGS
for encoding in encodings:
try:
if sep:
return pd.read_csv(f, sep=sep, encoding=encoding, na_values=na_values)
else:
return pd.read_csv(f, encoding=encoding, na_values=na_values)
except UnicodeDecodeError as e:
logger.warning('read_csv failed by using encoding:{}'.format(encoding), e)
f.seek(0)
continue
return None
def to_positive_number(number):
if isinstance(number, numbers.Number):
return abs(number)
return 0
|
the-stack_106_13261
|
from osmhm import (
connect,
db,
send_notification,
queries,
)
import fnmatch
def suspicious_filter(changesets):
"""Set of rudimentary filters towards detecting possibly bad changesets.
1: Large amount of additions
2: Large amount of modifications
3: Large amount of deletions
4: Large amount of combined actions
5: High proportion of deletions
6: High proportion of modifications
"""
whitelist = queries.query_white_list()
conn = connect.connect()
cur = conn.cursor()
for changesetid, changeset in changesets.iteritems():
if changeset['username'] in whitelist:
continue
if changeset['create'] > 1500:
info = (changeset['timestamp'], changesetid,
changeset['username'].encode('utf8'),
1, changeset['create'])
cur.execute("""INSERT INTO history_filters
(timestamp,changeset,username,flag,quantity)
VALUES (%s, %s, %s, %s, %s);""", info)
if changeset['modify'] > 1500:
info = (changeset['timestamp'], changesetid,
changeset['username'].encode('utf8'),
2, changeset['modify'])
cur.execute("""INSERT INTO history_filters
(timestamp,changeset,username,flag,quantity)
VALUES (%s, %s, %s, %s, %s);""", info)
if changeset['delete'] > 1500:
info = (changeset['timestamp'], changesetid,
changeset['username'].encode('utf8'),
3, changeset['delete'])
cur.execute("""INSERT INTO history_filters
(timestamp,changeset,username,flag,quantity)
VALUES (%s, %s, %s, %s, %s);""", info)
if changeset['create'] + changeset['modify'] + changeset['delete'] > 1500:
info = (changeset['timestamp'], changesetid,
changeset['username'].encode('utf8'),
4, changeset['create'] + changeset['modify'] + changeset['delete'])
cur.execute("""INSERT INTO history_filters
(timestamp,changeset,username,flag,quantity)
VALUES (%s, %s, %s, %s, %s);""", info)
if changeset['delete'] > 0 and float(changeset['create']+changeset['modify'])/float(changeset['delete']) < 0.001:
info = (changeset['timestamp'], changesetid,
changeset['username'].encode('utf8'),
5, float(changeset['create']+changeset['modify'])/float(changeset['delete']))
cur.execute("""INSERT INTO history_filters
(timestamp,changeset,username,flag,quantity)
VALUES (%s, %s, %s, %s, %s);""", info)
if changeset['modify'] > 0 and float(changeset['create']+changeset['delete'])/float(changeset['modify']) < 0.001:
info = (changeset['timestamp'], changesetid,
changeset['username'].encode('utf8'),
6, float(changeset['create']+changeset['delete'])/float(changeset['modify']))
cur.execute("""INSERT INTO history_filters
(timestamp,changeset,username,flag,quantity)
VALUES (%s, %s, %s, %s, %s);""", info)
conn.commit()
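# Summary of the flag values written to history_filters, using the thresholds above:
#   1-4: more than 1500 creations / modifications / deletions / total edits
#   5:   (create + modify) / delete  < 0.001  -> almost pure deletion
#   6:   (create + delete) / modify  < 0.001  -> almost pure modification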
def user_filter(changesets, notification=False, notifier=send_notification.basic_send_mail):
notify_list = []
watched_users = queries.query_user_list()
conn = connect.connect()
cur = conn.cursor()
if watched_users:
for changesetid, changeset in changesets.iteritems():
for user in watched_users:
if fnmatch.fnmatch(changeset['username'].encode('utf-8'), user['username']):
db.add_watched_user_event(changeset, user['id'])
notify_list.append({'timestamp': changeset['timestamp'], 'changesetid': changesetid,
'username': changeset['username'].encode('utf8'), 'create': changeset['create'],
'modify': changeset['modify'], 'delete': changeset['delete'], 'author': user['author'],
'address': user['email'], 'reason': user['reason']})
if notify_list and notification:
send_notification.send_notification(notify_list, 'user', notifier=notifier)
def user_object_filter(objects, notification=False, notifier=send_notification.basic_send_mail):
notify_list = []
watched_users = queries.query_user_object_list()
conn = connect.connect()
cur = conn.cursor()
if watched_users:
for user in watched_users:
for item_id, item in objects.iteritems():
if fnmatch.fnmatch(item['username'].encode('utf-8'), user['username']):
if item['create'] == 1:
action = 'create'
elif item['modify'] == 1:
action = 'modify'
elif item['delete'] == 1:
action = 'delete'
for item_key in item['tags']:
info = (item['timestamp'], item['changeset'],
item['username'].encode('utf8'), action,
item_key, item['tags'][item_key])
cur.execute("""INSERT INTO history_users_objects
(timestamp,changeset,username,action,key,value)
VALUES (%s, %s, %s, %s, %s, %s);""", info)
conn.commit()
def object_filter(objects, notification=False, notifier=send_notification.basic_send_mail):
notify_list = []
watched_objects = queries.query_object_list()
conn = connect.connect()
cur = conn.cursor()
if watched_objects:
for obj in watched_objects:
for item_id, item in objects.iteritems():
if item_id == obj['element']:
if item['create'] == 1:
item['action'] = 1
elif item['modify'] == 1:
item['action'] = 2
elif item['delete'] == 1:
item['action'] = 4
db.add_watched_object_event(item, obj['id'])
notify_list.append({'timestamp': item['timestamp'], 'changesetid': item['changeset'],
'username': item['username'].encode('utf8'),
'action': item['action'], 'element': item_id,
'author': obj['author'], 'address': obj['email'], 'reason': obj['reason']})
if notify_list and notification:
send_notification.send_notification(notify_list, 'object', notifier=notifier)
def key_filter(objects, notification=False, notifier=send_notification.basic_send_mail):
notify_list = []
watched_keys = queries.query_key_list()
conn = connect.connect()
cur = conn.cursor()
if watched_keys:
for key in watched_keys:
for item_id, item in objects.iteritems():
for item_key in item['tags']:
if fnmatch.fnmatch(item_key,key['key']) and fnmatch.fnmatch(item['tags'][item_key],key['value']):
if item['create'] == 1:
item['action'] = 1
elif item['modify'] == 1:
item['action'] = 2
elif item['delete'] == 1:
item['action'] = 4
db.add_watched_key_event(item, item_key, key['id'])
                        notify_list.append({'timestamp': item['timestamp'], 'changesetid': item['changeset'],
                                            'username': item['username'].encode('utf8'), 'action': item['action'],
                                            'key': item_key, 'value': item['tags'][item_key],
                                            'author': key['author'], 'address': key['email'], 'reason': key['reason']})
if notify_list and notification:
send_notification.send_notification(notify_list, 'key', notifier=notifier)
|
the-stack_106_13262
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from .instance import AbstractBlockchainInstanceProvider
class ObjectCache(dict):
""" This class implements an object/dict cache that comes with an
expiration. Expired items are removed from the cache.
The class implements/extends:
* __setitem__()
* __getitem__()
* __contains__
* __str__()
* get()
and provides a method to define the default expiration time.
"""
def __init__(self, initial_data={}, default_expiration=10, no_overwrite=False):
dict.__init__(self, initial_data)
# Expiration
self.set_expiration(default_expiration)
# This allows nicer testing
self.no_overwrite = no_overwrite
def __setitem__(self, key, value):
if key in self and not self.no_overwrite:
del self[key]
elif key in self and self.no_overwrite:
return
data = {
"expires": datetime.utcnow() + timedelta(seconds=self.default_expiration),
"data": value,
}
dict.__setitem__(self, key, data)
def __getitem__(self, key):
if key in self:
value = dict.__getitem__(self, key)
return value["data"]
def get(self, key, default=None):
""" Returns an element from the cache if available, else returns
the value provided as default or None
"""
if key in self:
return self[key]
else:
return default
def __contains__(self, key):
if dict.__contains__(self, key):
value = dict.__getitem__(self, key)
if datetime.utcnow() < value["expires"]:
return True
else:
# Remove from cache
dict.pop(self, key, None)
return False
def __str__(self):
return "ObjectCache(n={}, default_expiration={})".format(
len(self.keys()), self.default_expiration
)
def set_expiration(self, expiration):
""" Set new default expiration time in seconds (default: 10s)
"""
self.default_expiration = expiration
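# A brief usage sketch (illustrative values): entries expire individually and
# are dropped from the cache once their expiration time has passed.
#
#   cache = ObjectCache(default_expiration=2)
#   cache["1.2.100"] = {"id": "1.2.100"}
#   "1.2.100" in cache        # True while the entry is younger than 2 seconds
#   cache.get("1.2.100")      # returns the stored dict, or None after expiry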
class Caching:
""" This class implements a few common methods that are used to
either cache lists or dicts
"""
def __init__(self, *args, **kwargs):
self._fetched = False
def _store_item(self, key=None):
if key is None and dict.__contains__(self, "id"):
self._cache[self.get("id")] = self
elif key:
self._cache[key] = self
self._fetched = True
def _store_items(self, key=None):
key = key or self.__class__.__name__
if key in self._cache:
self._cache[key].extend(list(self))
else:
self._cache[key] = list(self)
self._fetched = True
def incached(self, id):
""" Is an element cached?
"""
return id in self._cache
def getfromcache(self, id):
""" Get an element from the cache explicitly
"""
return self._cache.get(id, None)
def __getitem__(self, key):
if not self._fetched:
self.refresh()
return dict.__getitem__(self, key)
def items(self):
""" This overwrites items() so that refresh() is called it the
object is not already fetched
"""
if not self._fetched:
self.refresh()
return dict.items(self)
def __contains__(self, key):
if not self._fetched:
self.refresh()
return dict.__contains__(self, key)
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, str(self.identifier))
@classmethod
def clear_cache(cls):
""" Clear/Reset the entire Cache
"""
cls._cache = ObjectCache()
__str__ = __repr__
class BlockchainObjects(Caching, list):
""" This class is used internally to store **lists** of objects and
deal with the cache and indexing thereof.
"""
_cache = ObjectCache()
identifier = None
def refresh(self, *args, **kwargs):
""" Interface that needs to be implemented. This method is
called when an object is requested that has not yet been
fetched/stored
"""
raise NotImplementedError
def __init__(self, *args, **kwargs):
Caching.__init__(self, *args, **kwargs)
# Some lists are specific to some key value that is then provided as
# first argument
if len(args) > 0 and isinstance(args[0], str):
key = self._cache_key(args[0])
else:
key = self._cache_key()
if self.incached(key):
list.__init__(self, self.getfromcache(key))
else:
if kwargs.get("refresh", True):
self.refresh(*args, **kwargs)
def _cache_key(self, key=""):
if key:
# We add the key to the index
return "{}-{}".format(self.__class__.__name__, key)
else:
return self.__class__.__name__
def store(self, data, key=None, *args, **kwargs):
""" Cache the list
:param list data: List of objects to cache
"""
list.__init__(self, data)
self._store_items(self._cache_key(key))
@classmethod
def cache_objects(cls, data, key=None):
""" This classmethod allows to feed multiple objects into the
cache is is mostly used for testing
"""
return cls._import(data, key)
@classmethod
def _import(cls, data, key=None):
c = cls(key, refresh=False)
c.store(data, key)
return c
# legacy
def cache(self, key):
""" (legacy) store the current object with key ``key``.
"""
self.store(self, key)
class BlockchainObject(Caching, dict):
""" This class deals with objects from graphene-based blockchains.
It is used to validate object ids, store entire objects in
the cache and deal with indexing thereof.
"""
space_id = 1
type_id = None
type_ids = []
identifier = None
_cache = ObjectCache()
def __init__(self, data, klass=None, lazy=False, use_cache=True, *args, **kwargs):
Caching.__init__(self, *args, **kwargs)
assert self.type_id or self.type_ids
self._fetched = False
self._lazy = lazy
if "_cache_expiration" in kwargs:
self.set_expiration(kwargs["_cache_expiration"])
# We don't read lists, sets, or tuples
if isinstance(data, (list, set, tuple)):
raise ValueError(
"Cannot interpret lists! Please load elements individually!"
)
if klass and isinstance(data, klass):
self.identifier = data.get("id")
dict.__init__(self, data)
elif isinstance(data, dict):
self.identifier = data.get("id")
dict.__init__(self, data)
elif isinstance(data, int):
            # This is only for block numbers, basically
self.identifier = data
if self.incached(str(data)):
dict.__init__(self, self.getfromcache(str(data)))
self._fetched = True
if not self._lazy and not self._fetched:
self.refresh()
# make sure to store the blocknumber for caching
self["id"] = str(data)
# Set identifier again as it is overwritten in super() in refresh()
self.identifier = data
else:
self.identifier = data
if self.test_valid_objectid(self.identifier):
# Here we assume we deal with an id
self.testid(self.identifier)
if self.incached(data):
dict.__init__(self, dict(self.getfromcache(data)))
elif not self._lazy and not self._fetched:
self.refresh()
if use_cache and not self._lazy:
self._store_item()
def store(self, data, key="id"):
""" Cache the list
:param list data: List of objects to cache
"""
dict.__init__(self, data)
self._store_item(key)
@classmethod
def cache_object(cls, data, key=None):
""" This classmethod allows to feed an object into the
cache is is mostly used for testing
"""
return cls._import(data, key)
@classmethod
def _import(cls, data, key=None):
c = cls(data, refresh=False)
c.store(data, key)
return c
@staticmethod
def objectid_valid(i):
""" Test if a string looks like a regular object id of the
form:::
xxxx.yyyyy.zzzz
with those being numbers.
"""
if "." not in i:
return False
parts = i.split(".")
if len(parts) == 3:
try:
[int(x) for x in parts]
return True
except Exception:
pass
return False
def test_valid_objectid(self, i):
""" Alias for objectid_valid
"""
return self.objectid_valid(i)
def testid(self, id):
""" In contrast to validity, this method tests if the objectid
matches the type_id provided in self.type_id or self.type_ids
"""
parts = id.split(".")
if not self.type_id:
return
if not self.type_ids:
self.type_ids = [self.type_id]
assert int(parts[0]) == self.space_id, "Valid id's for {} are {}.{}.x".format(
self.__class__.__name__, self.space_id, self.type_id
)
assert int(parts[1]) in self.type_ids, "Valid id's for {} are {}.{}.x".format(
self.__class__.__name__, self.space_id, self.type_ids
)
class Object(BlockchainObject, AbstractBlockchainInstanceProvider):
""" This class is a basic class that allows to obtain any object
from the blockchyin by fetching it through the API
"""
def refresh(self):
""" This is the refresh method that overloads the prototype in
BlockchainObject.
"""
dict.__init__(
self,
self.blockchain.rpc.get_object(self.identifier),
blockchain_instance=self.blockchain,
)
|
the-stack_106_13263
|
"""empty message
Revision ID: e85fc3effb6c
Revises: 2cb29e48a953
Create Date: 2018-08-25 19:16:12.911198
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'e85fc3effb6c'
down_revision = '2cb29e48a953'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('user', 'birthday',
existing_type=sa.DATE(),
nullable=True)
op.alter_column('user', 'phone',
existing_type=mysql.VARCHAR(collation='utf8_unicode_ci', length=20),
nullable=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('user', 'phone',
existing_type=mysql.VARCHAR(collation='utf8_unicode_ci', length=20),
nullable=False)
op.alter_column('user', 'birthday',
existing_type=sa.DATE(),
nullable=False)
# ### end Alembic commands ###
|
the-stack_106_13264
|
"""
classify-negs.py - parse CIGAR string and classify the errors
Nathan Lubock
REQUIRES Sam 1.4 CIGAR strings (e.g. = for a match and X for a mismatch)
"""
# Ensure Python 2/3 compatibility
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import itertools
import multiprocessing
import sys
import re
from signal import signal, SIGPIPE, SIG_DFL
from collections import defaultdict
# catch broken pipe errors to allow ex) python pyParse.py foo bar | head
# see: https://stackoverflow.com/a/30091579
signal(SIGPIPE, SIG_DFL)
#===============================================================================
def split_tag(my_tag):
"""
Split the CIGAR string into various operations with some regex. Assumes the
tag is alphanumeric!
Idea:
regex (\d+) = repeats of digits, (\D) = [^0-9] (not numbers)
(\D+) allows for matching of deletion characters
ex) 30M1I4M -> [('30','M'), ('1','I'), ('4','M')]
Note:
WILL LEAVE OFF STRAGGLING NUMERICS
ex) 30M1I4M4 -> [('30','M'), ('1','I'), ('4','M')]
"""
my_split = re.findall(r'(\d+)(\D+)', my_tag)
return ((int(x[0]), x[1]) for x in my_split)
#-------------------------------------------------------------------------------
def classify(cigar):
"""
Classify a read based on its CIGAR string
"""
split = list(split_tag(cigar))
indels = [x for x in split if x[1] in ['D', 'I']]
if len(indels) > 0:
# only track position of the first indel
pos = sum(x[0] for x in itertools.takewhile(lambda x: x[1] not in ['D', 'I'], split))
length, err = indels[0]
        if length == 3 and err == 'D':
            out_class = 'Skip'
        elif length == 3 and err == 'I':
            out_class = 'Add'
else:
out_class = 'Indel'
else:
out_class = 'Mismatch'
pos = 0
return (out_class, pos)
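# Worked examples with made-up SAM 1.4 CIGAR strings:
#   classify('30=1X10=')  -> ('Mismatch', 0)   no indel, position offset 0
#   classify('12=3D25=')  -> ('Skip', 12)      3 bp deletion after 12 matches
#   classify('12=3I25=')  -> ('Add', 12)       3 bp insertion after 12 matches
#   classify('12=2D25=')  -> ('Indel', 12)     any other indel length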
#-------------------------------------------------------------------------------
def wrap(pack):
"""
Quick wrapper for multiprocessing
"""
cigar = pack[-1]
pos = pack[-2]
cigar_class, pos_offset = classify(cigar)
return pack[:-2] + [cigar] + [str(int(pos) + pos_offset), cigar_class]
#===============================================================================
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Classify CIGAR strings. MUST BE SAM 1.4 VERSION!')
parser.add_argument('infile',
type=argparse.FileType('r'),
default=sys.stdin,
nargs='?',
help='path to flat file with CIGARS (or stdin if none).' +
' Assumes CIGAR is last column, and left-most position' +
' is the second last column.')
parser.add_argument('-j',
'--proc',
dest='proc',
type=int,
default=1,
metavar='N',
choices=range(1, multiprocessing.cpu_count()+1),
help='number of processors (default=1, max={})'.format(
multiprocessing.cpu_count()))
args = parser.parse_args()
# lazily collapse whitespace and parse last item
pool = multiprocessing.Pool(args.proc)
collapse = (line.split() for line in args.infile)
out_class = pool.imap_unordered(wrap, collapse, chunksize=10000)
for line in out_class:
print('\t'.join(line), file=sys.stdout)
|
the-stack_106_13267
|
"""
This module lets you practice DEBUGGING when LOGIC ERRORS occur.
That is, no run-time exception occurs, but the function simply
does not do the right thing.
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Valerie Galluzzi, Mark Hays, Amanda Stouder, Aaron Wilkin,
their colleagues, and Maria Bruner.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
###############################################################################
#
# DONE: 2. READ these instructions, ASKING QUESTIONS as needed.
#
# This module contains "broken" functions, as in m1 and m2.
# FOLLOW THE SAME STEPS as in the instructions of m1.py
# to find and correct the mistakes in these functions.
#
# The broken functions in here have LOGIC errors.
# The code does NOT break when you run it,
# but it does not produce the correct output.
#
# In THIS module, the mistakes may be ANYWHERE in the module
# EXCEPT:
# -- The is_prime function below is correct.
# -- The tests themselves are correct.
#
# *** IMPORTANT: ***
# Resist the urge to "fiddle" with the code until you stumble
# upon something that works. This exercise will be helpful
# to you ONLY if you use it as an opportunity to learn
# what the error messages mean and how to react to them.
#
# *** ASK QUESTIONS AS NEEDED! ***
#
# When you believe you understand these instructions,
# change the above TO DO to DONE.
#
###############################################################################
def main():
""" Calls the TEST functions in this module. """
run_test_broken_1()
###############################################################################
# Students:
# Do NOT touch the following is_prime function - it has no _TODO_.
# Do NOT copy code from the is_prime function.
#
# Instead, ** CALL ** this function as needed in the problems below.
# There are NO errors in this is_prime function.
###############################################################################
def is_prime(n):
"""
What comes in: An integer n >= 2.
What goes out:
-- Returns True if the given integer is prime,
else returns False.
Side effects: None.
Examples:
-- is_prime(11) returns True
-- is_prime(12) returns False
-- is_prime(2) returns True
Note: The algorithm used here is simple and clear but slow.
"""
for k in range(2, (n // 2) + 1):
if n % k == 0:
return False
return True
###############################################################################
# Students: Do NOT change any of the TEST functions.
# There are NO errors in the TESTS.
###############################################################################
def run_test_broken_1():
""" Tests the broken_1 function. """
print()
print('--------------------------------------------------')
print('Testing the broken_1 function:')
print('--------------------------------------------------')
expected = 3
actual = broken_1(3) # Test 1 of broken_1
print('Expected:', expected)
print('Actual: ', actual)
expected = 4
actual = broken_1(10) # Test 2 of broken_1
print('Expected:', expected)
print('Actual: ', actual)
expected = 135 # Yes, this is the correct answer
actual = broken_1(1000) # Test 3 of broken_1
print('Expected:', expected)
print('Actual: ', actual)
# -----------------------------------------------------------------------------
# DONE: 3. Follow the INSTRUCTIONS AT THE TOP OF THIS MODULE
# to correct the mistake(s) in the following function.
# -----------------------------------------------------------------------------
def broken_1(m):
"""
What comes in: a positive integer m that is at least 2.
What goes out: Returns the number of prime numbers
between m and (2m + 1) inclusive.
Side effects: None.
Examples:
If m is 3, this function returns 3 since there
are 3 primes between 3 and 7 (namely: 3, 5, and 7).
If m is 10, then this function returns 4 since there
are 4 primes between 10 and 21 (namely: 11, 13, 17 and 19).
Type hints:
:type m: int
"""
# ** For full credit you must appropriately
# ** use (call) the is_prime function that is DEFINED ABOVE.
count = 0
for k in range(m, 2*m+2):
        if is_prime(k):
count = count + 1
return count
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main()
|
the-stack_106_13268
|
import logging
import smtplib
from email import encoders as Encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
from io import BytesIO
from scrapy.utils.misc import arg_to_iter
class SESender(object):
def __init__(self, AWS_HOST_NAME, AWS_USER_NAME, AWS_PASSWORD, AWS_PORT_NUMBER, FROM_ADDRESS):
self.host_name = AWS_HOST_NAME
self.user_name = AWS_USER_NAME
self.password = AWS_PASSWORD
self.port_number = AWS_PORT_NUMBER
self.from_address = FROM_ADDRESS
    def construct_message(self, to, subject, body, cc, attachments, mimetype, charset):
        to = list(arg_to_iter(to))
        # Use a multipart container only when attachments are present;
        # otherwise build a single-part message of the requested mimetype.
        if attachments:
            message = MIMEMultipart()
        else:
            message = MIMENonMultipart(*mimetype.split('/', 1))
        # Standard headers
        message['From'] = self.from_address
        message['To'] = COMMASPACE.join(to)
        message['Date'] = formatdate(localtime=True)
        message['Subject'] = subject
        recipients = to[:]
        if cc:
            recipients.extend(cc)
            message['Cc'] = COMMASPACE.join(cc)
        if charset:
            message.set_charset(charset)
        if attachments:
            # Body goes in the first part, followed by one part per attachment
            message.attach(MIMEText(body, 'plain', charset or 'us-ascii'))
            for attach_name, attach_mimetype, f in attachments:
                part = MIMEBase(*attach_mimetype.split('/'))
                part.set_payload(f.read())
                Encoders.encode_base64(part)
                part.add_header('Content-Disposition',
                                'attachment; filename="%s"' % attach_name)
                message.attach(part)
        else:
            message.set_payload(body)
        result = self.send_message(recipients, message, body=body)
return result
    def send_message(self, recipients, message_to_send, body=None):
        try:
            s = smtplib.SMTP(self.host_name, self.port_number)
            s.starttls()
            s.login(self.user_name, self.password)
            s.sendmail(self.from_address, recipients, message_to_send.as_string())
            s.quit()
            return {'Result': "Mail Sent",
                    'To': recipients,
                    'CC': message_to_send['Cc'],
                    'Body': body,
                    'Subject': message_to_send['Subject']
                    }
        except Exception:
            logging.exception("Unable to send mail")
            return {'Result': "Unable to send mail",
                    'To': recipients,
                    'CC': message_to_send['Cc'],
                    'Subject': message_to_send['Subject']}
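# Minimal usage sketch (host, credentials, and addresses below are placeholders,
# not values taken from this module):
#
#   sender = SESender('email-smtp.us-east-1.amazonaws.com', 'SMTP_USER',
#                     'SMTP_PASS', 587, 'noreply@example.com')
#   with open('report.csv', 'rb') as f:
#       result = sender.construct_message(
#           to=['ops@example.com'], subject='Daily report', body='See attached.',
#           cc=None, attachments=[('report.csv', 'text/csv', f)],
#           mimetype='text/plain', charset='utf-8')
#   print(result['Result'])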
|
the-stack_106_13271
|
from __future__ import absolute_import
from __future__ import unicode_literals
import mock
import postgres_copy
import six
import sqlalchemy
import os
from django.test.utils import override_settings
from mock.mock import Mock
from corehq.apps.domain.models import Domain
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.userreports.models import StaticDataSourceConfiguration
from corehq.apps.userreports.util import get_indicator_adapter, get_table_name
from corehq.sql_db.connections import connection_manager, UCR_ENGINE_ID
from io import open
def setUpModule():
if isinstance(Domain.get_db(), Mock):
# needed to skip setUp for javascript tests thread on Travis
return
_call_center_domain_mock = mock.patch(
'corehq.apps.callcenter.data_source.call_center_data_source_configuration_provider'
)
_call_center_domain_mock.start()
domain = create_domain('up-nrhm')
with override_settings(SERVER_ENVIRONMENT='production'):
configs = StaticDataSourceConfiguration.by_domain(domain.name)
adapters = [get_indicator_adapter(config) for config in configs]
for adapter in adapters:
adapter.build_table()
engine = connection_manager.get_engine(UCR_ENGINE_ID)
metadata = sqlalchemy.MetaData(bind=engine)
metadata.reflect(bind=engine, extend_existing=True)
path = os.path.join(os.path.dirname(__file__), 'fixtures')
for file_name in os.listdir(path):
with open(os.path.join(path, file_name), encoding='utf-8') as f:
table_name = get_table_name(domain.name, file_name[:-4])
table = metadata.tables[table_name]
postgres_copy.copy_from(
f, table, engine, format='csv' if six.PY3 else b'csv',
null='' if six.PY3 else b'', header=True
)
_call_center_domain_mock.stop()
def tearDownModule():
if isinstance(Domain.get_db(), Mock):
# needed to skip setUp for javascript tests thread on Travis
return
_call_center_domain_mock = mock.patch(
'corehq.apps.callcenter.data_source.call_center_data_source_configuration_provider'
)
domain = Domain.get_by_name('up-nrhm')
engine = connection_manager.get_engine(UCR_ENGINE_ID)
metadata = sqlalchemy.MetaData(bind=engine)
metadata.reflect(bind=engine, extend_existing=True)
path = os.path.join(os.path.dirname(__file__), 'fixtures')
for file_name in os.listdir(path):
table_name = get_table_name(domain.name, file_name[:-4])
table = metadata.tables[table_name]
table.drop()
_call_center_domain_mock.start()
domain.delete()
_call_center_domain_mock.stop()
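# Note on fixtures (an assumption inferred from the code above, not documented
# here): the fixtures/ directory next to this module is expected to hold one CSV
# file per static UCR data source; the '.csv' suffix is stripped (file_name[:-4])
# and the remainder is passed to get_table_name() to resolve the target table.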
|
the-stack_106_13274
|
#!/usr/bin/env python
"""
Raspberry Pi Live Image Inference
Continuously captures images from the Raspberry Pi Camera module and performs
inference using the provided .eim model file. Outputs probabilities to the console.
Author: EdgeImpulse, Inc.
Date: June 8, 2021
License: Apache-2.0 (apache.org/licenses/LICENSE-2.0)
"""
import os, sys, time
import cv2
import numpy as np
from picamera import PiCamera
from picamera.array import PiRGBArray
from edge_impulse_linux.runner import ImpulseRunner
# Settings
model_file = "modelfile.eim" # Trained ML model from Edge Impulse
draw_fps = True # Draw FPS on screen
res_width = 96 # Resolution of camera (width)
res_height = 96 # Resolution of camera (height)
rotation = 0 # Camera rotation (0, 90, 180, or 270)
img_width = 28 # Resize width to this for inference
img_height = 28 # Resize height to this for inference
# The ImpulseRunner module will attempt to load files relative to its location,
# so we make it load files relative to this program instead
dir_path = os.path.dirname(os.path.realpath(__file__))
model_path = os.path.join(dir_path, model_file)
# Load the model file
runner = ImpulseRunner(model_path)
# Initialize model
try:
# Print model information
model_info = runner.init()
print("Model name:", model_info['project']['name'])
print("Model owner:", model_info['project']['owner'])
# Exit if we cannot initialize the model
except Exception as e:
print("ERROR: Could not initialize model")
print("Exception:", e)
if (runner):
runner.stop()
sys.exit(1)
# Initial framerate value
fps = 0
# Start the camera
with PiCamera() as camera:
# Configure camera settings
camera.resolution = (res_width, res_height)
camera.rotation = rotation
# Container for our frames
raw_capture = PiRGBArray(camera, size=(res_width, res_height))
# Continuously capture frames (this is our while loop)
for frame in camera.capture_continuous(raw_capture,
format='bgr',
use_video_port=True):
# Get timestamp for calculating actual framerate
timestamp = cv2.getTickCount()
# Get Numpy array that represents the image
img = frame.array
# Resize captured image
img_resize = cv2.resize(img, (img_width, img_height))
# Convert image to grayscale
img_resize = cv2.cvtColor(img_resize, cv2.COLOR_BGR2GRAY)
# Convert image to 1D vector of floating point numbers
features = np.reshape(img_resize, (img_width * img_height)) / 255
# Edge Impulse model expects features in list format
features = features.tolist()
# Perform inference
res = None
try:
res = runner.classify(features)
except Exception as e:
print("ERROR: Could not perform inference")
print("Exception:", e)
# Display predictions and timing data
print("Output:", res)
# Display prediction on preview
if res is not None:
# Find label with the highest probability
predictions = res['result']['classification']
max_label = ""
max_val = 0
for p in predictions:
if predictions[p] > max_val:
max_val = predictions[p]
max_label = p
# Draw predicted label on bottom of preview
cv2.putText(img,
max_label,
(0, res_height - 20),
cv2.FONT_HERSHEY_PLAIN,
1,
(255, 255, 255))
# Draw predicted class's confidence score (probability)
cv2.putText(img,
str(round(max_val, 2)),
(0, res_height - 2),
cv2.FONT_HERSHEY_PLAIN,
1,
(255, 255, 255))
# Draw framerate on frame
if draw_fps:
cv2.putText(img,
"FPS: " + str(round(fps, 2)),
(0, 12),
cv2.FONT_HERSHEY_PLAIN,
1,
(255, 255, 255))
# Show the frame
cv2.imshow("Frame", img)
# Clear the stream to prepare for next frame
raw_capture.truncate(0)
        # Calculate framerate
frame_time = (cv2.getTickCount() - timestamp) / cv2.getTickFrequency()
fps = 1 / frame_time
# Press 'q' to quit
if cv2.waitKey(1) == ord('q'):
break
# Clean up
cv2.destroyAllWindows()
if runner:
    runner.stop()
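# Note (assumption): the resize to 28x28, the grayscale conversion, and the /255
# scaling above presume the Edge Impulse impulse was trained on 28x28 grayscale
# images with pixel values normalised to [0, 1]; adjust img_width, img_height,
# and the scaling if your model expects a different input shape.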
|
the-stack_106_13276
|
from source.model.structure_model import StraightBeam
import numpy as np
params = {
"name": "CaarcBeamPrototypeOptimizable",
"domain_size": "3D",
"system_parameters": {
"element_params": {
"type": "CRBeam",
"is_nonlinear": True
},
"material": {
"density": 7850.0,
"youngs_modulus": 2069000000,
"poisson_ratio": 0.29,
"damping_ratio": 1
},
"geometry": {
"length_x": 1.2,
"number_of_elements": 1,
"defined_on_intervals": [{
"interval_bounds": [0.0, "End"],
"length_y": [1.0],
"length_z": [1.0],
"area": [0.0001],
"shear_area_y": [0.0],
"shear_area_z": [0.0],
"moment_of_inertia_y": [0.0001],
"moment_of_inertia_z": [0.0001],
"torsional_moment_of_inertia": [0.0001],
"outrigger_mass": [0.0],
"outrigger_stiffness": [0.0]}]
}
},
"boundary_conditions": "fixed-free"
}
def test_structure_model():
    # Smoke test: only verifies that a StraightBeam can be constructed from the
    # parameter dictionary above without raising.
    beam = StraightBeam(params)
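# Run with pytest (a sketch; the module filename is an assumption):
#   pytest test_structure_model.py -q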
|
the-stack_106_13279
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List, Optional, Tuple
import aiofiles
from polyaxon.k8s.async_manager import AsyncK8SManager
from polyaxon.k8s.logging.async_monitor import query_k8s_operation_logs
from polyaxon.polyboard.logging import V1Log, V1Logs
from polyaxon.stores.manager import list_files
from polyaxon.streams.tasks.logs import download_logs_file, download_tmp_logs
from polyaxon.types import AwareDT
def get_logs_files(run_uuid: str) -> List[str]:
files = list_files(subpath="{}/plxlogs".format(run_uuid))
if not files["files"]:
return []
return sorted([f for f in files["files"].keys()])
async def get_next_file(files: List[str], last_file: str = None) -> Optional[str]:
if not files:
return None
if not last_file:
return files[0]
i = 0
for i, f in enumerate(files):
if f == last_file:
break
i += 1
if i >= len(files):
return None
return files[i]
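# Illustrative behaviour of get_next_file (a sketch; file names are made up):
#   files = ["logs_000.plx", "logs_001.plx", "logs_002.plx"]
#   await get_next_file(files)                            -> "logs_000.plx"
#   await get_next_file(files, last_file="logs_001.plx")  -> "logs_002.plx"
#   await get_next_file(files, last_file="logs_002.plx")  -> None  (nothing newer yet)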
async def read_logs_file(logs_path) -> List[V1Log]:
if not os.path.exists(logs_path):
return []
async with aiofiles.open(logs_path, mode="r") as f:
contents = await f.read()
if contents:
logs = V1Logs.read(contents)
return logs.logs
return []
async def get_archived_operation_logs(
run_uuid: str, last_file: Optional[str], check_cache: bool = True
) -> Tuple[List[V1Log], Optional[str], List[str]]:
files = get_logs_files(run_uuid)
logs = []
last_file = await get_next_file(files=files, last_file=last_file)
if not last_file:
return logs, last_file, files
logs_path = await download_logs_file(
run_uuid=run_uuid, last_file=last_file, check_cache=check_cache
)
logs = await read_logs_file(logs_path)
return logs, last_file, files
async def get_tmp_operation_logs(
run_uuid: str, last_time: Optional[AwareDT]
) -> Tuple[List[V1Log], Optional[AwareDT]]:
logs = []
tmp_logs = await download_tmp_logs(run_uuid=run_uuid)
if not os.path.exists(tmp_logs):
return logs, None
tmp_log_files = os.listdir(tmp_logs)
if not tmp_log_files:
return logs, None
for tmp_file in tmp_log_files:
logs_path = os.path.join(tmp_logs, tmp_file)
logs += await read_logs_file(logs_path)
if last_time:
logs = [l for l in logs if l.timestamp > last_time]
if logs:
logs = sorted(logs, key=lambda x: x.timestamp)
last_time = logs[-1].timestamp
return logs, last_time
async def get_operation_logs(
k8s_manager: AsyncK8SManager,
k8s_operation: any,
instance: str,
last_time: Optional[AwareDT],
):
previous_last = last_time
operation_logs, last_time = await query_k8s_operation_logs(
instance=instance,
last_time=None,
k8s_manager=k8s_manager,
stream=True,
)
if k8s_operation["status"].get("completionTime"):
last_time = None
if previous_last:
operation_logs = [l for l in operation_logs if l.timestamp > previous_last]
return operation_logs, last_time
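# Minimal polling sketch for archived logs (the surrounding service loop is an
# assumption; only the call signature comes from this module):
#   last_file = None
#   while True:
#       logs, last_file, files = await get_archived_operation_logs(run_uuid, last_file)
#       ...  # emit `logs`; stop once no newer archived file is returned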
|
the-stack_106_13281
|
#!/usr/bin/env python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Displays a frame with two buttons and a background image, using pyui library.
Run this example by typing in:
python pyuidemo.py
Select "Quit" button to exit demo.
"""
from __future__ import print_function
import pyui
from twisted.internet import reactor, pyuisupport
def onButton(self):
print("got a button")
def onQuit(self):
reactor.stop()
def main():
pyuisupport.install(args=(640, 480), kw={"renderer": "2d"})
w = pyui.widgets.Frame(50, 50, 400, 400, "clipme")
b = pyui.widgets.Button("A button is here", onButton)
q = pyui.widgets.Button("Quit!", onQuit)
w.addChild(b)
w.addChild(q)
w.pack()
w.setBackImage("pyui_bg.png")
reactor.run()
if __name__ == "__main__":
main()
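# Note: this demo assumes the third-party `pyui` package is installed and that a
# background image named `pyui_bg.png` is available in the working directory.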
|